Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- drivers/gpu/drm/Kconfig | 118
-rw-r--r-- drivers/gpu/drm/Kconfig.debug | 117
-rw-r--r-- drivers/gpu/drm/Makefile | 12
-rw-r--r-- drivers/gpu/drm/adp/adp-mipi.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aldebaran.c | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 161
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 52
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 109
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 57
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1047
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 112
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 160
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c | 241
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h | 69
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 183
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 186
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 214
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 62
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 266
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c | 96
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c) | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 175
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 90
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 103
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 1199
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 72
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 44
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 276
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 59
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 171
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 161
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 220
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 136
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 134
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 62
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 981
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 136
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 967
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h | 76
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 80
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 79
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 386
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 421
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atom.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cikd.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 206
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 209
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 783
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 685
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 46
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 131
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 108
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 250
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 117
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 46
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 294
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/imu_v11_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/imu_v12_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c | 209
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 97
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 354
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 355
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_userqueue.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c) | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 84
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 74
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 72
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c | 57
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c | 105
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h | 144
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 63
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 62
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 52
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 128
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 147
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 72
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 92
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 291
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 227
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 255
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 313
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si.c | 413
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 116
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_enums.h | 234
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_ih.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sid.h | 1555
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15d.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 126
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 45
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 46
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 162
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 50
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 45
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 451
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 76
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 128
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 145
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 95
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 142
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_module.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 65
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 56
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 54
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 57
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 11
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 47
-rw-r--r-- drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1014
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 37
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 60
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 51
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c | 178
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h | 26
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/Makefile | 41
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c) | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c) | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c | 118
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c) | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 120
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 285
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 73
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 124
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 145
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 453
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 634
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 79
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_fused_io.c | 148
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_fused_io.h | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_helper.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_plane.h | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_spl_translate.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c | 62
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 65
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 62
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce60/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 45
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c | 239
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/Makefile | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 535
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 115
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h | 77
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 4398
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h | 183
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c | 144
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h | 202
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 36
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 270
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 197
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dsc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/Makefile | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c) | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 182
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 58
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 51
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 126
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_status.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 36
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 93
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/optc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link.h | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c | 90
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c | 89
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 64
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/irq_service.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq_types.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 63
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_detection.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 46
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_factory.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_validation.c | 124
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_validation.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 73
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 64
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 355
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h | 43
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 51
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c | 385
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c | 23
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/os_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/Makefile | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c) | 42
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 63
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 104
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 100
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c | 46
-rw-r--r-- drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 184
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 490
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c | 142
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 362
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/include/gpio_service_interface.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/include/link_service_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c | 48
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 73
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 38
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 14
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h | 26
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h | 16
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h | 23
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h | 41
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h | 44
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h | 188
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h | 22
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h | 39
-rw-r--r-- drivers/gpu/drm/amd/include/atombios.h | 6
-rw-r--r-- drivers/gpu/drm/amd/include/atomfirmware.h | 3
-rw-r--r-- drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h | 74
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 27
-rw-r--r-- drivers/gpu/drm/amd/include/v11_structs.h | 8
-rw-r--r-- drivers/gpu/drm/amd/include/v12_structs.h | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 112
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 54
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 6
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 74
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 118
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h | 7
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 368
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c | 42
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c | 36
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 178
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 44
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h | 16
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 11
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 25
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 39
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 28
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 39
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 37
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 37
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 76
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 178
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 106
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c | 136
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 368
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h | 35
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 105
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 11
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 60
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 90
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 77
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 27
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c | 3
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h | 1
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_crtc.c | 23
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c | 12
-rw-r--r-- drivers/gpu/drm/arm/malidp_planes.c | 2
-rw-r--r-- drivers/gpu/drm/armada/armada_fb.c | 12
-rw-r--r-- drivers/gpu/drm/armada/armada_fb.h | 4
-rw-r--r-- drivers/gpu/drm/armada/armada_fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/ast/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/ast/Makefile | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_2000.c | 149
-rw-r--r-- drivers/gpu/drm/ast/ast_2100.c | 348
-rw-r--r-- drivers/gpu/drm/ast/ast_2300.c | 1328
-rw-r--r-- drivers/gpu/drm/ast/ast_2500.c | 569
-rw-r--r-- drivers/gpu/drm/ast/ast_2600.c | 44
-rw-r--r-- drivers/gpu/drm/ast/ast_cursor.c | 45
-rw-r--r-- drivers/gpu/drm/ast/ast_dram_tables.h | 207
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 34
-rw-r--r-- drivers/gpu/drm/ast/ast_mm.c | 26
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 147
-rw-r--r-- drivers/gpu/drm/ast/ast_post.c | 2027
-rw-r--r-- drivers/gpu/drm/ast/ast_post.h | 50
-rw-r--r-- drivers/gpu/drm/ast/ast_reg.h | 2
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511.h | 53
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 77
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_cec.c | 57
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 363
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7533.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 41
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 42
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 253
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.h | 6
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 52
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.c | 56
-rw-r--r-- drivers/gpu/drm/bridge/aux-bridge.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/aux-hpd-bridge.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c | 243
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h | 2
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 88
-rw-r--r-- drivers/gpu/drm/bridge/chipone-icn6211.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/chrontel-ch7033.c | 15
-rw-r--r-- drivers/gpu/drm/bridge/cros-ec-anx7688.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/display-connector.c | 20
-rw-r--r-- drivers/gpu/drm/bridge/fsl-ldb.c | 16
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx-ldb-helper.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx-ldb-helper.h | 5
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c | 19
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qm-ldb.c | 32
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c | 22
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c | 30
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/ite-it6263.c | 18
-rw-r--r-- drivers/gpu/drm/bridge/ite-it6505.c | 57
-rw-r--r-- drivers/gpu/drm/bridge/ite-it66121.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt8912b.c | 17
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9211.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611.c | 28
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 19
-rw-r--r-- drivers/gpu/drm/bridge/lvds-codec.c | 12
-rw-r--r-- drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 17
-rw-r--r-- drivers/gpu/drm/bridge/microchip-lvds.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/nwl-dsi.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/nxp-ptn3460.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/panel.c | 21
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8622.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8640.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/samsung-dsim.c | 84
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 16
-rw-r--r-- drivers/gpu/drm/bridge/sii9234.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/sil-sii8620.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/simple-bridge.c | 15
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 24
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 18
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/tc358762.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/tc358764.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 64
-rw-r--r-- drivers/gpu/drm/bridge/tc358768.c | 49
-rw-r--r-- drivers/gpu/drm/bridge/tc358775.c | 53
-rw-r--r-- drivers/gpu/drm/bridge/tda998x_drv.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/thc63lvd1024.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/ti-dlpc3433.c | 12
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 38
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 255
-rw-r--r-- drivers/gpu/drm/bridge/ti-tdp158.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/ti-tfp410.c | 15
-rw-r--r-- drivers/gpu/drm/bridge/ti-tpd12s015.c | 19
-rw-r--r-- drivers/gpu/drm/ci/arm64.config | 2
-rw-r--r-- drivers/gpu/drm/ci/build.sh | 25
-rw-r--r-- drivers/gpu/drm/ci/build.yml | 24
-rw-r--r-- drivers/gpu/drm/ci/check-devicetrees.yml | 50
-rw-r--r-- drivers/gpu/drm/ci/container.yml | 22
-rwxr-xr-x drivers/gpu/drm/ci/dt-binding-check.sh | 19
-rwxr-xr-x drivers/gpu/drm/ci/dtbs-check.sh | 22
-rw-r--r-- drivers/gpu/drm/ci/gitlab-ci.yml | 99
-rwxr-xr-x drivers/gpu/drm/ci/igt_runner.sh | 12
-rw-r--r-- drivers/gpu/drm/ci/image-tags.yml | 20
-rwxr-xr-x drivers/gpu/drm/ci/kunit.sh | 16
-rw-r--r-- drivers/gpu/drm/ci/kunit.yml | 37
-rwxr-xr-x drivers/gpu/drm/ci/lava-submit.sh | 7
-rwxr-xr-x drivers/gpu/drm/ci/setup-llvm-links.sh | 13
-rw-r--r-- drivers/gpu/drm/ci/test.yml | 113
-rw-r--r-- drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt | 8
-rw-r--r-- drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-amly-fails.txt | 23
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-amly-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-apl-fails.txt | 8
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-apl-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-cml-fails.txt | 20
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-cml-skips.txt | 2
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-glk-fails.txt | 32
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-glk-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt | 13
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt | 5
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt | 9
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-whl-fails.txt | 22
-rw-r--r-- drivers/gpu/drm/ci/xfails/i915-whl-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt | 20
-rw-r--r-- drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt | 7
-rw-r--r-- drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt | 28
-rw-r--r-- drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt | 21
-rw-r--r-- drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt | 4
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt | 7
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt | 9
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt | 7
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt | 6
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt | 29
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt | 132
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt | 37
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt | 2
-rw-r--r-- drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt | 30
-rw-r--r-- drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt | 1
-rw-r--r-- drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt | 28
-rw-r--r-- drivers/gpu/drm/ci/xfails/vkms-none-skips.txt | 2
-rw-r--r-- drivers/gpu/drm/clients/drm_client_setup.c | 7
-rw-r--r-- drivers/gpu/drm/display/Kconfig | 13
-rw-r--r-- drivers/gpu/drm/display/Makefile | 4
-rw-r--r-- drivers/gpu/drm/display/drm_bridge_connector.c | 275
-rw-r--r-- drivers/gpu/drm/display/drm_dp_aux_bus.c | 3
-rw-r--r-- drivers/gpu/drm/display/drm_dp_cec.c | 38
-rw-r--r-- drivers/gpu/drm/display/drm_dp_helper.c | 598
-rw-r--r-- drivers/gpu/drm/display/drm_dp_mst_topology.c | 117
-rw-r--r-- drivers/gpu/drm/display/drm_dp_tunnel.c | 23
-rw-r--r-- drivers/gpu/drm/display/drm_dsc_helper.c | 1
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_audio_helper.c | 7
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_cec_helper.c | 193
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c | 65
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_helper.c | 171
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_state_helper.c | 423
-rw-r--r-- drivers/gpu/drm/display/drm_scdc_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 61
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 164
-rw-r--r-- drivers/gpu/drm/drm_atomic_state_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_atomic_uapi.c | 1
-rw-r--r-- drivers/gpu/drm/drm_auth.c | 65
-rw-r--r-- drivers/gpu/drm/drm_blend.c | 6
-rw-r--r-- drivers/gpu/drm/drm_bridge.c | 217
-rw-r--r-- drivers/gpu/drm/drm_bridge_helper.c | 60
-rw-r--r-- drivers/gpu/drm/drm_buddy.c | 44
-rw-r--r-- drivers/gpu/drm/drm_cache.c | 9
-rw-r--r-- drivers/gpu/drm/drm_client.c | 42
-rw-r--r-- drivers/gpu/drm/drm_client_event.c | 1
-rw-r--r-- drivers/gpu/drm/drm_client_modeset.c | 259
-rw-r--r-- drivers/gpu/drm/drm_color_mgmt.c | 210
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 44
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_damage_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_debugfs.c | 166
-rw-r--r-- drivers/gpu/drm/drm_debugfs_crc.c | 1
-rw-r--r-- drivers/gpu/drm/drm_displayid_internal.h | 31
-rw-r--r-- drivers/gpu/drm/drm_draw.c | 100
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 119
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 352
-rw-r--r-- drivers/gpu/drm/drm_exec.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_dma_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_fbdev_dma.c | 1
-rw-r--r-- drivers/gpu/drm/drm_fbdev_shmem.c | 1
-rw-r--r-- drivers/gpu/drm/drm_fbdev_ttm.c | 1
-rw-r--r-- drivers/gpu/drm/drm_file.c | 48
-rw-r--r-- drivers/gpu/drm/drm_flip_work.c | 1
-rw-r--r-- drivers/gpu/drm/drm_format_helper.c | 679
-rw-r--r-- drivers/gpu/drm/drm_format_internal.h | 174
-rw-r--r-- drivers/gpu/drm/drm_fourcc.c | 45
-rw-r--r-- drivers/gpu/drm/drm_framebuffer.c | 58
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 147
-rw-r--r-- drivers/gpu/drm/drm_gem_atomic_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_gem_dma_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 62
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c | 212
-rw-r--r-- drivers/gpu/drm/drm_gem_ttm_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c | 80
-rw-r--r-- drivers/gpu/drm/drm_gpusvm.c | 820
-rw-r--r-- drivers/gpu/drm/drm_gpuvm.c | 133
-rw-r--r-- drivers/gpu/drm/drm_internal.h | 16
-rw-r--r-- drivers/gpu/drm/drm_managed.c | 1
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c | 9
-rw-r--r-- drivers/gpu/drm/drm_mipi_dsi.c | 41
-rw-r--r-- drivers/gpu/drm/drm_mode_config.c | 8
-rw-r--r-- drivers/gpu/drm/drm_modeset_helper.c | 6
-rw-r--r-- drivers/gpu/drm/drm_modeset_lock.c | 2
-rw-r--r-- drivers/gpu/drm/drm_pagemap.c | 838
-rw-r--r-- drivers/gpu/drm/drm_panel.c | 198
-rw-r--r-- drivers/gpu/drm/drm_panel_backlight_quirks.c | 1
-rw-r--r-- drivers/gpu/drm/drm_panel_orientation_quirks.c | 7
-rw-r--r-- drivers/gpu/drm/drm_panic.c | 143
-rw-r--r-- drivers/gpu/drm/drm_panic_qr.rs | 108
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 1
-rw-r--r-- drivers/gpu/drm/drm_plane.c | 53
-rw-r--r-- drivers/gpu/drm/drm_plane_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 73
-rw-r--r-- drivers/gpu/drm/drm_print.c | 1
-rw-r--r-- drivers/gpu/drm/drm_privacy_screen.c | 1
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 11
-rw-r--r-- drivers/gpu/drm/drm_self_refresh_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_simple_kms_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_suballoc.c | 2
-rw-r--r-- drivers/gpu/drm/drm_syncobj.c | 48
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c | 4
-rw-r--r-- drivers/gpu/drm/drm_vblank.c | 3
-rw-r--r-- drivers/gpu/drm/drm_vblank_work.c | 2
-rw-r--r-- drivers/gpu/drm/drm_vma_manager.c | 1
-rw-r--r-- drivers/gpu/drm/drm_writeback.c | 8
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 4
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 14
-rw-r--r-- drivers/gpu/drm/exynos/exynos7_drm_decon.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 7
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.h | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 12
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 32
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_mic.c | 7
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 2
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 4
-rw-r--r-- drivers/gpu/drm/gma500/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/gma500/fbdev.c | 8
-rw-r--r-- drivers/gpu/drm/gma500/framebuffer.c | 14
-rw-r--r-- drivers/gpu/drm/gma500/mmu.c | 41
-rw-r--r-- drivers/gpu/drm/gma500/mmu.h | 2
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_crtc.c | 7
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.h | 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_drv.h | 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_modes.c | 31
-rw-r--r-- drivers/gpu/drm/gud/gud_drv.c | 33
-rw-r--r-- drivers/gpu/drm/gud/gud_internal.h | 1
-rw-r--r-- drivers/gpu/drm/gud/gud_pipe.c | 13
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/Makefile | 3
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c | 16
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h | 10
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c | 91
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h | 36
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c | 94
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h | 130
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c | 71
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c | 104
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c | 74
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 87
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 12
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 3
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 4
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 4
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm.h | 4
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_modeset.c | 206
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 17
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_ch7017.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_ch7xxx.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_ivch.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_ns2501.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_sil164.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_tfp410.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_dp.c | 63
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_hdmi.c | 33
-rw-r--r-- drivers/gpu/drm/i915/display/hsw_ips.c | 24
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_display_sr.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_plane.c | 111
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_wm.c | 1252
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_wm.h | 18
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 113
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi_regs.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_alpm.c | 261
-rw-r--r-- drivers/gpu/drm/i915/display/intel_alpm.h | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.c | 29
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c | 41
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_backlight.c | 49
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 34
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bo.c | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bo.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 796
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.h | 69
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 227
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.h | 50
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cmtg.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cmtg_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 91
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.c | 69
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 66
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 47
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy.c | 81
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 1026
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_de.h | 46
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 398
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 26
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_conversion.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_core.h | 60
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 57
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_device.c | 25
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_device.h | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_driver.c | 122
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_irq.c | 993
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_irq.h | 82
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_params.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_params.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 151
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_map.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_well.c | 160
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_regs.h | 2932
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_reset.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_rpm.c | 69
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_rpm.h | 37
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_rps.c | 27
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_rps.h | 24
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_snapshot.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 65
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_wa.c | 31
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_wa.h | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dkl_phy.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.c | 585
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.h | 25
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc_regs.h | 493
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc_wl.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 325
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux.c | 45
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 148
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 24
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 48
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 228
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_test.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpio_phy.c | 222
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll.c | 517
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll.h | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 566
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 80
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpt.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpt_common.c | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_drrs.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.c | 270
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.h | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 221
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.c | 66
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_encoder.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.c | 33
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_bo.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_pin.c | 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_pin.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 62
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c | 88
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.h | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev_fb.c | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev_fb.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fdi.c | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 80
-rw-r--r-- drivers/gpu/drm/i915/display/intel_flipq.c | 472
-rw-r--r-- drivers/gpu/drm/i915/display/intel_flipq.h | 37
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 60
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.h | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_global_state.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.c | 41
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 49
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp_gsc.c | 124
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp_gsc.h | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c | 181
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h | 63
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp_regs.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 279
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c | 677
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.h | 31
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 673
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug_irq.h | 28
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hti.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hti_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_link_bw.c | 239
-rw-r--r-- drivers/gpu/drm/i915/display/intel_link_bw.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_load_detect.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lpe_audio.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 164
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.h | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_setup.c | 243
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_setup.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_verify.c | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.c | 132
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch.c | 340
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch.h | 58
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_display.c | 223
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_display.h | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.c | 275
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.h | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pfit.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pipe_crc.c | 142
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane.c (renamed from drivers/gpu/drm/i915/display/intel_atomic_plane.c) | 218
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane.h (renamed from drivers/gpu/drm/i915/display/intel_atomic_plane.h) | 28
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane_initial.c | 97
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pmdemand.c | 76
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pps.c | 51
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pps_regs.h | 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 648
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.h | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr_regs.h | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sbi.c | 90
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sbi.h | 27
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sbi_regs.h | 65
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c | 24
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_phy.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite_uapi.c | 17
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c289
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c69
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga_regs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c350
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr_regs.h121
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c171
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h14
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c23
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c60
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c1158
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h55
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c192
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.h6
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c164
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.h5
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.c50
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.h156
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c145
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c151
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c24
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c1
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c56
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c86
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c2
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c9
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c54
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c19
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h13
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c68
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c124
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h73
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h2
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c54
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c268
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c4
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c1
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c8
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3027
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h108
-rw-r--r--drivers/gpu/drm/i915/i915_request.c7
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c13
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c8
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c20
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h27
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c6
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c4
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c15
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h3
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c29
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h15
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c4
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.c94
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.h27
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c8
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h5
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c5
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h11
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/librapl.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c2
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c109
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.h26
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c2
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c316
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h56
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.c (renamed from drivers/gpu/drm/i915/vlv_sideband.c)178
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.h37
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb_reg.h (renamed from drivers/gpu/drm/i915/vlv_sideband_reg.h)6
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h125
-rw-r--r--drivers/gpu/drm/imagination/Makefile2
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c147
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h40
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c16
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c40
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.h85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c26
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.c85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_riscv.c165
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.c17
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c31
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_util.c66
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c2
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.c8
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c199
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h3
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c9
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h153
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_riscv.h41
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c3
-rw-r--r--drivers/gpu/drm/imx/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/dc/Kconfig13
-rw-r--r--drivers/gpu/drm/imx/dc/Makefile7
-rw-r--r--drivers/gpu/drm/imx/dc/dc-cf.c172
-rw-r--r--drivers/gpu/drm/imx/dc/dc-crtc.c555
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.c177
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.h59
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.c293
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.h102
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ed.c288
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fg.c376
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fl.c185
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.c258
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.h129
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fw.c222
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ic.c282
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.c143
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.h131
-rw-r--r--drivers/gpu/drm/imx/dc/dc-lb.c325
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.c158
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.h101
-rw-r--r--drivers/gpu/drm/imx/dc/dc-plane.c224
-rw-r--r--drivers/gpu/drm/imx/dc/dc-tc.c141
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c3
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c10
-rw-r--r--drivers/gpu/drm/lib/drm_random.c1
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c4
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c6
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c16
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h3
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h6
-rw-r--r--drivers/gpu/drm/loongson/Kconfig2
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c10
-rw-r--r--drivers/gpu/drm/mediatek/Makefile8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c36
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c16
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c128
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c36
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c424
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h3
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c13
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c13
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c19
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c55
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ddc.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c82
-rw-r--r--drivers/gpu/drm/msm/Kconfig37
-rw-r--r--drivers/gpu/drm/msm/Makefile27
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c25
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpummu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c17
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_catalog.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c17
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_catalog.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c44
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c92
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c128
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c206
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c73
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_preempt.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c86
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c173
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h73
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h50
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h494
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h56
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h60
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h41
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h63
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h44
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h50
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h64
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h408
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h49
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c73
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c112
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h108
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c178
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c224
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c66
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c181
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c15
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c75
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c50
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c121
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c51
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c36
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c18
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c12
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c261
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h34
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c216
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h15
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c1298
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h113
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c741
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h19
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c5
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c218
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c20
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c19
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c270
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h13
-rw-r--r--drivers/gpu/drm/msm/dp/dp_reg.h19
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c32
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c75
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c7
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c129
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c142
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h36
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c115
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c75
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c93
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c6
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c129
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c383
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h87
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c51
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c541
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h298
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c69
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c104
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c335
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c1514
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c230
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h153
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c1
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h14
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c314
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c59
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h48
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c322
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.h28
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h40
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c62
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c12
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c96
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.c172
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.h37
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml3582
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml198
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml383
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml600
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml223
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml1030
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml305
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml14
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py8
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c23
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/coreca7d.c122
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcca7d.c98
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c52
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headca7d.c297
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c25
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c209
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c93
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h868
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h173
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/chan.h76
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push906f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/pushc97b.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h132
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h43
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h166
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h335
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h65
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h162
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h95
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h148
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h79
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h82
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h119
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h124
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h174
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c84
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c213
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c121
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c159
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan506f.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan906f.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chanc36f.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvif/conn.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/user.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c508
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c275
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c112
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c)43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c)37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c)396
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c)417
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c356
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c)1586
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h741
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h260
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h350
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h)64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h825
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h)55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c)34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c698
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c)118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c217
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h355
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h318
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h)241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h634
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h249
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c271
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c306
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c2
-rw-r--r--drivers/gpu/drm/nova/Kconfig14
-rw-r--r--drivers/gpu/drm/nova/Makefile3
-rw-r--r--drivers/gpu/drm/nova/driver.rs69
-rw-r--r--drivers/gpu/drm/nova/file.rs74
-rw-r--r--drivers/gpu/drm/nova/gem.rs49
-rw-r--r--drivers/gpu/drm/nova/nova.rs18
-rw-r--r--drivers/gpu/drm/nova/uapi.rs61
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c10
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c10
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c29
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c29
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c26
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c27
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c7
-rw-r--r--drivers/gpu/drm/panel/Kconfig77
-rw-r--r--drivers/gpu/drm/panel/Makefile7
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c11
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c11
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c125
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-td4320.c247
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c10
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c11
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c27
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c10
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c11
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8279.c1296
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83102.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112b.c430
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c594
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c11
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9805.c12
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9806e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c236
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9882t.c10
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-ej030na.c11
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c11
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c10
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c10
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c12
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c11
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c13
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c12
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c11
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-sw43408.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lincolntech-lcd197.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c12
-rw-r--r--drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c11
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c11
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c10
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c20
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3052c.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c12
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35560.c12
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c1692
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt37801.c340
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c10
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c11
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c12
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c12
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c12
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67200.c50
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c11
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm692e5.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm69380.c10
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r61307.c325
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r69328.c281
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams581vf01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams639rq08.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-db7430.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d27a1.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c250
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c12
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c119
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c41
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c291
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c132
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c11
-rw-r--r--drivers/gpu/drm/panel/panel-summit.c10
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c79
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c17
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c10
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c11
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-g2647fb105.c280
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c9
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c257
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm692e5.c10
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c11
-rw-r--r--drivers/gpu/drm/panel/panel-widechips-ws2401.c11
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c76
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h34
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c154
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c188
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h66
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c152
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c8
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h36
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c13
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h102
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c137
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c19
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c226
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h79
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c162
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.h12
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c6
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c68
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h1
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h104
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c20
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h3
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c2
-rw-r--r--drivers/gpu/drm/qxl/Kconfig2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c3
-rw-r--r--drivers/gpu/drm/radeon/Kconfig2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c8
-rw-r--r--drivers/gpu/drm/radeon/cik.c42
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c98
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/sid.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c5
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c13
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c11
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c11
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig15
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c13
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c44
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c123
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c9
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c360
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h56
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c103
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c293
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h8
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c16
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c483
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.h349
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c315
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c20
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c29
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h34
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c68
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c92
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c94
-rw-r--r--drivers/gpu/drm/scheduler/.kunitconfig12
-rw-r--r--drivers/gpu/drm/scheduler/Makefile2
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h103
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c48
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c6
-rw-r--r--drivers/gpu/drm/scheduler/sched_internal.h2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c238
-rw-r--r--drivers/gpu/drm/scheduler/tests/Makefile7
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c370
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h225
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c563
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig42
-rw-r--r--drivers/gpu/drm/sitronix/Makefile3
-rw-r--r--drivers/gpu/drm/sitronix/st7571-i2c.c1079
-rw-r--r--drivers/gpu/drm/sitronix/st7586.c (renamed from drivers/gpu/drm/tiny/st7586.c)0
-rw-r--r--drivers/gpu/drm/sitronix/st7735r.c (renamed from drivers/gpu/drm/tiny/st7735r.c)0
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c2
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c13
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c13
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c43
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c40
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c41
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c14
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c14
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c14
-rw-r--r--drivers/gpu/drm/stm/lvds.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c168
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h31
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c27
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c6
-rw-r--r--drivers/gpu/drm/sysfb/Kconfig76
-rw-r--r--drivers/gpu/drm/sysfb/Makefile12
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb.c35
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h188
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c458
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c107
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c389
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c (renamed from drivers/gpu/drm/tiny/ofdrm.c)461
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c (renamed from drivers/gpu/drm/tiny/simpledrm.c)263
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c531
-rw-r--r--drivers/gpu/drm/tegra/dc.c17
-rw-r--r--drivers/gpu/drm/tegra/dp.c67
-rw-r--r--drivers/gpu/drm/tegra/dp.h2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c11
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c4
-rw-r--r--drivers/gpu/drm/tegra/falcon.c20
-rw-r--r--drivers/gpu/drm/tegra/falcon.h1
-rw-r--r--drivers/gpu/drm/tegra/fb.c7
-rw-r--r--drivers/gpu/drm/tegra/fbdev.c4
-rw-r--r--drivers/gpu/drm/tegra/gem.c5
-rw-r--r--drivers/gpu/drm/tegra/hub.c4
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c6
-rw-r--r--drivers/gpu/drm/tegra/rgb.c14
-rw-r--r--drivers/gpu/drm/tegra/sor.c4
-rw-r--r--drivers/gpu/drm/tests/Makefile5
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_test.c153
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c521
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c3
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c266
-rw-r--r--drivers/gpu/drm/tests/drm_framebuffer_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c28
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c809
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_edid.h374
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c62
-rw-r--r--drivers/gpu/drm/tests/drm_sysfb_modeset_test.c168
-rw-r--r--drivers/gpu/drm/tidss/Makefile3
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c4
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c287
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h20
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h29
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c10
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h5
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c13
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c4
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.c598
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.h43
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/gpu/drm/tiny/Kconfig64
-rw-r--r--drivers/gpu/drm/tiny/Makefile4
-rw-r--r--drivers/gpu/drm/tiny/appletbdrm.c30
-rw-r--r--drivers/gpu/drm/tiny/bochs.c19
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c145
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c46
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c5
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c6
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c60
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_mock_manager.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c61
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c30
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_internal.h58
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c245
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c24
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c37
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c24
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h20
-rw-r--r--drivers/gpu/drm/udl/udl_main.c191
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c22
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c6
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c126
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c62
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h30
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c29
-rw-r--r--drivers/gpu/drm/v3d/v3d_gemfs.c21
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c101
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h26
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c36
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c2
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c62
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c154
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c37
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c30
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h7
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c30
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c17
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c20
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c3
-rw-r--r--drivers/gpu/drm/vkms/Kconfig15
-rw-r--r--drivers/gpu/drm/vkms/Makefile5
-rw-r--r--drivers/gpu/drm/vkms/tests/.kunitconfig4
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile7
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_config_test.c951
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_format_test.c280
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.c640
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.h437
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.c61
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.h26
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c4
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c71
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h56
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c467
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h9
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c176
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c844
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c51
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h57
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c510
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c889
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h72
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c63
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c85
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c7
-rw-r--r--drivers/gpu/drm/xe/Kconfig38
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug11
-rw-r--r--drivers/gpu/drm/xe/Makefile27
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h36
-rw-r--r--drivers/gpu/drm/xe/abi/guc_errors_abi.h14
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h29
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h48
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h31
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h76
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h12
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h42
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h)2
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h132
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c91
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c17
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c134
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h4
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c73
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c17
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c8
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c11
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c66
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c135
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c15
-rw-r--r--drivers/gpu/drm/xe/display/xe_tdf.c4
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_alu_commands.h79
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_bars.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h14
-rw-r--r--drivers/gpu/drm/xe/regs/xe_i2c_regs.h20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_irq_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h11
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h7
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c12
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c232
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c63
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c7
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c90
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c28
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h12
-rw-r--r--drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c227
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c2
-rw-r--r--drivers/gpu/drm/xe/xe_bb.h2
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c674
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h47
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c391
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.h10
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c411
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h27
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c62
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c108
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device.c188
-rw-r--r--drivers/gpu/drm/xe/xe_device.h55
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c251
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h125
-rw-r--r--drivers/gpu/drm/xe/xe_device_wa_oob.rules2
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c2
-rw-r--r--drivers/gpu/drm/xe/xe_drv.h2
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c10
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c4
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c28
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c45
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c266
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h24
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h10
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c28
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c275
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c135
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c87
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c30
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c50
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c120
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c76
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c178
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c75
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c174
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c318
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h33
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c90
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c66
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c49
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c214
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c46
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c106
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c423
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h8
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c159
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c205
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity_types.h12
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c5
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c314
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c108
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c5
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c40
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c1
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c47
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c5
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c666
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c332
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.h62
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c9
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c71
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.h1
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c511
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h7
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h11
-rw-r--r--drivers/gpu/drm/xe/xe_map.h18
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c2
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c298
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h4
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c45
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c11
-rw-r--r--drivers/gpu/drm/xe/xe_module.c58
-rw-r--r--drivers/gpu/drm/xe/xe_module.h1
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.c167
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.h15
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c231
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c44
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c111
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c50
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h42
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c43
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h15
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h30
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c104
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c84
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c365
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c147
-rw-r--r--drivers/gpu/drm/xe/xe_query.c35
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c29
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c50
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h14
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c3
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c17
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c98
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.h4
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c88
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h7
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.c216
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.h23
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_types.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c163
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h41
-rw-r--r--drivers/gpu/drm/xe/xe_step.c2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c88
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h1
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c487
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h221
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c13
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h17
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.c254
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.h18
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h4
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c5
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c86
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h6
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c152
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c463
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h93
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h38
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c5
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c24
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h4
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c134
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h22
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules25
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c3
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c79
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c5
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c3
2402 files changed, 118364 insertions, 64621 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f01925ed8176..f7ea8e895c0c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -26,6 +26,11 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+menu "DRM debugging options"
+depends on DRM
+source "drivers/gpu/drm/Kconfig.debug"
+endmenu
+
if DRM
config DRM_MIPI_DBI
@@ -37,65 +42,6 @@ config DRM_MIPI_DSI
bool
depends on DRM
-config DRM_DEBUG_MM
- bool "Insert extra checks and debug info into the DRM range managers"
- default n
- depends on DRM
- depends on STACKTRACE_SUPPORT
- select STACKDEPOT
- help
- Enable allocation tracking of the memory manager and leak detection on
- shutdown.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
-
-config DRM_USE_DYNAMIC_DEBUG
- bool "use dynamic debug to implement drm.debug"
- default n
- depends on BROKEN
- depends on DRM
- depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
- depends on JUMP_LABEL
- help
- Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
- Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
- bytes per callsite, the .data costs can be substantial, and
- are therefore configurable.
-
-config DRM_KUNIT_TEST_HELPERS
- tristate
- depends on DRM && KUNIT
- select DRM_KMS_HELPER
- help
- KUnit Helpers for KMS drivers.
-
-config DRM_KUNIT_TEST
- tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT && MMU
- select DRM_BUDDY
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_STATE_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_EXEC
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_GEM_SHMEM_HELPER
- select DRM_KUNIT_TEST_HELPERS
- select DRM_LIB_RANDOM
- select PRIME_NUMBERS
- default KUNIT_ALL_TESTS
- help
- This builds unit tests for DRM. This option is not useful for
- distributions or general kernels, but only for kernel
- developers working on DRM and associated drivers.
-
- For more information on KUnit and unit tests in general,
- please refer to the KUnit documentation in
- Documentation/dev-tools/kunit/.
-
- If in doubt, say "N".
-
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -242,28 +188,12 @@ source "drivers/gpu/drm/display/Kconfig"
config DRM_TTM
tristate
depends on DRM && MMU
+ select SHMEM
help
GPU memory management subsystem for devices with multiple
GPU memory types. Will be enabled automatically if a device driver
uses it.
-config DRM_TTM_KUNIT_TEST
- tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
- default n
- depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
- select DRM_TTM
- select DRM_BUDDY
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_KUNIT_TEST_HELPERS
- default KUNIT_ALL_TESTS
- help
- Enables unit tests for TTM, a GPU memory manager subsystem used
- to manage memory buffers. This option is mostly useful for kernel
- developers. It depends on (UML || COMPILE_TEST) since no other driver
- which uses TTM can be loaded while running the tests.
-
- If in doubt, say "N".
-
config DRM_EXEC
tristate
depends on DRM
@@ -335,6 +265,8 @@ config DRM_SCHED
tristate
depends on DRM
+source "drivers/gpu/drm/sysfb/Kconfig"
+
source "drivers/gpu/drm/arm/Kconfig"
source "drivers/gpu/drm/radeon/Kconfig"
@@ -343,6 +275,8 @@ source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
+source "drivers/gpu/drm/nova/Kconfig"
+
source "drivers/gpu/drm/i915/Kconfig"
source "drivers/gpu/drm/xe/Kconfig"
@@ -454,6 +388,8 @@ source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+source "drivers/gpu/drm/sitronix/Kconfig"
+
source "drivers/gpu/drm/solomon/Kconfig"
source "drivers/gpu/drm/sprd/Kconfig"
@@ -462,7 +398,7 @@ source "drivers/gpu/drm/imagination/Kconfig"
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
- depends on DRM && PCI && MMU && HYPERV
+ depends on DRM && PCI && HYPERV
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
@@ -474,9 +410,6 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-config DRM_EXPORT_FOR_TESTS
- bool
-
# Separate option as not all DRM drivers use it
config DRM_PANEL_BACKLIGHT_QUIRKS
tristate
@@ -489,31 +422,6 @@ config DRM_PRIVACY_SCREEN
bool
default n
-config DRM_WERROR
- bool "Compile the drm subsystem with warnings as errors"
- depends on DRM && EXPERT
- depends on !WERROR
- default n
- help
- A kernel build should not cause any compiler warnings, and this
- enables the '-Werror' flag to enforce that rule in the drm subsystem.
-
- The drm subsystem enables more warnings than the kernel default, so
- this config option is disabled by default.
-
- If in doubt, say N.
-
-config DRM_HEADER_TEST
- bool "Ensure DRM headers are self-contained and pass kernel-doc"
- depends on DRM && EXPERT && BROKEN
- default n
- help
- Ensure the DRM subsystem headers both under drivers/gpu/drm and
- include/drm compile, are self-contained, have header guards, and have
- no kernel-doc warnings.
-
- If in doubt, say N.
-
endif
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
new file mode 100644
index 000000000000..05dc43c0b8c5
--- /dev/null
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -0,0 +1,117 @@
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default n
+ depends on BROKEN
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
+config DRM_WERROR
+ bool "Compile the drm subsystem with warnings as errors"
+ depends on DRM && EXPERT
+ depends on !WERROR
+ default n
+ help
+ A kernel build should not cause any compiler warnings, and this
+ enables the '-Werror' flag to enforce that rule in the drm subsystem.
+
+ The drm subsystem enables more warnings than the kernel default, so
+ this config option is disabled by default.
+
+ If in doubt, say N.
+
+config DRM_HEADER_TEST
+ bool "Ensure DRM headers are self-contained and pass kernel-doc"
+ depends on DRM && EXPERT && BROKEN
+ default n
+ help
+ Ensure the DRM subsystem headers both under drivers/gpu/drm and
+ include/drm compile, are self-contained, have header guards, and have
+ no kernel-doc warnings.
+
+ If in doubt, say N.
+
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of the memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ select DRM_KMS_HELPER
+ help
+ KUnit Helpers for KMS drivers.
+
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT && MMU
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_BUDDY
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_EXEC
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KUNIT_TEST_HELPERS
+ select DRM_LIB_RANDOM
+ select DRM_SYSFB_HELPER
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for DRM. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
+
+config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ default n
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
+ select DRM_TTM
+ select DRM_BUDDY
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
+
+ If in doubt, say "N".
+
+config DRM_SCHED_KUNIT_TEST
+ tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS
+ select DRM_SCHED
+ depends on DRM && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Choose this option to build unit tests for the DRM scheduler.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_EXPORT_FOR_TESTS
+ bool
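For context on what the KUnit options above actually build: a DRM KUnit test is an ordinary kunit_suite registered with the framework and run either at boot or via tools/testing/kunit. The following is a minimal, hypothetical sketch of the shape of such a test, not taken from this series:

#include <kunit/test.h>

/* Trivial, hypothetical example of the kind of test these options build. */
static void drm_example_smoke_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 2 + 2, 4);
}

static struct kunit_case drm_example_cases[] = {
	KUNIT_CASE(drm_example_smoke_test),
	{}
};

static struct kunit_suite drm_example_suite = {
	.name = "drm_example",
	.test_cases = drm_example_cases,
};
kunit_test_suite(drm_example_suite);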
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ed54a546bbe2..4dafbdc8f86a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -104,7 +104,11 @@ obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
-obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm.o
+
+drm_gpusvm_helper-y := \
+ drm_gpusvm.o\
+ drm_pagemap.o
+obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
@@ -134,6 +138,7 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
+ drm_bridge_helper.o \
drm_crtc_helper.o \
drm_damage_helper.o \
drm_flip_work.o \
@@ -176,6 +181,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_NOVA) += nova/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
@@ -204,6 +210,7 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
obj-y += mxsfb/
+obj-y += sysfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
@@ -219,6 +226,7 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-y += sitronix/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
@@ -236,7 +244,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
- $(srctree)/scripts/kernel-doc -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
index ad80542b60ed..cba7d32150a9 100644
--- a/drivers/gpu/drm/adp/adp-mipi.c
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -212,12 +212,13 @@ static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
};
static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adp_mipi_drv_private *adp =
container_of(bridge, struct adp_mipi_drv_private, bridge);
- return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
@@ -228,9 +229,10 @@ static int adp_mipi_probe(struct platform_device *pdev)
{
struct adp_mipi_drv_private *adp;
- adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL);
- if (!adp)
- return -ENOMEM;
+ adp = devm_drm_bridge_alloc(&pdev->dev, struct adp_mipi_drv_private,
+ bridge, &adp_dsi_bridge_funcs);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
adp->mipi = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adp->mipi)) {
@@ -240,7 +242,6 @@ static int adp_mipi_probe(struct platform_device *pdev)
adp->dsi.dev = &pdev->dev;
adp->dsi.ops = &adp_dsi_host_ops;
- adp->bridge.funcs = &adp_dsi_bridge_funcs;
adp->bridge.of_node = pdev->dev.of_node;
adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
dev_set_drvdata(&pdev->dev, adp);
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 1a11cab741ac..1acfed2f92ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -2,7 +2,7 @@
config DRM_AMDGPU
tristate "AMD GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on !UML
select FW_LOADER
select DRM_CLIENT
@@ -68,7 +68,6 @@ config DRM_AMDGPU_CIK
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
depends on DRM_AMDGPU
- depends on MMU
select HMM_MIRROR
select MMU_NOTIFIER
help
@@ -77,7 +76,7 @@ config DRM_AMDGPU_USERPTR
config DRM_AMD_ISP
bool "Enable AMD Image Signal Processor IP support"
- depends on DRM_AMDGPU
+ depends on DRM_AMDGPU && ACPI
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index aacc810cabb3..930de203d533 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -66,7 +66,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
- amdgpu_cper.o
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -174,7 +174,10 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_mes.o \
mes_v11_0.o \
- mes_v12_0.o
+ mes_v12_0.o \
+
+# add GFX userqueue support
+amdgpu-y += mes_userqueue.o
# add UVD block
amdgpu-y += \
@@ -253,6 +256,8 @@ amdgpu-y += \
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
+# add gfx usermode queue
+amdgpu-y += amdgpu_userq.o
ifneq ($(CONFIG_HSA_AMD),)
AMDKFD_PATH := ../amdkfd
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index e13fbd974141..9569dc16dd3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -71,18 +71,29 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
return NULL;
}
+static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev)
+{
+ uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) |
+ BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
+ if (adev->aid_mask)
+ ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH);
+
+ return ip_block_mask;
+}
+
static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
{
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+ uint32_t ip_block;
int r, i;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!(adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_GFX ||
- adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_SDMA))
+ ip_block = BIT(adev->ip_blocks[i].version->type);
+ if (!(ip_block_mask & ip_block))
continue;
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
@@ -200,8 +211,10 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
{
struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
struct amdgpu_firmware_info *ucode;
struct amdgpu_ip_block *cmn_block;
+ struct amdgpu_ip_block *ih_block;
int ucode_count = 0;
int i, r;
@@ -243,6 +256,18 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
if (r)
return r;
+ if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) {
+ ih_block = amdgpu_device_ip_get_ip_block(adev,
+ AMD_IP_BLOCK_TYPE_IH);
+ if (unlikely(!ih_block)) {
+ dev_err(adev->dev, "Failed to get IH handle\n");
+ return -EINVAL;
+ }
+ r = amdgpu_ip_block_resume(ih_block);
+ if (r)
+ return r;
+ }
+
/* Reinit GFXHUB */
adev->gfxhub.funcs->init(adev);
r = adev->gfxhub.funcs->gart_enable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ef6e78224fdf..ef3af170dda4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -113,6 +113,8 @@
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_eviction_fence.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif
@@ -228,7 +230,7 @@ extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
-extern bool enforce_isolation;
+extern int amdgpu_enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
@@ -266,8 +268,10 @@ extern int amdgpu_umsch_mm_fwlog;
extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;
+extern int amdgpu_rebar;
extern int amdgpu_wbrf;
+extern int amdgpu_user_queue;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -466,9 +470,6 @@ struct amdgpu_sa_manager {
void *cpu_ptr;
};
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
/*
* IRQS.
*/
@@ -488,7 +489,6 @@ struct amdgpu_flip_work {
bool async;
};
-
/*
* file private structure
*/
@@ -501,6 +501,11 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
+ struct amdgpu_userq_mgr userq_mgr;
+
+ /* Eviction fence infra */
+ struct amdgpu_eviction_fence_mgr evf_mgr;
+
/** GPU partition selection */
uint32_t xcp_id;
};
@@ -512,12 +517,62 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
*/
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+/**
+ * amdgpu_wb - This struct is used for small GPU memory allocation.
+ *
+ * This struct is used to allocate a small amount of GPU memory that can be
+ * used to shadow certain states in memory. This is especially useful for
+ * providing easy CPU access to some states without requiring register access
+ * (e.g., if some block is power gated, reading a register may be problematic).
+ *
+ * Note: the term writeback was initially used because many of the amdgpu
+ * components had some level of writeback memory, and this struct initially
+ * described those components.
+ */
struct amdgpu_wb {
+
+ /**
+ * @wb_obj:
+ *
+ * Buffer Object used for the writeback memory.
+ */
struct amdgpu_bo *wb_obj;
+
+ /**
+ * @wb:
+ *
+ * Pointer to the first writeback slot. From the CPU, a slot can be
+ * accessed directly by using its offset as an index into this array.
+ * For the GPU address, it is necessary to combine gpu_addr with the offset.
+ */
volatile uint32_t *wb;
+
+ /**
+ * @gpu_addr:
+ *
+ * Writeback base address in the GPU.
+ */
uint64_t gpu_addr;
- u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
+
+ /**
+ * @num_wb:
+ *
+ * Number of writeback slots reserved for amdgpu.
+ */
+ u32 num_wb;
+
+ /**
+ * @used:
+ *
+ * Tracks the writeback slots already in use.
+ */
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+
+ /**
+ * @lock:
+ *
+ * Protects reads and writes of the @used bitmap.
+ */
spinlock_t lock;
};
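Taken together, the kernel-doc above implies the following access pattern. This is an illustrative sketch only, not part of this patch, assuming the long-standing amdgpu_device_wb_get()/amdgpu_device_wb_free() helpers:

/* Illustrative sketch, not part of this diff: how a writeback slot is
 * addressed from the CPU and from the GPU, per the kernel-doc above.
 */
static int example_wb_usage(struct amdgpu_device *adev)
{
	u64 gpu_addr;
	u32 slot;
	int r;

	r = amdgpu_device_wb_get(adev, &slot);	/* reserve one slot */
	if (r)
		return r;

	adev->wb.wb[slot] = 0;	/* CPU view: plain array index */
	gpu_addr = adev->wb.gpu_addr + (u64)slot * 4;	/* GPU view: base + byte offset */

	/* ... have the GPU write a status/fence value to gpu_addr, then
	 * poll adev->wb.wb[slot] from the CPU without touching registers ...
	 */

	amdgpu_device_wb_free(adev, slot);
	return 0;
}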
@@ -551,6 +606,7 @@ struct amdgpu_allowed_register_entry {
* are reset depends on the ASIC. Notably doesn't reset IPs
* shared with the CPU on APUs or the memory controllers (so
* VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs
* @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
* but without powering off the PCI bus. Suitable only for
* discrete GPUs.
@@ -568,6 +624,7 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
AMD_RESET_METHOD_MODE2,
+ AMD_RESET_METHOD_LINK,
AMD_RESET_METHOD_BACO,
AMD_RESET_METHOD_PCI,
AMD_RESET_METHOD_ON_INIT,
@@ -821,6 +878,12 @@ struct amdgpu_mqd_prop {
uint32_t hqd_queue_priority;
bool allow_tunneling;
bool hqd_active;
+ uint64_t shadow_addr;
+ uint64_t gds_bkup_addr;
+ uint64_t csa_addr;
+ uint64_t fence_address;
+ bool tmz_queue;
+ bool kernel_queue;
};
struct amdgpu_mqd {
@@ -829,6 +892,12 @@ struct amdgpu_mqd {
struct amdgpu_mqd_prop *p);
};
+struct amdgpu_pcie_reset_ctx {
+ bool in_link_reset;
+ bool occurs_dpc;
+ bool audio_suspended;
+};
+
/*
* Custom Init levels could be defined for different situations where a full
* initialization of all hardware blocks are not expected. Sample cases are
@@ -853,6 +922,14 @@ struct amdgpu_init_level {
struct amdgpu_reset_domain;
struct amdgpu_fru_info;
+enum amdgpu_enforce_isolation_mode {
+ AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
+};
+
/*
* Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
*/
@@ -1081,6 +1158,13 @@ struct amdgpu_device {
bool enable_uni_mes;
struct amdgpu_mes mes;
struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];
+ const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
+
+ /* xarray used to retrieve the user queue fence driver reference
+ * in the EOP interrupt handler to signal the particular user
+ * queue fence.
+ */
+ struct xarray userq_xa;
/* df */
struct amdgpu_df df;
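To make the userq_xa comment above concrete, the lookup in the EOP interrupt path would look roughly like the sketch below; the handler name, key choice, and fence-driver type are illustrative assumptions, not this patch's exact API:

/* Conceptual sketch only -- names are illustrative, not this patch's API. */
static void example_userq_eop_handler(struct amdgpu_device *adev,
				      unsigned long doorbell_index)
{
	struct amdgpu_userq_fence_driver *fence_drv;

	/* The xarray maps a queue key (e.g. its doorbell index) to the
	 * fence driver owning that user queue. */
	xa_lock(&adev->userq_xa);
	fence_drv = xa_load(&adev->userq_xa, doorbell_index);
	if (fence_drv) {
		/* ... walk fence_drv's pending fences and signal those
		 * whose sequence number has been reached ... */
	}
	xa_unlock(&adev->userq_xa);
}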
@@ -1160,6 +1244,8 @@ struct amdgpu_device {
struct pci_saved_state *pci_state;
pci_channel_state_t pci_channel_state;
+ struct amdgpu_pcie_reset_ctx pcie_reset_ctx;
+
/* Track auto wait count on s_barrier settings */
bool barrier_has_auto_waitcnt;
@@ -1193,10 +1279,12 @@ struct amdgpu_device {
bool debug_enable_ras_aca;
bool debug_exp_resets;
bool debug_disable_gpu_ring_reset;
+ bool debug_vm_userptr;
+ bool debug_disable_ce_logs;
/* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
- bool enforce_isolation[MAX_XCP];
+ enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP];
struct amdgpu_isolation {
void *owner;
struct dma_fence *spearhead;
@@ -1210,6 +1298,10 @@ struct amdgpu_device {
* in KFD: VRAM or GTT.
*/
bool apu_prefer_gtt;
+
+ struct list_head userq_mgr_list;
+ struct mutex userq_mutex;
+ bool userq_halt_for_enforce_isolation;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1243,6 +1335,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
+static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
+{
+ return !!adev->aid_mask;
+}
+
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
@@ -1294,7 +1391,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
@@ -1464,16 +1562,17 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size);
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
-bool amdgpu_device_supports_px(struct drm_device *dev);
-bool amdgpu_device_supports_boco(struct drm_device *dev);
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-int amdgpu_device_supports_baco(struct drm_device *dev);
+int amdgpu_device_link_reset(struct amdgpu_device *adev);
+bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
+bool amdgpu_device_supports_px(struct amdgpu_device *adev);
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
+int amdgpu_device_supports_baco(struct amdgpu_device *adev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
-int amdgpu_device_baco_enter(struct drm_device *dev);
-int amdgpu_device_baco_exit(struct drm_device *dev);
+int amdgpu_device_baco_enter(struct amdgpu_device *adev);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev);
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -1525,6 +1624,7 @@ void amdgpu_driver_release_kms(struct drm_device *dev);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
+void amdgpu_device_complete(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1575,7 +1675,8 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state);
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
u64 *tmr_size);
@@ -1606,19 +1707,24 @@ static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state) { return 0; }
-static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
- enum amdgpu_ss ss_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
+{
+ return 0;
+}
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
-static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
+#endif
+
+#if defined(CONFIG_DRM_AMD_ISP)
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
@@ -1664,4 +1770,19 @@ extern const struct attribute_group amdgpu_flash_attr_group;
void amdgpu_set_init_level(struct amdgpu_device *adev,
enum amdgpu_init_lvl_id lvl);
+
+static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
+{
+ u32 status;
+ int r;
+
+ r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
+ if (r || PCI_POSSIBLE_ERROR(status)) {
+ dev_err(adev->dev, "device lost from bus!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
#endif
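A typical use of the new amdgpu_device_bus_status_check() helper above would be an early bail-out before touching registers; the caller below is a hypothetical sketch, not part of this patch:

/* Hypothetical caller sketch -- not part of this diff. */
static int example_pre_reset(struct amdgpu_device *adev)
{
	int r;

	/* If the device fell off the bus, register access is pointless. */
	r = amdgpu_device_bus_status_check(adev);
	if (r)
		return r;

	/* ... proceed with the reset sequence ... */
	return 0;
}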
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index b4ad163f42a7..cbc40cad581b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -115,11 +115,19 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
int i;
+ if (adev->debug_disable_ce_logs &&
+ bank->smu_err_type == ACA_SMU_TYPE_CE &&
+ !ACA_BANK_ERR_IS_DEFFERED(bank))
+ return;
+
RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+
+ if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS]))
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index b7f8f2ff143d..6c62e27b9800 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -811,18 +811,18 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
/**
* amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
* @ss_state: current smart shift event
*
* Returns 0 on success,
* otherwise returns an error number.
*/
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- if (!amdgpu_device_supports_smart_shift(dev))
+ if (!amdgpu_device_supports_smart_shift(adev))
return 0;
switch (ss_state) {
@@ -1532,23 +1532,35 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
return true;
#endif /* CONFIG_AMD_PMC */
}
+#endif /* CONFIG_SUSPEND */
-/**
- * amdgpu_choose_low_power_state
- *
- * @adev: amdgpu_device_pointer
- *
- * Choose the target low power state for the GPU
- */
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
-{
- if (adev->in_runpm)
- return;
+#if IS_ENABLED(CONFIG_DRM_AMD_ISP)
+static const struct acpi_device_id isp_sensor_ids[] = {
+ { "OMNI5C10" },
+ { }
+};
- if (amdgpu_acpi_is_s0ix_active(adev))
- adev->in_s0ix = true;
- else if (amdgpu_acpi_is_s3_active(adev))
- adev->in_s3 = true;
+static int isp_match_acpi_device_ids(struct device *dev, const void *data)
+{
+ return acpi_match_device(data, dev) ? 1 : 0;
}
-#endif /* CONFIG_SUSPEND */
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
+{
+ struct device *pdev __free(put_device) = NULL;
+ struct acpi_device *acpi_pdev;
+
+ pdev = bus_find_device(&platform_bus_type, NULL, isp_sensor_ids,
+ isp_match_acpi_device_ids);
+ if (!pdev)
+ return -EINVAL;
+
+ acpi_pdev = ACPI_COMPANION(pdev);
+ if (!acpi_pdev)
+ return -ENODEV;
+
+ *dev = acpi_pdev;
+
+ return 0;
+}
+#endif /* CONFIG_DRM_AMD_ISP */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4cec3a873995..fbe7616555c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -248,18 +248,34 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, run_pm);
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, run_pm);
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+
+ return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
+{
+ if (adev->kfd.dev)
+ kgd2kfd_suspend_process(adev->kfd.dev);
+}
+
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd.dev)
+ r = kgd2kfd_resume_process(adev->kfd.dev);
return r;
}
@@ -368,6 +384,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
+ if (!bo || !*bo)
+ return;
+
(void)amdgpu_bo_reserve(*bo, true);
amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(*bo);
@@ -639,7 +658,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err;
}
- ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0);
if (ret)
goto err;
@@ -746,12 +765,12 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
- return kgd2kfd_check_and_lock_kfd();
+ return kgd2kfd_check_and_lock_kfd(adev->kfd.dev);
}
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
- kgd2kfd_unlock_kfd();
+ kgd2kfd_unlock_kfd(adev->kfd.dev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index b6ca41859b53..33eb4826b58b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -154,8 +154,10 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -411,16 +413,18 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
int kgd2kfd_pre_reset(struct kfd_dev *kfd,
struct amdgpu_reset_context *reset_context);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
-int kgd2kfd_check_and_lock_kfd(void);
-void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
@@ -454,11 +458,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}
-static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
}
-static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+ return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+}
+
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
{
return 0;
}
@@ -489,12 +502,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
-static inline int kgd2kfd_check_and_lock_kfd(void)
+static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
return 0;
}
-static inline void kgd2kfd_unlock_kfd(void)
+static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index ffbaa8bc5eea..1105a09e55dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
if (!down_read_trylock(&adev->reset_domain->sem))
return;
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
if (suspend_resume_compute_scheduler(adev, true))
goto out;
@@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
out:
suspend_resume_compute_scheduler(adev, false);
- amdgpu_amdkfd_resume(adev, false);
+ amdgpu_amdkfd_resume(adev, true);
up_read(&adev->reset_domain->sem);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ca4a6b82817f..df77558e03ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -561,6 +561,13 @@ static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -578,4 +585,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 0f3e2944edd7..e68c0fa8d751 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -582,6 +582,13 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev,
lower_32_bits(page_table_base));
}
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -599,4 +606,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d2ec4130a316..260165bbe373 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2559,6 +2559,18 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (ret != -EFAULT)
return ret;
+ /* If applications unmap memory before destroying the userptr
+ * from the KFD, trigger a segmentation fault in VM debug mode.
+ */
+ if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
+ pid_nr(process_info->pid), mem->va);
+
+ /* Send GPU VM fault to user space */
+ kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
+ mem->va);
+ }
+
ret = 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index eb015bdda8a7..c7d32fb216e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -281,6 +281,9 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
case ATOM_DGPU_VRAM_TYPE_GDDR6:
vram_type = AMDGPU_VRAM_TYPE_GDDR6;
break;
+ case ATOM_DGPU_VRAM_TYPE_HBM3E:
+ vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+ break;
default:
vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 68bce6a6d09d..004a6a9d6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -252,83 +252,22 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
- case CHIP_TAHITI:
- strcpy(fw_name, "radeon/tahiti_smc.bin");
- break;
- case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/pitcairn_smc.bin");
- }
- break;
- case CHIP_VERDE:
- if (((adev->pdev->device == 0x6820) &&
- ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83))) ||
- ((adev->pdev->device == 0x6821) &&
- ((adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87))) ||
- ((adev->pdev->revision == 0x87) &&
- ((adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682b)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/verde_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/verde_smc.bin");
- }
- break;
- case CHIP_OLAND:
- if (((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6600) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605) ||
- (adev->pdev->device == 0x6610))) ||
- ((adev->pdev->revision == 0x83) &&
- (adev->pdev->device == 0x6610))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/oland_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/oland_smc.bin");
- }
- break;
- case CHIP_HAINAN:
- if (((adev->pdev->revision == 0x81) &&
- (adev->pdev->device == 0x6660)) ||
- ((adev->pdev->revision == 0x83) &&
- ((adev->pdev->device == 0x6660) ||
- (adev->pdev->device == 0x6663) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/hainan_k_smc.bin");
- } else if ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665)) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/banks_k_2_smc.bin");
- } else {
- strcpy(fw_name, "radeon/hainan_smc.bin");
- }
- break;
case CHIP_BONAIRE:
if ((adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/bonaire_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/hawaii_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
@@ -338,76 +277,76 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
- strcpy(fw_name, "amdgpu/fiji_smc.bin");
+ strscpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k_smc.bin");
} else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_smc.bin");
}
break;
case CHIP_VEGAM:
- strcpy(fw_name, "amdgpu/vegam_smc.bin");
+ strscpy(fw_name, "amdgpu/vegam_smc.bin");
break;
case CHIP_VEGA10:
if ((adev->pdev->device == 0x687f) &&
((adev->pdev->revision == 0xc0) ||
(adev->pdev->revision == 0xc1) ||
(adev->pdev->revision == 0xc3)))
- strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_acg_smc.bin");
else
- strcpy(fw_name, "amdgpu/vega10_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_smc.bin");
break;
case CHIP_VEGA12:
- strcpy(fw_name, "amdgpu/vega12_smc.bin");
+ strscpy(fw_name, "amdgpu/vega12_smc.bin");
break;
case CHIP_VEGA20:
- strcpy(fw_name, "amdgpu/vega20_smc.bin");
+ strscpy(fw_name, "amdgpu/vega20_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 360e07a5c7c1..25252231a68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -212,7 +212,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
- CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN,
+ CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
section->hdr.valid_bits.err_info_cnt = 1;
@@ -326,7 +326,9 @@ int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
return -ENOMEM;
}
- amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM);
+ amdgpu_cper_entry_fill_hdr(adev, bp_threshold,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+ CPER_SEV_FATAL);
ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
if (ret)
return ret;
@@ -457,7 +459,7 @@ calc:
void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
{
- u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+ u64 pos, wptr_old, rptr;
int rec_cnt_dw = count >> 2;
u32 chunk, ent_sz;
u8 *s = (u8 *)src;
@@ -470,9 +472,11 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
return;
}
+ mutex_lock(&ring->adev->cper.ring_lock);
+
wptr_old = ring->wptr;
+ rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
- mutex_lock(&ring->adev->cper.ring_lock);
while (count) {
ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
chunk = umin(ent_sz, count);
@@ -549,7 +553,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
{
int r;
- if (!amdgpu_aca_is_enabled(adev))
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
return 0;
r = amdgpu_cper_ring_init(adev);
@@ -568,7 +572,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
int amdgpu_cper_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_aca_is_enabled(adev))
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
return 0;
adev->cper.enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82df06a72ee0..d3f220be2ef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -293,10 +293,29 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
- num_ibs[i], &p->jobs[i]);
+ num_ibs[i], &p->jobs[i],
+ p->filp->client_id);
if (ret)
goto free_all_kdata;
- p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
+ switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
+ case AMDGPU_ENFORCE_ISOLATION_DISABLE:
+ default:
+ p->jobs[i]->enforce_isolation = false;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = true;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ }
}
p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -349,6 +368,10 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
ring = amdgpu_job_ring(job);
ib = &job->ibs[job->num_ibs++];
+ /* user submissions to kernel queues are disabled */
+ if (ring->no_user_submission)
+ return -EINVAL;
+
/* MM engine doesn't support user fences */
if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
@@ -1116,6 +1139,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}
+ if (!amdgpu_vm_ready(vm))
+ return -EINVAL;
+
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
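For quick reference, the enforce-isolation switch in amdgpu_cs_pass1() above reduces to the following per-job flag mapping (a summary of the hunk, not part of the patch; note that LEGACY and NO_CLEANER_SHADER currently produce identical flags):

/*   mode                                         enforce    cleaner
 *                                                isolation  shader
 *   AMDGPU_ENFORCE_ISOLATION_DISABLE (default)   false      false
 *   AMDGPU_ENFORCE_ISOLATION_ENABLE              true       true
 *   AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY       true       false
 *   AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER   true       false
 */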
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index cfdf558b48b6..dfb6cfd83760 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c43d1b6e5d66..f5d5c45ddc0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -919,7 +919,7 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
return timeout;
}
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
@@ -944,24 +944,13 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
drm_sched_entity_fini(entity);
}
}
+ kref_put(&ctx->refcount, amdgpu_ctx_fini);
}
}
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
- struct amdgpu_ctx *ctx;
- struct idr *idp;
- uint32_t id;
-
amdgpu_ctx_mgr_entity_fini(mgr);
-
- idp = &mgr->ctx_handles;
-
- idr_for_each_entry(idp, ctx, id) {
- if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
- DRM_ERROR("ctx %p is still alive\n", ctx);
- }
-
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 85376baaa92f..090dfe86f75b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -92,7 +92,6 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
struct amdgpu_device *adev);
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a1450f13d963..0e6e2e2acf5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1786,7 +1786,7 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
- seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
+ seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
amdgpu_vm_put_task_info(ti);
}
@@ -1902,7 +1902,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && (&job->hw_fence) == fence)
+ if (preempted && (&job->hw_fence.base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
@@ -2105,6 +2105,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
+ amdgpu_psp_debugfs_init(adev);
debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);
@@ -2130,6 +2131,55 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
+{
+ struct drm_file *file;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_bo *root_bo;
+ int r;
+
+ file = m->private;
+ if (!file)
+ return -EINVAL;
+
+ fpriv = file->driver_priv;
+ if (!fpriv || !fpriv->vm.root.bo)
+ return -ENODEV;
+
+ root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+ if (r) {
+ amdgpu_bo_unref(&root_bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(fpriv->vm.root.bo));
+
+ amdgpu_bo_unreserve(root_bo);
+ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+}
+
+static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_pt_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_pt_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_pt_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+ debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
+ &amdgpu_pt_info_fops);
+}
+
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
@@ -2139,4 +2189,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+}
#endif
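The new vm_pagetable_info file follows the stock single-shot seq_file recipe: single_open() binds a show() callback that prints the whole file in one pass, and seq_read/seq_lseek/single_release do the rest. A self-contained sketch of the same idiom for a hypothetical read-only debugfs file (all names illustrative):

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int foo_info_show(struct seq_file *m, void *unused)
{
	/* Emit the complete file contents in one pass. */
	seq_printf(m, "value: %d\n", 42);
	return 0;
}

static int foo_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_info_show, inode->i_private);
}

static const struct file_operations foo_info_fops = {
	.owner = THIS_MODULE,
	.open = foo_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};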
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 0425432d8659..e7b3c38e5186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -33,4 +33,5 @@ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_vm_init(struct drm_file *file);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 7b50741dc097..8a026bc9ea44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -220,10 +220,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
coredump->reset_time.tv_nsec);
- if (coredump->reset_task_info.pid)
+ if (coredump->reset_task_info.task.pid)
drm_printf(&p, "process_name: %s PID: %d\n",
coredump->reset_task_info.process_name,
- coredump->reset_task_info.pid);
+ coredump->reset_task_info.task.pid);
/* SOC Information */
drm_printf(&p, "\nSOC Information\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7f354cd532dc..01d234cf8156 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -85,6 +85,7 @@
#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#endif
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
@@ -231,7 +232,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
{
int ret = 0;
- if (!amdgpu_sriov_vf(adev))
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
ret = sysfs_create_file(&adev->dev->kobj,
&dev_attr_pcie_replay_count.attr);
@@ -240,7 +241,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_sriov_vf(adev))
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
sysfs_remove_file(&adev->dev->kobj,
&dev_attr_pcie_replay_count.attr);
}
@@ -410,19 +411,16 @@ static const struct attribute_group amdgpu_board_attrs_group = {
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
/**
* amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ATPX power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_px(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
return true;
return false;
@@ -431,15 +429,13 @@ bool amdgpu_device_supports_px(struct drm_device *dev)
/**
* amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ACPI power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
@@ -452,29 +448,24 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
/**
* amdgpu_device_supports_baco - Does the device support BACO
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Return:
* 1 if the device supports BACO;
* 3 if the device supports MACO (only works if BACO is supported)
* otherwise return 0.
*/
-int amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
return amdgpu_asic_supports_baco(adev);
}
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
- struct drm_device *dev;
int bamaco_support;
- dev = adev_to_drm(adev);
-
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
- bamaco_support = amdgpu_device_supports_baco(dev);
+ bamaco_support = amdgpu_device_supports_baco(adev);
switch (amdgpu_runtime_pm) {
case 2:
@@ -494,10 +485,12 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case -1:
case -2:
- if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+ if (amdgpu_device_supports_px(adev)) {
+ /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
- } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+ } else if (amdgpu_device_supports_boco(adev)) {
+ /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else {
@@ -511,12 +504,13 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case CHIP_VEGA10:
/* enable BACO as runpm mode if noretry=0 */
- if (!adev->gmc.noretry)
+ if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
default:
/* enable BACO as runpm mode on CI+ */
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ if (!amdgpu_passthrough(adev))
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
}
@@ -545,14 +539,14 @@ no_runtime_pm:
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with Smart Shift support,
* otherwise returns false.
*/
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
{
- return (amdgpu_device_supports_boco(dev) &&
+ return (amdgpu_device_supports_boco(adev) &&
amdgpu_acpi_is_power_shift_control_supported());
}
@@ -1286,14 +1280,14 @@ u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
*/
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
BUG();
return 0;
}
static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1310,15 +1304,17 @@ static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg
*/
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%04X with 0x%08X\n", reg,
+ v);
BUG();
}
static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%llX with 0x%08X\n", reg,
+ v);
BUG();
}
@@ -1334,14 +1330,15 @@ static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, ui
*/
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
+ reg);
BUG();
return 0;
}
static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1358,15 +1355,17 @@ static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t r
*/
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+ reg, v);
BUG();
}
static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
+ reg, v);
BUG();
}
@@ -1384,8 +1383,9 @@ static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg,
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
uint32_t block, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
- reg, block);
+ dev_err(adev->dev,
+ "Invalid callback to read register 0x%04X in block 0x%04X\n",
+ reg, block);
BUG();
return 0;
}
@@ -1405,8 +1405,9 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
uint32_t block,
uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
- reg, block, v);
+ dev_err(adev->dev,
+ "Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+ reg, block, v);
BUG();
}
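The DRM_ERROR/DRM_WARN/DRM_INFO to dev_err/dev_warn/dev_info conversions that run through the rest of this file all serve the same purpose: dev_*() prefixes each message with the specific device, which disambiguates logs on multi-GPU systems. Roughly (output format is illustrative):

DRM_ERROR("ring test failed\n");
/* -> [drm] *ERROR* ring test failed          (no device identity) */

dev_err(adev->dev, "ring test failed\n");
/* -> amdgpu 0000:03:00.0: ring test failed   (per-device prefix)  */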
@@ -1680,6 +1681,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ if (!amdgpu_rebar)
+ return 0;
+
/* resizing on Dell G5 SE platforms causes problems with runtime pm */
if ((amdgpu_runtime_pm != 0) &&
adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
@@ -1689,7 +1693,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
- DRM_WARN("System can't access extended configuration space, please check!!\n");
+ dev_warn(
+ adev->dev,
+ "System can't access extended configuration space, please check!!\n");
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
@@ -1729,9 +1735,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
r = pci_resize_resource(adev->pdev, 0, rbar_size);
if (r == -ENOSPC)
- DRM_INFO("Not enough PCI address space for a large BAR.");
+ dev_info(adev->dev,
+ "Not enough PCI address space for a large BAR.");
else if (r && r != -ENOTSUPP)
- DRM_ERROR("Problem resizing BAR0 (%d).", r);
+ dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
pci_assign_unassigned_bus_resources(adev->pdev->bus);
@@ -1833,8 +1840,8 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
case 0:
return false;
default:
- DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
- amdgpu_seamless);
+ dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
+ amdgpu_seamless);
return false;
}
@@ -1870,6 +1877,35 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
return true;
}
+static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
+ return false;
+
+ if (c->x86 == 6 &&
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
+ switch (c->x86_model) {
+ case VFM_MODEL(INTEL_ALDERLAKE):
+ case VFM_MODEL(INTEL_ALDERLAKE_L):
+ case VFM_MODEL(INTEL_RAPTORLAKE):
+ case VFM_MODEL(INTEL_RAPTORLAKE_P):
+ case VFM_MODEL(INTEL_RAPTORLAKE_S):
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*
@@ -1894,7 +1930,7 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
}
if (adev->flags & AMD_IS_APU)
return false;
- if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
+ if (amdgpu_device_aspm_support_quirk(adev))
return false;
return pcie_aspm_enabled(adev->pdev);
}
@@ -1981,7 +2017,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
if (!is_os_64) {
- DRM_WARN("Not 64-bit OS, feature not supported\n");
+ dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
goto def_value;
}
si_meminfo(&si);
@@ -1996,7 +2032,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
if (total_memory < dram_size_seven_GB)
goto def_value1;
} else {
- DRM_WARN("Smu memory pool size not supported\n");
+ dev_warn(adev->dev, "Smu memory pool size not supported\n");
goto def_value;
}
adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
@@ -2004,7 +2040,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
def_value1:
- DRM_WARN("No enough system memory\n");
+ dev_warn(adev->dev, "No enough system memory\n");
def_value:
adev->pm.smu_prv_buffer_size = 0;
}
@@ -2112,8 +2148,31 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- for (i = 0; i < MAX_XCP; i++)
- adev->enforce_isolation[i] = !!enforce_isolation;
+ for (i = 0; i < MAX_XCP; i++) {
+ switch (amdgpu_enforce_isolation) {
+ case -1:
+ case 0:
+ default:
+ /* disable */
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ /* enable */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ /* enable legacy mode */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ /* enable only process isolation without submitting cleaner shader */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
return 0;
}
@@ -2133,7 +2192,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
+ state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2145,12 +2205,13 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
amdgpu_device_load_pci_state(pdev);
r = pci_enable_device(pdev);
if (r)
- DRM_WARN("pci_enable_device failed (%d)\n", r);
+ dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
+ r);
amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- pr_info("switched off\n");
+ dev_info(&pdev->dev, "switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_prepare(dev);
amdgpu_device_suspend(dev, true);
@@ -2217,8 +2278,9 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2251,8 +2313,9 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2468,9 +2531,11 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(
+ adev->dev,
+ "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
@@ -2481,8 +2546,9 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
- DRM_INFO("virtual_display:%d, num_crtc:%d\n",
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
+ adev->enable_virtual_display,
+ adev->mode_info.num_crtc);
}
}
@@ -2504,9 +2570,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin)
- return 0;
-
switch (adev->asic_type) {
default:
return 0;
@@ -2528,6 +2591,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
+ if (adev->mman.discovery_bin)
+ return 0;
chip_name = "navi12";
break;
}
@@ -2689,6 +2754,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
}
+ /* Check for IP version 9.4.3 with A0 hardware */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ !amdgpu_device_get_rev_id(adev)) {
+ dev_err(adev->dev, "Unsupported A0 hardware\n");
+ return -ENODEV; /* device unsupported - no device error */
+ }
+
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
@@ -2701,7 +2773,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}
-
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -2710,21 +2781,29 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+ adev->virt.is_xgmi_node_migrate_enabled = false;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->virt.is_xgmi_node_migrate_enabled =
+ amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
+ }
+
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
ip_block = &adev->ip_blocks[i];
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
- DRM_WARN("disabled ip block: %d <%s>\n",
- i, adev->ip_blocks[i].version->funcs->name);
+ dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
+ adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
} else if (ip_block->version->funcs->early_init) {
r = ip_block->version->funcs->early_init(ip_block);
if (r == -ENOENT) {
adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
total = false;
} else {
adev->ip_blocks[i].status.valid = true;
@@ -2805,8 +2884,10 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2830,8 +2911,9 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2869,8 +2951,11 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
} else {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i]
+ .version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2925,25 +3010,29 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
r = drm_sched_init(&ring->sched, &args);
if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create scheduler on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_uvd_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create UVD scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_vce_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create VCE scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
}
- amdgpu_xcp_update_partition_sched_list(adev);
+ if (adev->xcp_mgr)
+ amdgpu_xcp_update_partition_sched_list(adev);
return 0;
}
@@ -2975,8 +3064,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->sw_init) {
r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
goto init_failed;
}
}
@@ -2990,7 +3081,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* need to do common hw init early so everything is set up for gmc */
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -3002,17 +3094,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
r = amdgpu_device_mem_scratch_init(adev);
if (r) {
- DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_mem_scratch_init failed %d\n",
+ r);
goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_device_wb_init failed %d\n", r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -3024,14 +3120,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_CSA_SIZE);
if (r) {
- DRM_ERROR("allocate CSA failed %d\n", r);
+ dev_err(adev->dev,
+ "allocate CSA failed %d\n", r);
goto init_failed;
}
}
r = amdgpu_seq64_init(adev);
if (r) {
- DRM_ERROR("allocate seq64 failed %d\n", r);
+ dev_err(adev->dev, "allocate seq64 failed %d\n",
+ r);
goto init_failed;
}
}
@@ -3172,6 +3270,8 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LEGACY:
+ case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
return true;
@@ -3220,8 +3320,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3257,8 +3359,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3324,8 +3428,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3334,7 +3440,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_ras_late_init(adev);
if (r) {
- DRM_ERROR("amdgpu_ras_late_init failed %d", r);
+ dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
return r;
}
@@ -3348,7 +3454,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
- DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
if (amdgpu_passthrough(adev) &&
@@ -3381,7 +3487,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
AMDGPU_XGMI_PSTATE_MIN);
if (r) {
- DRM_ERROR("pstate setting failed (%d).\n", r);
+ dev_err(adev->dev,
+ "pstate setting failed (%d).\n",
+ r);
break;
}
}
@@ -3395,17 +3503,19 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
{
+ struct amdgpu_device *adev = ip_block->adev;
int r;
if (!ip_block->version->funcs->hw_fini) {
- DRM_ERROR("hw_fini of IP block <%s> not defined\n",
- ip_block->version->funcs->name);
+ dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
+ ip_block->version->funcs->name);
} else {
r = ip_block->version->funcs->hw_fini(ip_block);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- ip_block->version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "hw_fini of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
}
}
@@ -3446,15 +3556,17 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
if (r) {
- DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "early_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
+ amdgpu_userq_suspend(adev);
/* Workaround for ASICs need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
@@ -3468,7 +3580,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
if (amdgpu_virt_release_full_gpu(adev, false))
- DRM_ERROR("failed to release exclusive mode on fini\n");
+ dev_err(adev->dev,
+ "failed to release exclusive mode on fini\n");
}
return 0;
@@ -3516,8 +3629,10 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
}
}
adev->ip_blocks[i].status.sw = false;
@@ -3550,7 +3665,7 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
r = amdgpu_ib_ring_tests(adev);
if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -3691,8 +3806,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
+ dev_err(adev->dev,
+ "SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
return r;
}
}
@@ -3976,12 +4092,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
/**
* amdgpu_device_asic_has_dc_support - determine if DC supports the asic
*
+ * @pdev: pci device context
* @asic_type: AMD asic type
*
 * Check if there is DC (new modesetting infrastructure) support for an asic.
* returns true if DC has support, false if not.
*/
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type)
{
switch (asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -4024,7 +4142,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#else
default:
if (amdgpu_dc > 0)
- DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
+ dev_info_once(
+ &pdev->dev,
+ "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
return false;
#endif
}
@@ -4043,7 +4163,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
- return amdgpu_device_asic_has_dc_support(adev->asic_type);
+ return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
}
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
@@ -4065,13 +4185,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
task_barrier_enter(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev);
if (adev->asic_reset_res)
goto fail;
task_barrier_exit(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev);
if (adev->asic_reset_res)
goto fail;
@@ -4085,7 +4205,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
fail:
if (adev->asic_reset_res)
- DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
+ dev_warn(adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev_to_drm(adev)->unique);
amdgpu_put_xgmi_hive(hive);
}
@@ -4099,18 +4220,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
int ret = 0;
/*
- * By default timeout for non compute jobs is 10000
- * and 60000 for compute jobs.
- * In SR-IOV or passthrough mode, timeout for compute
- * jobs are 60000 by default.
+ * By default, the timeout for all jobs is 10 seconds.
*/
- adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev))
- adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
- msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
- else
- adev->compute_timeout = msecs_to_jiffies(60000);
if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
while ((timeout_setting = strsep(&input, ",")) &&
@@ -4209,7 +4322,7 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
adev->gfx.mcbp = true;
if (adev->gfx.mcbp)
- DRM_INFO("MCBP is enabled\n");
+ dev_info(adev->dev, "MCBP is enabled\n");
}
/**
@@ -4225,7 +4338,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags)
{
- struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
bool px = false;
@@ -4277,9 +4389,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
- amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
+ dev_info(
+ adev->dev,
+ "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues
@@ -4307,9 +4421,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_sync_create(&adev->isolation[i].active);
amdgpu_sync_create(&adev->isolation[i].prev);
}
- mutex_init(&adev->gfx.kfd_sch_mutex);
+ mutex_init(&adev->gfx.userq_sch_mutex);
mutex_init(&adev->gfx.workload_profile_mutex);
mutex_init(&adev->vcn.workload_profile_mutex);
+ mutex_init(&adev->userq_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4329,12 +4444,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->virt.rlcg_reg_lock);
spin_lock_init(&adev->wb.lock);
+ xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
+
INIT_LIST_HEAD(&adev->reset_list);
INIT_LIST_HEAD(&adev->ras_list);
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
+ INIT_LIST_HEAD(&adev->userq_mgr_list);
+
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -4391,8 +4510,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (!adev->rmmio)
return -ENOMEM;
- DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
- DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
+ dev_info(adev->dev, "register mmio base: 0x%08X\n",
+ (uint32_t)adev->rmmio_base);
+ dev_info(adev->dev, "register mmio size: %u\n",
+ (unsigned int)adev->rmmio_size);
/*
* Reset domain needs to be present early, before XGMI hive discovered
@@ -4529,7 +4650,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU posting now...\n");
+ dev_info(adev->dev, "GPU posting now...\n");
r = amdgpu_device_asic_init(adev);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
@@ -4639,12 +4760,12 @@ fence_driver_init:
r = amdgpu_pm_sysfs_init(adev);
if (r)
- DRM_ERROR("registering pm sysfs failed (%d).\n", r);
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
r = amdgpu_ucode_sysfs_init(adev);
if (r) {
adev->ucode_sysfs_en = false;
- DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
} else
adev->ucode_sysfs_en = true;
@@ -4659,7 +4780,7 @@ fence_driver_init:
amdgpu_fru_sysfs_init(adev);
amdgpu_reg_state_sysfs_init(adev);
- amdgpu_xcp_cfg_sysfs_init(adev);
+ amdgpu_xcp_sysfs_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4677,7 +4798,7 @@ fence_driver_init:
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
- px = amdgpu_device_supports_px(ddev);
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4789,7 +4910,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_fru_sysfs_fini(adev);
amdgpu_reg_state_sysfs_fini(adev);
- amdgpu_xcp_cfg_sysfs_fini(adev);
+ amdgpu_xcp_sysfs_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -4843,7 +4964,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
kfree(adev->xcp_mgr);
adev->xcp_mgr = NULL;
- px = amdgpu_device_supports_px(adev_to_drm(adev));
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4892,8 +5013,16 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
return 0;
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- if (ret)
- DRM_WARN("evicting device resources failed\n");
+ if (ret) {
+ dev_warn(adev->dev, "evicting device resources failed\n");
+ return ret;
+ }
+
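+	/* S4 only: this runs while swap is still reachable, so TTM can write
+	 * buffers back before hibernation proceeds (adev->in_s4 is set by the
+	 * pm notifier below on PM_HIBERNATION_PREPARE).
+	 */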
+ if (adev->in_s4) {
+ ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
+ if (ret)
+ dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
+ }
return ret;
}
@@ -4907,28 +5036,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* @data: data
*
* This function is called when the system is about to suspend or hibernate.
- * It is used to evict resources from the device before the system goes to
- * sleep while there is still access to swap.
+ * It is used to set the appropriate flags so that eviction can be optimized
+ * in the pm prepare callback.
*/
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data)
{
struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
- int r;
switch (mode) {
case PM_HIBERNATION_PREPARE:
adev->in_s4 = true;
- fallthrough;
- case PM_SUSPEND_PREPARE:
- r = amdgpu_device_evict_resources(adev);
- /*
- * This is considered non-fatal at this time because
- * amdgpu_device_prepare() will also fatally evict resources.
- * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
- */
- if (r)
- drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
+ break;
+ case PM_POST_HIBERNATION:
+ adev->in_s4 = false;
break;
}
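+	/* Sketch (registration is outside this hunk and assumed, not shown):
+	 * the notifier only fires once hooked into the PM core, e.g.:
+	 *
+	 *	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
+	 *	register_pm_notifier(&adev->pm_nb);
+	 */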
@@ -4949,15 +5070,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
- amdgpu_choose_low_power_state(adev);
-
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
- goto unprepare;
+ return r;
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
@@ -4968,15 +5087,32 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
- goto unprepare;
+ return r;
}
return 0;
+}
-unprepare:
- adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
+/**
+ * amdgpu_device_complete - complete power state transition
+ *
+ * @dev: drm dev pointer
+ *
+ * Undo the changes from amdgpu_device_prepare. This will be
+ * called on all resume transitions, including those that failed.
+ */
+void amdgpu_device_complete(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- return r;
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].version->funcs->complete)
+ continue;
+ adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
+ }
}
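+/* Illustrative sketch of an IP block opting into the new optional hook;
+ * "foo" and its callbacks are hypothetical, only .complete is the point:
+ *
+ *	static const struct amd_ip_funcs foo_ip_funcs = {
+ *		.name = "foo",
+ *		.prepare_suspend = foo_prepare_suspend,
+ *		.complete = foo_complete,
+ *	};
+ */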
/**
@@ -5000,14 +5136,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
+ if (!adev->in_s0ix && !adev->in_runpm)
+ amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
if (r)
return r;
}
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3))
+ dev_warn(adev->dev, "smart shift update failed\n");
if (notify_clients)
drm_client_dev_suspend(adev_to_drm(adev), false);
@@ -5018,8 +5156,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
- if (!adev->in_s0ix)
- amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ if (!adev->in_s0ix) {
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ amdgpu_userq_suspend(adev);
+ }
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -5041,6 +5181,32 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
return 0;
}
+static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
+{
+ int r;
+ unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
+
+ /* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
+ * may not work. The access could be blocked by nBIF protection as VF isn't in
+ * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX
+ * so that QEMU reprograms MSIX table.
+ */
+ amdgpu_restore_msix(adev);
+
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
+ if (r)
+ return r;
+
+ dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
+ prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
+
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset +=
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+
+ return 0;
+}
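+/* Worked example with hypothetical numbers: if get_mc_fb_offset()
+ * returns 0 and node_segment_size is 64ULL << 30 (64 GiB), a VF that
+ * migrated to physical_node_id 2 ends up with
+ * vram_base_offset = 0 + 2 * 64 GiB = 128 GiB.
+ */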
+
/**
* amdgpu_device_resume - initiate device resume
*
@@ -5062,6 +5228,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
return r;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ r = amdgpu_virt_resume(adev);
+ if (r)
+ goto exit;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -5083,7 +5255,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
}
if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
+
+ r = amdgpu_userq_resume(adev);
if (r)
goto exit;
}
@@ -5098,6 +5274,9 @@ exit:
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+
+ if (!adev->in_s0ix && !r && !adev->in_runpm)
+ r = amdgpu_amdkfd_resume_process(adev);
}
if (r)
@@ -5132,13 +5311,12 @@ exit:
dev->dev->power.disable_depth--;
#endif
}
- adev->in_suspend = false;
- if (adev->enable_mes)
- amdgpu_mes_self_test(adev);
+ amdgpu_vram_mgr_clear_reset_blocks(adev);
+ adev->in_suspend = false;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
+ dev_warn(adev->dev, "smart shift update failed\n");
return 0;
}
@@ -5517,6 +5695,29 @@ mode1_reset_failed:
return ret;
}
+int amdgpu_device_link_reset(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU link reset\n");
+
+ if (!adev->pcie_reset_ctx.occurs_dpc)
+ ret = amdgpu_dpm_link_reset(adev);
+
+ if (ret)
+ goto link_reset_failed;
+
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto link_reset_failed;
+
+ return 0;
+
+link_reset_failed:
+ dev_err(adev->dev, "GPU link reset failed\n");
+ return ret;
+}
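+/* Dispatch sketch (assumption, based on AMD_RESET_METHOD_LINK being
+ * added to the reset-method handling below):
+ *
+ *	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_LINK)
+ *		r = amdgpu_device_link_reset(adev);
+ */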
+
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
@@ -5646,7 +5847,9 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
- DRM_INFO("VRAM is lost due to GPU reset!\n");
+ dev_info(
+ tmp_adev->dev,
+ "VRAM is lost due to GPU reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
}
@@ -5821,6 +6024,7 @@ static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
+ case AMD_RESET_METHOD_LINK:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
break;
case AMD_RESET_METHOD_MODE2:
@@ -5924,117 +6128,85 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
{
struct amdgpu_device *tmp_adev;
int ret = 0;
- u32 status;
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
- if (PCI_POSSIBLE_ERROR(status)) {
- dev_err(tmp_adev->dev, "device lost from bus!");
- ret = -ENODEV;
- }
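+		/* accumulate per-device status; any lost device fails the hive check */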
+ ret |= amdgpu_device_bus_status_check(tmp_adev);
}
return ret;
}
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu_device pointer
- * @job: which job trigger hang
- * @reset_context: amdgpu reset context pointer
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Attempt to do soft-reset or full-reset and reinitialize Asic
- * Returns 0 for success or an error on failure.
- */
-
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context)
+static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive)
{
- struct list_head device_list, *device_list_handle = NULL;
- bool job_signaled = false;
- struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
- int i, r = 0;
- bool need_emergency_restart = false;
- bool audio_suspended = false;
- int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
-
- /*
- * If it reaches here because of hang/timeout and a RAS error is
- * detected at the same time, let RAS recovery take care of it.
- */
- if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
- !amdgpu_sriov_vf(adev) &&
- reset_context->src != AMDGPU_RESET_SRC_RAS) {
- dev_dbg(adev->dev,
- "Gpu recovery from source: %d yielding to RAS error recovery handling",
- reset_context->src);
- return 0;
- }
- /*
- * Special case: RAS triggered and full reset isn't supported
- */
- need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
-
- /*
- * Flush RAM to disk so that after reboot
- * the user can read log and see why the system rebooted.
- */
- if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
- amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
-
- ksys_sync_helper();
- emergency_restart();
- }
-
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
-
- if (!amdgpu_sriov_vf(adev))
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive)
- mutex_lock(&hive->hive_lock);
+ int r;
- reset_context->job = job;
- reset_context->hive = hive;
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
* to put adev in the 1st position.
*/
- INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- list_add_tail(&tmp_adev->reset_list, &device_list);
+ list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
- if (!list_is_first(&adev->reset_list, &device_list))
- list_rotate_to_front(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ if (!list_is_first(&adev->reset_list, device_list))
+ list_rotate_to_front(&adev->reset_list, device_list);
} else {
- list_add_tail(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ list_add_tail(&adev->reset_list, device_list);
}
- if (!amdgpu_sriov_vf(adev)) {
- r = amdgpu_device_health_check(device_list_handle);
+ if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
+ r = amdgpu_device_health_check(device_list);
if (r)
- goto end_reset;
+ return r;
}
- /* We need to lock reset domain only once both for XGMI and single device */
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
+ return 0;
+}
+
+static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+}
- /* block all schedulers and reset given job's ring */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
+static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i;
+
+ /* block all schedulers and reset given job's ring */
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
amdgpu_device_set_mp1_state(tmp_adev);
/*
@@ -6048,7 +6220,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* some audio codec errors.
*/
if (!amdgpu_device_suspend_display_audio(tmp_adev))
- audio_suspended = true;
+ tmp_adev->pcie_reset_ctx.audio_suspended = true;
amdgpu_ras_set_error_query_ready(tmp_adev, false);
@@ -6066,6 +6238,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* disable ras on ALL IPs */
if (!need_emergency_restart &&
+ (!adev->pcie_reset_ctx.occurs_dpc) &&
amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
@@ -6082,25 +6255,23 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
}
atomic_inc(&tmp_adev->gpu_reset_counter);
}
+}
- if (need_emergency_restart)
- goto skip_sched_resume;
-
- /*
- * Must check guilty signal here since after this point all old
- * HW fences are force signaled.
- *
- * job->base holds a reference to parent fence
- */
- if (job && dma_fence_is_signaled(&job->hw_fence)) {
- job_signaled = true;
- dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
- goto skip_hw_reset;
- }
+static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
+ int r = 0;
retry: /* Rest of adevs pre asic reset from XGMI hive. */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->no_hw_access = false;
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -6112,6 +6283,11 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
+ /* Bail out of reset early */
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
amdgpu_ras_set_fed(adev, true);
@@ -6126,12 +6302,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(device_list_handle, reset_context);
+ r = amdgpu_do_asic_reset(device_list, reset_context);
if (r && r == -EAGAIN)
goto retry;
}
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/*
* Drop any pending non scheduler resets queued before reset is done.
* Any reset scheduled after this point would be valid. Scheduler resets
@@ -6141,10 +6317,18 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_stop_pending_resets(tmp_adev);
}
-skip_hw_reset:
+ return r;
+}
+
+static int amdgpu_device_sched_resume(struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context,
+ bool job_signaled)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i, r = 0;
/* Post ASIC reset for all devs .*/
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -6175,13 +6359,23 @@ skip_hw_reset:
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
- if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(tmp_adev,
+ AMDGPU_SS_DEV_D0))
+ dev_warn(tmp_adev->dev,
+ "smart shift update failed\n");
}
}
-skip_sched_resume:
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ return r;
+}
+
+static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
@@ -6192,18 +6386,114 @@ skip_sched_resume:
if (!adev->kfd.init_complete)
amdgpu_amdkfd_device_init(adev);
- if (audio_suspended)
+ if (tmp_adev->pcie_reset_ctx.audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unset_mp1_state(tmp_adev);
amdgpu_ras_set_error_query_ready(tmp_adev, true);
}
+}
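+/* The recovery flow is now composed from the helpers above, roughly
+ * (amdgpu_device_gpu_recover() below is the authoritative sequence):
+ *
+ *	amdgpu_device_recovery_prepare(adev, &device_list, hive);
+ *	amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+ *	amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ *				      hive, need_emergency_restart);
+ *	amdgpu_device_asic_reset(adev, &device_list, reset_context);
+ *	amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
+ *	amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+ *	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ */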
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
- amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu_device pointer
+ * @job: which job trigger hang
+ * @reset_context: amdgpu reset context pointer
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize Asic
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head device_list;
+ bool job_signaled = false;
+ struct amdgpu_hive_info *hive = NULL;
+ int r = 0;
+ bool need_emergency_restart = false;
+
+ /*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+ "Gpu recovery from source: %d yielding to RAS error recovery handling",
+ reset_context->src);
+ return 0;
+ }
+
+ /*
+ * Special case: RAS triggered and full reset isn't supported
+ */
+ need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
+
+ /*
+ * Flush RAM to disk so that after reboot
+ * the user can read log and see why the system rebooted.
+ */
+ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
+ amdgpu_ras_get_context(adev)->reboot) {
+ dev_warn(adev->dev, "Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
+
+ dev_info(adev->dev, "GPU %s begin!\n",
+ need_emergency_restart ? "jobs stop":"reset");
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+
+ reset_context->job = job;
+ reset_context->hive = hive;
+ INIT_LIST_HEAD(&device_list);
+
+ if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
+ goto end_reset;
+
+ /* We need to lock reset domain only once both for XGMI and single device */
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+
+ amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
+ if (need_emergency_restart)
+ goto skip_sched_resume;
+ /*
+ * Must check guilty signal here since after this point all old
+ * HW fences are force signaled.
+ *
+ * job->base holds a reference to parent fence
+ */
+ if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
+ job_signaled = true;
+ dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ goto skip_hw_reset;
+ }
+
+ r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
+ if (r)
+ goto reset_unlock;
+skip_hw_reset:
+ r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
+ if (r)
+ goto reset_unlock;
+skip_sched_resume:
+ amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+reset_unlock:
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -6215,8 +6505,17 @@ end_reset:
atomic_set(&adev->reset_domain->reset_res, r);
- if (!r)
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ if (!r) {
+ struct amdgpu_task_info *ti = NULL;
+
+ if (job)
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+ ti ? &ti->task : NULL);
+
+ amdgpu_vm_put_task_info(ti);
+ }
return r;
}
@@ -6535,12 +6834,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
#endif
}
-int amdgpu_device_baco_enter(struct drm_device *dev)
+int amdgpu_device_baco_enter(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
if (ras && adev->ras_enabled &&
@@ -6550,13 +6848,12 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
return amdgpu_dpm_baco_enter(adev);
}
-int amdgpu_device_baco_exit(struct drm_device *dev)
+int amdgpu_device_baco_exit(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
@@ -6587,12 +6884,14 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_reset_context reset_context;
+ struct list_head device_list;
- DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
+ dev_info(adev->dev, "PCI error: detected callback!!\n");
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- DRM_WARN("No support for XGMI hive yet...");
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev, "No support for XGMI hive yet...\n");
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -6600,32 +6899,30 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
switch (state) {
case pci_channel_io_normal:
+ dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
return PCI_ERS_RESULT_CAN_RECOVER;
- /* Fatal error, prepare for slot reset */
case pci_channel_io_frozen:
- /*
- * Locking adev->reset_domain->sem will prevent any external access
- * to GPU during PCI error recovery
- */
- amdgpu_device_lock_reset_domain(adev->reset_domain);
- amdgpu_device_set_mp1_state(adev);
-
- /*
- * Block any work scheduling as we do for regular GPU reset
- * for the duration of the recovery
- */
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!amdgpu_ring_sched_ready(ring))
- continue;
-
- drm_sched_stop(&ring->sched, NULL);
+ /* Fatal error, prepare for slot reset */
+ dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ adev->pcie_reset_ctx.occurs_dpc = true;
+ memset(&reset_context, 0, sizeof(reset_context));
+ INIT_LIST_HEAD(&device_list);
+
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+ amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
- atomic_inc(&adev->gpu_reset_counter);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
+ dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -6638,8 +6935,10 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
*/
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
- DRM_INFO("PCI error: mmio enabled callback!!\n");
+ dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
@@ -6663,10 +6962,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int r, i;
struct amdgpu_reset_context reset_context;
- u32 memsize;
+ struct amdgpu_device *tmp_adev;
+ struct amdgpu_hive_info *hive;
struct list_head device_list;
+ int r = 0, i;
+ u32 memsize;
/* PCI error slot reset should be skipped During RAS recovery */
if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
@@ -6674,15 +6975,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
amdgpu_ras_in_recovery(adev))
return PCI_ERS_RESULT_RECOVERED;
- DRM_INFO("PCI error: slot reset callback!!\n");
+ dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->reset_list, &device_list);
-
/* wait for asic to come out of reset */
- msleep(500);
+ msleep(700);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -6703,26 +7001,40 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
-
- adev->no_hw_access = true;
- r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- adev->no_hw_access = false;
- if (r)
- goto out;
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+ INIT_LIST_HEAD(&device_list);
- r = amdgpu_do_asic_reset(&device_list, &reset_context);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ reset_context.hive = hive;
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else {
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ list_add_tail(&adev->reset_list, &device_list);
+ }
+ r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
out:
if (!r) {
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(adev->pdev);
-
- DRM_INFO("PCIe error recovery succeeded\n");
+ dev_info(adev->dev, "PCIe error recovery succeeded\n");
} else {
- DRM_ERROR("PCIe error recovery failed, err:%d", r);
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
+ if (hive) {
+ list_for_each_entry(tmp_adev, &device_list, reset_list)
+ amdgpu_device_unset_mp1_state(tmp_adev);
+ }
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ }
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
@@ -6739,26 +7051,37 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
-
+ struct list_head device_list;
+ struct amdgpu_hive_info *hive = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
- DRM_INFO("PCI error: resume callback!!\n");
+ dev_info(adev->dev, "PCI error: resume callback!!\n");
/* Only continue execution for the case of pci_channel_io_frozen */
if (adev->pci_channel_state != pci_channel_io_frozen)
return;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
+ INIT_LIST_HEAD(&device_list);
- if (!amdgpu_ring_sched_ready(ring))
- continue;
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = false;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+	} else {
+		list_add_tail(&adev->reset_list, &device_list);
+	}
- drm_sched_start(&ring->sched, 0);
- }
+	amdgpu_device_sched_resume(&device_list, NULL, false);
+ amdgpu_device_gpu_resume(adev, &device_list, false);
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ adev->pcie_reset_ctx.occurs_dpc = false;
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
}
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
@@ -6777,11 +7100,11 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
adev->pci_state = pci_store_saved_state(pdev);
if (!adev->pci_state) {
- DRM_ERROR("Failed to store PCI saved state");
+ dev_err(adev->dev, "Failed to store PCI saved state");
return false;
}
} else {
- DRM_WARN("Failed to save PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
return false;
}
@@ -6802,7 +7125,7 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
if (!r) {
pci_restore_state(pdev);
} else {
- DRM_WARN("Failed to load PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
return false;
}
@@ -7048,7 +7371,7 @@ struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
if (r)
- DRM_WARN("OOM tracking isolation\n");
+ dev_warn(adev->dev, "OOM tracking isolation\n");
out_grab_ref:
dma_fence_get(dep);
@@ -7116,9 +7439,11 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
tmp_ = RREG32(reg_addr);
loop--;
if (!loop) {
- DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
- inst, reg_name, (uint32_t)expected_value,
- (uint32_t)(tmp_ & (mask)));
+ dev_warn(
+ adev->dev,
+ "Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
+ inst, reg_name, (uint32_t)expected_value,
+ (uint32_t)(tmp_ & (mask)));
ret = -ETIMEDOUT;
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 9e738fae2b74..efe0058b48ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -270,12 +270,13 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
+ bool sz_valid = true;
uint64_t vram_size;
- u32 msg;
int i, ret = 0;
+ u32 msg;
if (!amdgpu_sriov_vf(adev)) {
- /* It can take up to a second for IFWI init to complete on some dGPUs,
+	/* It can take up to two seconds for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged via e.g., USB4, we need to
@@ -283,7 +284,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* continue.
*/
- for (i = 0; i < 1000; i++) {
+ for (i = 0; i < 2000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
@@ -291,9 +292,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
}
}
- vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ sz_valid = false;
+ else
+ vram_size <<= 20;
- if (vram_size) {
+ if (sz_valid) {
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
adev->mman.discovery_tmr_size, false);
@@ -301,6 +306,11 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
+ if (ret)
+ dev_err(adev->dev,
+ "failed to read discovery info from memory, vram size read: %llx",
+ vram_size);
+
return ret;
}
@@ -311,10 +321,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
const struct firmware *fw;
int r;
- r = request_firmware(&fw, fw_name, adev->dev);
+ r = firmware_request_nowarn(&fw, fw_name, adev->dev);
if (r) {
- dev_err(adev->dev, "can't load firmware \"%s\"\n",
- fw_name);
+ if (amdgpu_discovery == 2)
+ dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
+ else
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
return r;
}
@@ -449,16 +461,12 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
/* Read from file if it is the preferred option */
fw_name = amdgpu_discovery_get_fw_name(adev);
if (fw_name != NULL) {
- dev_info(adev->dev, "use ip discovery information from file");
+ drm_dbg(&adev->ddev, "use ip discovery information from file");
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
-
- if (r) {
- dev_err(adev->dev, "failed to read ip discovery binary from file\n");
- r = -EINVAL;
+ if (r)
goto out;
- }
-
} else {
+ drm_dbg(&adev->ddev, "use ip discovery information from memory");
r = amdgpu_discovery_read_binary_from_mem(
adev, adev->mman.discovery_bin);
if (r)
@@ -1328,10 +1336,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
int r;
r = amdgpu_discovery_init(adev);
- if (r) {
- DRM_ERROR("amdgpu_discovery_init failed\n");
+ if (r)
return r;
- }
wafl_ver = 0;
adev->gfx.xcc_mask = 0;
@@ -2549,38 +2555,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_RAVEN:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- /* this is not fatal. We have a fallback below
- * if the new firmwares are not present. some of
- * this will be overridden below to keep things
- * consistent with the current behavior.
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
*/
- r = amdgpu_discovery_reg_base_init(adev);
- if (!r) {
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- }
- break;
- default:
- r = amdgpu_discovery_reg_base_init(adev);
- if (r)
- return -EINVAL;
-
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- break;
- }
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2603,6 +2582,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2625,6 +2609,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
adev->vcn.num_vcn_inst = 1;
@@ -2666,6 +2655,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
}
break;
case CHIP_VEGA20:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 8;
@@ -2689,6 +2683,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
adev->vcn.num_vcn_inst = 2;
@@ -2717,6 +2716,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
adev->vcn.num_vcn_inst = 2;
@@ -2743,6 +2747,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r) {
+ drm_err(&adev->ddev, "discovery failed: %d\n", r);
+ return r;
+ }
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 35c778426a7c..51bab32fd8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1196,13 +1196,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
rfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd);
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -1297,6 +1298,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct amdgpu_framebuffer *amdgpu_fb;
@@ -1317,7 +1319,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
bo = gem_to_amdgpu_bo(obj);
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
- if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
@@ -1330,7 +1332,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
- mode_cmd, obj);
+ info, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index dfa0d642ac16..930c171473b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -44,6 +44,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 44e120f9f764..ff98c87b2e0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -513,8 +513,8 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
if (!adev)
return false;
- if (obj->import_attach) {
- struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ if (drm_gem_is_imported(obj)) {
+ struct dma_buf *dma_buf = obj->dma_buf;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
/* No XGMI with non AMD GPUs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
index 3f3662e8b871..3040437d99c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
@@ -41,7 +41,8 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return readl(adev->doorbell.cpu_addr + index);
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -63,7 +64,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
if (index < adev->doorbell.num_kernel_doorbells)
writel(v, adev->doorbell.cpu_addr + index);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -83,7 +85,8 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index));
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -105,7 +108,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
if (index < adev->doorbell.num_kernel_doorbells)
atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -166,7 +170,8 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
NULL,
(void **)&adev->doorbell.cpu_addr);
if (r) {
- DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r);
+ dev_err(adev->dev,
+ "Failed to allocate kernel doorbells, err=%d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 24ee4710f807..395c6be901ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -51,6 +51,8 @@
#include "amdgpu_reset.h"
#include "amdgpu_sched.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
#include "../amdxcp/amdgpu_xcp_drv.h"
/*
@@ -123,9 +125,10 @@
* - 3.61.0 - Contains fix for RV/PCO compute queues
* - 3.62.0 - Add AMDGPU_IDS_FLAGS_MODE_PF, AMDGPU_IDS_FLAGS_MODE_VF & AMDGPU_IDS_FLAGS_MODE_PT
* - 3.63.0 - GFX12 display DCC supports 256B max compressed block size
+ * - 3.64.0 - Userq IP support query
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 63
+#define KMS_DRIVER_MINOR 64
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -140,6 +143,8 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
AMDGPU_DEBUG_SMU_POOL = BIT(7),
+ AMDGPU_DEBUG_VM_USERPTR = BIT(8),
+ AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9)
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -176,7 +181,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
-bool enforce_isolation;
+int amdgpu_enforce_isolation = -1;
int amdgpu_modeset = -1;
/* Specifies the default granularity for SVM, used in buffer
@@ -238,6 +243,8 @@ int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
+int amdgpu_rebar = -1; /* auto */
+int amdgpu_user_queue = -1;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -355,12 +362,12 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* The second one is for Compute. The third and fourth ones are
* for SDMA and Video.
*
- * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
- * jobs is 10000. The timeout for compute is 60000.
+ * By default (with no lockup_timeout setting), the timeout for all jobs is 10000 ms.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
- "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
- "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
+MODULE_PARM_DESC(lockup_timeout,
+ "GPU lockup timeout in ms (default: 10000 for all jobs. "
+ "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
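+/* Illustrative invocation: amdgpu.lockup_timeout=10000,60000,10000,10000
+ * on the kernel command line sets per-class timeouts for GFX, Compute,
+ * SDMA and Video respectively.
+ */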
/**
@@ -1033,11 +1040,13 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
/**
- * DOC: enforce_isolation (bool)
- * enforce process isolation between graphics and compute via using the same reserved vmid.
+ * DOC: enforce_isolation (int)
+ * enforce process isolation between graphics and compute.
+ * (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)
*/
-module_param(enforce_isolation, bool, 0444);
-MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
+module_param_named(enforce_isolation, amdgpu_enforce_isolation, int, 0444);
+MODULE_PARM_DESC(enforce_isolation,
+"enforce process isolation between graphics and compute. (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)");
/**
* DOC: modeset (int)
@@ -1096,6 +1105,28 @@ MODULE_PARM_DESC(wbrf,
"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+/**
+ * DOC: rebar (int)
+ * Allow BAR resizing. Disable this to prevent the driver from attempting
+ * to resize the BAR if the GPU supports it and there is available MMIO space.
+ * Note that this just prevents the driver from resizing the BAR. The BIOS
+ * may have already resized the BAR at boot time.
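+ *
+ * For example, "amdgpu.rebar=0" on the kernel command line (illustrative
+ * usage) keeps the driver from attempting the resize.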
+ */
+MODULE_PARM_DESC(rebar, "Resizable BAR (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(rebar, amdgpu_rebar, int, 0444);
+
+/**
+ * DOC: user_queue (int)
+ * Enable user queues on systems that support user queues. Possible values:
+ *
+ * - -1 = auto (ASIC specific default)
+ * - 0 = user queues disabled
+ * - 1 = user queues enabled and kernel queues enabled (if supported)
+ * - 2 = user queues enabled and kernel queues disabled
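+ *
+ * For example, "amdgpu.user_queue=2" on the kernel command line
+ * (illustrative usage) enables user queues and disables kernel queues
+ * where the ASIC supports that combination.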
+ */
+MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
+module_param_named(user_queue, amdgpu_user_queue, int, 0444);
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -2244,6 +2275,15 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: use vram for smu pool\n");
adev->pm.smu_debug_mask |= SMU_DEBUG_POOL_USE_VRAM;
}
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_VM_USERPTR) {
+ pr_info("debug: VM mode debug for userptr is enabled\n");
+ adev->debug_vm_userptr = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) {
+ pr_info("debug: disable kernel logs of correctable errors\n");
+ adev->debug_disable_ce_logs = true;
+ }
}
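+/* Illustrative usage, assuming the existing amdgpu.debug_mask module
+ * parameter: debug_mask=0x300 selects both BIT(8) (VM_USERPTR) and
+ * BIT(9) (DISABLE_RAS_CE_LOG).
+ */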
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2287,7 +2327,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
amdgpu_aspm = 0;
if (amdgpu_virtual_display ||
- amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
+ amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK))
supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
@@ -2417,10 +2457,10 @@ retry_init:
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_px(ddev))
+ if (amdgpu_device_supports_px(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/* we want direct complete for BOCO */
- if (amdgpu_device_supports_boco(ddev))
+ if (amdgpu_device_supports_boco(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
DPM_FLAG_MAY_SKIP_RESUME);
@@ -2453,9 +2493,9 @@ retry_init:
* into D0 state. Then there will be a PMFW-aware D-state
* transition(D0->D3) on runpm suspend.
*/
- if (amdgpu_device_supports_baco(ddev) &&
+ if (amdgpu_device_supports_baco(adev) &&
!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type >= CHIP_NAVI10))
+ adev->asic_type >= CHIP_NAVI10)
amdgpu_get_secondary_funcs(adev);
}
@@ -2472,6 +2512,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
+ amdgpu_ras_eeprom_check_and_recover(adev);
amdgpu_xcp_dev_unplug(adev);
amdgpu_gmc_prepare_nps_mode_change(adev);
drm_dev_unplug(dev);
@@ -2501,6 +2542,10 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
if (amdgpu_ras_intr_triggered())
return;
+	/* the device may not have been resumed here; return immediately in that case */
+ if (adev->in_s4 && adev->in_suspend)
+ return;
+
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -2517,11 +2562,14 @@ static int amdgpu_pmops_prepare(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+	/* the device may not have been resumed here; return immediately in that case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
+
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
- if (amdgpu_device_supports_boco(drm_dev) &&
- pm_runtime_suspended(dev))
+ if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev))
return 1;
/* if we will not support s3 or s2i for the device
@@ -2536,7 +2584,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
static void amdgpu_pmops_complete(struct device *dev)
{
- /* nothing to do */
+ amdgpu_device_complete(dev_get_drvdata(dev));
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -2615,18 +2663,22 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
- int r;
- r = amdgpu_device_resume(drm_dev, true);
- adev->in_s4 = false;
+	/* normal hibernation: skip resume; only resume when recovering from a failed hibernate */
+ if (!pm_hibernate_is_recovering())
+ return 0;
- return r;
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+	/* the device may not have been resumed here; return immediately in that case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2634,9 +2686,6 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
-
- adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}
@@ -2708,6 +2757,29 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
return 0;
}
+static int amdgpu_runtime_idle_check_userq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&adev->userq_mutex);
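+	/* any live user queue marks the device busy; bail out on the first one */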
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+done:
+ mutex_unlock(&adev->userq_mutex);
+
+ return ret;
+}
+
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2723,6 +2795,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
ret = amdgpu_runtime_idle_check_display(dev);
if (ret)
return ret;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+ if (ret)
+ return ret;
/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -2776,7 +2851,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
/* nothing to do */
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_enter(drm_dev);
+ amdgpu_device_baco_enter(adev);
}
dev_dbg(&pdev->dev, "asic/device is runtime suspended\n");
@@ -2817,7 +2892,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_exit(drm_dev);
+ amdgpu_device_baco_exit(adev);
}
ret = amdgpu_device_resume(drm_dev, false);
if (ret) {
@@ -2844,12 +2919,30 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
}
ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ goto done;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+done:
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return ret;
}
+static int amdgpu_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+ if (fpriv) {
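+		/* tear down user queues and eviction fences before core drm_release() */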
+ fpriv->evf_mgr.fd_closing = true;
+ amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ }
+
+ return drm_release(inode, filp);
+}
+
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
@@ -2901,7 +2994,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.flush = amdgpu_flush,
- .release = drm_release,
+ .release = amdgpu_drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
@@ -2948,6 +3041,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -3034,7 +3130,7 @@ static int __init amdgpu_init(void)
if (r)
goto error_sync;
- r = amdgpu_fence_slab_init();
+ r = amdgpu_userq_fence_slab_init();
if (r)
goto error_fence;
@@ -3068,7 +3164,7 @@ static void __exit amdgpu_exit(void)
amdgpu_unregister_atpx_handler();
amdgpu_acpi_release();
amdgpu_sync_fini();
- amdgpu_fence_slab_fini();
+ amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
new file mode 100644
index 000000000000..23d7d0b0d625
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/sched.h>
+#include <drm/drm_exec.h>
+#include "amdgpu.h"
+
+#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
+#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)
+
+static const char *
+amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu_eviction_fence";
+}
+
+static const char *
+amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence *ef;
+
+ ef = container_of(f, struct amdgpu_eviction_fence, base);
+ return ef->timeline_name;
+}
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec)
+{
+ struct amdgpu_eviction_fence *old_ef, *new_ef;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ if (evf_mgr->ev_fence &&
+ !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
+ return 0;
+ /*
+ * Steps to replace eviction fence:
+ * * lock all objects in exec (caller)
+ * * create a new eviction fence
+ * * update new eviction fence in evf_mgr
+ * * attach the new eviction fence to BOs
+ * * release the old fence
+ * * unlock the objects (caller)
+ */
+ new_ef = amdgpu_eviction_fence_create(evf_mgr);
+ if (!new_ef) {
+ DRM_ERROR("Failed to create new eviction fence\n");
+ return -ENOMEM;
+ }
+
+ /* Update the eviction fence now */
+ spin_lock(&evf_mgr->ev_fence_lock);
+ old_ef = evf_mgr->ev_fence;
+ evf_mgr->ev_fence = new_ef;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ /* Attach the new fence */
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ if (!bo)
+ continue;
+ ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
+ if (ret) {
+ DRM_ERROR("Failed to attch new eviction fence\n");
+ goto free_err;
+ }
+ }
+
+ /* Free old fence */
+ if (old_ef)
+ dma_fence_put(&old_ef->base);
+ return 0;
+
+free_err:
+ kfree(new_ef);
+ return ret;
+}
+
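
The function above assumes the caller already holds the reservation locks of all affected BOs via drm_exec, as the step list notes. A minimal sketch of that caller-side pattern, assuming a single hypothetical bo and fpriv (illustrative only, not part of the patch):

	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto out;
	}
	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
out:
	drm_exec_fini(&exec);
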
+static void
+amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
+ struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_fence_get(&ev_fence->base);
+ else
+ goto unlock;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ amdgpu_userq_evict(uq_mgr, ev_fence);
+
+ mutex_unlock(&uq_mgr->userq_mutex);
+ dma_fence_put(&ev_fence->base);
+ return;
+
+unlock:
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ if (!f)
+ return true;
+
+ ev_fence = to_ev_fence(f);
+ evf_mgr = ev_fence->evf_mgr;
+
+ schedule_delayed_work(&evf_mgr->suspend_work, 0);
+ return true;
+}
+
+static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
+ .get_driver_name = amdgpu_eviction_fence_get_driver_name,
+ .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
+ .enable_signaling = amdgpu_eviction_fence_enable_signaling,
+};
+
+void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ spin_lock(&evf_mgr->ev_fence_lock);
+ dma_fence_signal(&ev_fence->base);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+}
+
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
+ if (!ev_fence)
+ return NULL;
+
+ ev_fence->evf_mgr = evf_mgr;
+ get_task_comm(ev_fence->timeline_name, current);
+ spin_lock_init(&ev_fence->lock);
+ dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
+ return ev_fence;
+}
+
+void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ /* Wait for any pending work to execute */
+ flush_delayed_work(&evf_mgr->suspend_work);
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ if (!ev_fence)
+ return;
+
+ dma_fence_wait(&ev_fence->base, false);
+
+ /* Last unref of ev_fence */
+ dma_fence_put(&ev_fence->base);
+}
+
+int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ int ret;
+
+ if (!resv)
+ return 0;
+
+ ret = dma_resv_reserve_fences(resv, 1);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to resv fence space\n");
+ return ret;
+ }
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ return 0;
+}
+
+void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct dma_fence *stub = dma_fence_get_stub();
+
+ dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
+ stub, DMA_RESV_USAGE_BOOKKEEP);
+ dma_fence_put(stub);
+}
+
+int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ /* This needs to be done one time per open */
+ atomic_set(&evf_mgr->ev_fence_seq, 0);
+ evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
+ spin_lock_init(&evf_mgr->ev_fence_lock);
+
+ INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
+ return 0;
+}
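
Putting the pieces together, the per-fd lifecycle implied by this file is: init once per open, attach on each GEM object open, and destroy (after flushing the suspend work) at file release. A condensed, hypothetical sketch of that wiring; the real attach and release hooks are amdgpu_gem_object_open and amdgpu_drm_release elsewhere in this patch, and the init site is not shown in this hunk:

	static int example_fd_open(struct amdgpu_fpriv *fpriv)
	{
		/* once per open */
		return amdgpu_eviction_fence_init(&fpriv->evf_mgr);
	}

	static int example_bo_open(struct amdgpu_fpriv *fpriv, struct amdgpu_bo *bo)
	{
		return amdgpu_eviction_fence_attach(&fpriv->evf_mgr, bo);
	}

	static void example_fd_close(struct amdgpu_fpriv *fpriv)
	{
		fpriv->evf_mgr.fd_closing = true;
		amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
	}
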
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
new file mode 100644
index 000000000000..fcd867b7147d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_EV_FENCE_H_
+#define AMDGPU_EV_FENCE_H_
+
+struct amdgpu_eviction_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+ char timeline_name[TASK_COMM_LEN];
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+};
+
+struct amdgpu_eviction_fence_mgr {
+ u64 ev_fence_ctx;
+ atomic_t ev_fence_seq;
+ spinlock_t ev_fence_lock;
+ struct amdgpu_eviction_fence *ev_fence;
+ struct delayed_work suspend_work;
+ uint8_t fd_closing;
+};
+
+/* Eviction fence helper functions */
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+int
+amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+void
+amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+int
+amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5f5c00ace96b..9e7506965cab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -42,37 +42,6 @@
#include "amdgpu_reset.h"
/*
- * Fences mark an event in the GPUs pipeline and are used
- * for GPU/CPU synchronization. When the fence is written,
- * it is expected that all buffers associated with that fence
- * are no longer in use by the associated ring on the GPU and
- * that the relevant GPU caches have been flushed.
- */
-
-struct amdgpu_fence {
- struct dma_fence base;
-
- /* RB, DMA, etc. */
- struct amdgpu_ring *ring;
- ktime_t start_timestamp;
-};
-
-static struct kmem_cache *amdgpu_fence_slab;
-
-int amdgpu_fence_slab_init(void)
-{
- amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- return 0;
-}
-
-void amdgpu_fence_slab_fini(void)
-{
- rcu_barrier();
- kmem_cache_destroy(amdgpu_fence_slab);
-}
-/*
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
@@ -130,14 +99,14 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
*
* @ring: ring the fence is associated with
* @f: resulting fence object
- * @job: job the fence is embedded in
+ * @af: amdgpu fence input
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
- unsigned int flags)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
@@ -146,40 +115,35 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
uint32_t seq;
int r;
- if (job == NULL) {
- /* create a sperate hw fence */
- am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
- if (am_fence == NULL)
+ if (!af) {
+ /* create a separate hw fence */
+ am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
+ if (!am_fence)
return -ENOMEM;
- fence = &am_fence->base;
- am_fence->ring = ring;
+ am_fence->context = 0;
} else {
- /* take use of job-embedded fence */
- fence = &job->hw_fence;
+ am_fence = af;
}
+ fence = &am_fence->base;
+ am_fence->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- if (job && job->job_run_counter) {
- /* reinit seq for resubmitted jobs */
- fence->seqno = seq;
- /* TO be inline with external fence creation and other drivers */
+ am_fence->seq = seq;
+ if (af) {
+ dma_fence_init(fence, &amdgpu_job_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
+ /* Against remove in amdgpu_job_{free, free_cb} */
dma_fence_get(fence);
} else {
- if (job) {
- dma_fence_init(fence, &amdgpu_job_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- /* Against remove in amdgpu_job_{free, free_cb} */
- dma_fence_get(fence);
- } else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- }
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_wptr(fence);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -292,6 +256,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
do {
struct dma_fence *fence, **ptr;
+ struct amdgpu_fence *am_fence;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -304,6 +269,12 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
+ /* Save the wptr in the fence driver so we know what the last processed
+ * wptr was. This is required for re-emitting the ring state for
+ * queues that are reset but are not guilty and thus have no guilty fence.
+ */
+ am_fence = container_of(fence, struct amdgpu_fence, base);
+ drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -322,11 +293,13 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
*/
static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = from_timer(ring, t,
- fence_drv.fallback_timer);
+ struct amdgpu_ring *ring = timer_container_of(ring, t,
+ fence_drv.fallback_timer);
if (amdgpu_fence_process(ring))
- DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
+ dev_warn(ring->adev->dev,
+ "Fence fallback timer expired on ring %s\n",
+ ring->name);
}
/**
@@ -718,7 +691,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
* it right here or we won't be able to track them in fence_drv
* and they will remain unsignaled during sa_bo free.
*/
- job = container_of(old, struct amdgpu_job, hw_fence);
+ job = container_of(old, struct amdgpu_job, hw_fence.base);
if (!job->base.s_fence && !dma_fence_is_signaled(old))
dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
@@ -764,6 +737,86 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
}
+
+/**
+ * Kernel queue reset handling
+ *
+ * The driver can reset individual queues for most engines, but those queues
+ * may contain work from multiple contexts. Resetting the queue will reset
+ * lose all of that state. In order to minimize the collateral damage, the
+ * driver will save the ring contents which are not associated with the guilty
+ * context prior to resetting the queue. After resetting the queue the queue
+ * contents from the other contexts is re-emitted to the rings so that it can
+ * be processed by the engine. To handle this, we save the queue's write
+ * pointer (wptr) in the fences associated with each context. If we get a
+ * queue timeout, we can then use the wptrs from the fences to determine
+ * which data needs to be saved out of the queue's ring buffer.
+ */
+
+/**
+ * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
+ *
+ * @fence: fence of the ring to signal
+ *
+ */
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
+{
+ dma_fence_set_error(&fence->base, -ETIME);
+ amdgpu_fence_write(fence->ring, fence->seq);
+ amdgpu_fence_process(fence->ring);
+}
+
+void amdgpu_fence_save_wptr(struct dma_fence *fence)
+{
+ struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);
+
+ am_fence->wptr = am_fence->ring->wptr;
+}
+
+static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
+ u64 start_wptr, u32 end_wptr)
+{
+ unsigned int first_idx = start_wptr & ring->buf_mask;
+ unsigned int last_idx = end_wptr & ring->buf_mask;
+ unsigned int i;
+
+ /* Backup the contents of the ring buffer. */
+ for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
+ ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
+}
+
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ u64 wptr, i, seqno;
+
+ seqno = amdgpu_fence_read(ring);
+ wptr = ring->fence_drv.signalled_wptr;
+ ring->ring_backup_entries_to_copy = 0;
+
+ for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
+ ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ /* save everything if the ring is not guilty, otherwise
+ * just save the content from other contexts.
+ */
+ if (!guilty_fence || (fence->context != guilty_fence->context))
+ amdgpu_ring_backup_unprocessed_command(ring, wptr,
+ fence->wptr);
+ wptr = fence->wptr;
+ }
+ rcu_read_unlock();
+ }
+}
+
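
For illustration only, the masked-index copy in amdgpu_ring_backup_unprocessed_command can be modeled standalone; this toy assumes a 16-entry power-of-two ring, mirroring the buf_mask arithmetic above:

	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 16u			/* power of two, as in the driver */
	#define BUF_MASK  (RING_SIZE - 1)

	static uint32_t ring[RING_SIZE];
	static uint32_t backup[RING_SIZE];

	/* Copy [start_wptr, end_wptr) out of the ring, wrapping at the end. */
	static unsigned int backup_range(uint64_t start_wptr, uint32_t end_wptr)
	{
		unsigned int i = start_wptr & BUF_MASK;
		unsigned int last = end_wptr & BUF_MASK;
		unsigned int n = 0;

		for (; i != last; i = (i + 1) & BUF_MASK)
			backup[n++] = ring[i];
		return n;
	}

	int main(void)
	{
		/* wptrs 14 and 19 wrap around: slots 14, 15, 0, 1, 2 are copied */
		printf("copied %u dwords\n", backup_range(14, 19));
		return 0;
	}
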
/*
* Common fence implementation
*/
@@ -780,7 +833,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
@@ -810,7 +863,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
*/
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
@@ -830,7 +883,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free fence_slab if it's separated fence*/
- kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+ kfree(to_amdgpu_fence(f));
}
/**
@@ -845,7 +898,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free job if fence has a parent job */
- kfree(container_of(f, struct amdgpu_job, hw_fence));
+ kfree(container_of(f, struct amdgpu_job, hw_fence.base));
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 1ae88c459da5..b0082aa7f3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -144,7 +144,8 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) {
- DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+ dev_warn(adev->dev,
+ "Cannot access FRU, EEPROM accessor not initialized");
return -ENODEV;
}
@@ -152,19 +153,22 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf,
sizeof(buf));
if (len != 8) {
- DRM_ERROR("Couldn't read the IPMI Common Header: %d", len);
+ dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d",
+ len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x",
+ buf[0]);
return -EIO;
}
for (csum = 0; len > 0; len--)
csum += buf[len - 1];
if (csum) {
- DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x",
+ csum);
return -EIO;
}
@@ -179,12 +183,14 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* Read the header of the PIA. */
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3);
if (len != 3) {
- DRM_ERROR("Couldn't read the Product Info Area header: %d", len);
+ dev_err(adev->dev,
+ "Couldn't read the Product Info Area header: %d", len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x",
+ buf[0]);
return -EIO;
}
@@ -197,14 +203,16 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size);
if (len != size) {
kfree(pia);
- DRM_ERROR("Couldn't read the Product Info Area: %d", len);
+ dev_err(adev->dev, "Couldn't read the Product Info Area: %d",
+ len);
return len < 0 ? len : -EIO;
}
for (csum = 0; size > 0; size--)
csum += pia[size - 1];
if (csum) {
- DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x",
+ csum);
kfree(pia);
return -EIO;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 69429df09477..6626a6e64ff5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,6 +36,7 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
@@ -44,6 +45,114 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"
+static int
+amdgpu_gem_add_input_fence(struct drm_file *filp,
+ uint64_t syncobj_handles_array,
+ uint32_t num_syncobj_handles)
+{
+ struct dma_fence *fence;
+ uint32_t *syncobj_handles;
+ int ret, i;
+
+ if (!num_syncobj_handles)
+ return 0;
+
+ syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
+ size_mul(sizeof(uint32_t), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ for (i = 0; i < num_syncobj_handles; i++) {
+
+ if (!syncobj_handles[i]) {
+ ret = -EINVAL;
+ goto free_memdup;
+ }
+
+ ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
+ if (ret)
+ goto free_memdup;
+
+ dma_fence_wait(fence, false);
+
+ /* TODO: optimize async handling */
+ dma_fence_put(fence);
+ }
+
+free_memdup:
+ kfree(syncobj_handles);
+ return ret;
+}
+
+static int
+amdgpu_gem_update_timeline_node(struct drm_file *filp,
+ uint32_t syncobj_handle,
+ uint64_t point,
+ struct drm_syncobj **syncobj,
+ struct dma_fence_chain **chain)
+{
+ if (!syncobj_handle)
+ return 0;
+
+ /* Find the sync object */
+ *syncobj = drm_syncobj_find(filp, syncobj_handle);
+ if (!*syncobj)
+ return -ENOENT;
+
+ if (!point)
+ return 0;
+
+ /* Allocate the chain node */
+ *chain = dma_fence_chain_alloc();
+ if (!*chain) {
+ drm_syncobj_put(*syncobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+amdgpu_gem_update_bo_mapping(struct drm_file *filp,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation,
+ uint64_t point,
+ struct dma_fence *fence,
+ struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain)
+{
+ struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct dma_fence *last_update;
+
+ if (!syncobj)
+ return;
+
+ /* Find the last update fence */
+ switch (operation) {
+ case AMDGPU_VA_OP_MAP:
+ case AMDGPU_VA_OP_REPLACE:
+ if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
+ last_update = vm->last_update;
+ else
+ last_update = bo_va->last_pt_update;
+ break;
+ case AMDGPU_VA_OP_UNMAP:
+ case AMDGPU_VA_OP_CLEAR:
+ last_update = fence;
+ break;
+ default:
+ return;
+ }
+
+ /* Add fence to timeline */
+ if (!point)
+ drm_syncobj_replace_fence(syncobj, last_update);
+ else
+ drm_syncobj_add_point(syncobj, chain, last_update, point);
+}
+
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -184,6 +293,15 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
else
++bo_va->ref_count;
+
+ /* attach gfx eviction fence */
+ r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo);
+ if (r) {
+ DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
+ amdgpu_bo_unreserve(abo);
+ return r;
+ }
+
amdgpu_bo_unreserve(abo);
/* Validate and add eviction fence to DMABuf imports with dynamic
@@ -199,8 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
*/
if (!vm->is_compute_context || !vm->process_info)
return 0;
- if (!obj->import_attach ||
- !dma_buf_is_dynamic(obj->import_attach->dmabuf))
+ if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf))
return 0;
mutex_lock_nested(&vm->process_info->lock, 1);
if (!WARN_ON(!vm->process_info->eviction_fence)) {
@@ -211,7 +328,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
if (ti) {
- dev_warn(adev->dev, "pid %d\n", ti->pid);
+ dev_warn(adev->dev, "pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
@@ -247,6 +364,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
}
+ if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+ amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);
+
bo_va = amdgpu_vm_bo_find(vm, bo);
if (!bo_va || --bo_va->ref_count)
goto out_unlock;
@@ -321,10 +441,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle, initial_domain;
int r;
- /* reject DOORBELLs until userspace code to use it is available */
- if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
- return -EINVAL;
-
/* reject invalid gem flags */
if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
@@ -638,18 +754,23 @@ out:
*
* Update the bo_va directly after setting its address. Errors are not
* vital here, so they are not reported back to userspace.
+ *
+ * Returns the resulting fence if freed BO(s) were cleared from the page
+ * tables, otherwise a stub fence in case of error.
*/
-static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo_va *bo_va,
- uint32_t operation)
+static struct dma_fence *
+amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
+ struct dma_fence *fence = dma_fence_get_stub();
int r;
if (!amdgpu_vm_ready(vm))
- return;
+ return fence;
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (r)
goto error;
@@ -665,6 +786,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+
+ return fence;
}
/**
@@ -713,6 +836,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence_chain *timeline_chain = NULL;
+ struct dma_fence *fence;
struct drm_exec exec;
uint64_t va_flags;
uint64_t vm_size;
@@ -774,6 +900,12 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
abo = NULL;
}
+ r = amdgpu_gem_add_input_fence(filp,
+ args->input_fence_syncobj_handles,
+ args->num_syncobj_handles);
+ if (r)
+ goto error_put_gobj;
+
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
@@ -802,6 +934,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = NULL;
}
+ r = amdgpu_gem_update_timeline_node(filp,
+ args->vm_timeline_syncobj_out,
+ args->vm_timeline_point,
+ &timeline_syncobj,
+ &timeline_chain);
+ if (r)
+ goto error;
+
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
@@ -827,12 +967,24 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
- args->operation);
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+ fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
+ args->operation);
+
+ if (timeline_syncobj)
+ amdgpu_gem_update_bo_mapping(filp, bo_va,
+ args->operation,
+ args->vm_timeline_point,
+ fence, timeline_syncobj,
+ timeline_chain);
+ else
+ dma_fence_put(fence);
+
+ }
error:
drm_exec_fini(&exec);
+error_put_gobj:
drm_gem_object_put(gobj);
return r;
}
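
For context, these new ioctl fields let userspace order VA updates against a timeline syncobj instead of blocking. A hedged libdrm-style sketch, assuming the matching amdgpu_drm.h change (not shown in this hunk) exposes the same field names the kernel reads above:

	uint32_t timeline;
	uint64_t point = 1;
	struct drm_amdgpu_gem_va va = {0};

	drmSyncobjCreate(fd, 0, &timeline);

	va.handle = bo_handle;
	va.operation = AMDGPU_VA_OP_MAP;
	va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	va.va_address = gpu_va;
	va.map_size = bo_size;
	va.vm_timeline_syncobj_out = timeline;	/* new in this patch */
	va.vm_timeline_point = point;		/* new in this patch */

	drmCommandWriteRead(fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	/* Wait until the page-table update is visible at the timeline point. */
	drmSyncobjTimelineWait(fd, &timeline, &point, 1, INT64_MAX, 0, NULL);
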
@@ -871,7 +1023,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
- if (robj->tbo.base.import_attach &&
+ if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
amdgpu_bo_unreserve(robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index cf2df7790077..c80c8f543532 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -74,14 +75,15 @@ bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
- int me, int pipe, int queue)
+static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
+ int me, int pipe, int queue)
{
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
int bit = 0;
bit += me * adev->gfx.me.num_pipe_per_me
- * adev->gfx.me.num_queue_per_pipe;
- bit += pipe * adev->gfx.me.num_queue_per_pipe;
+ * num_queue_per_pipe;
+ bit += pipe * num_queue_per_pipe;
bit += queue;
return bit;
@@ -147,7 +149,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
if (amdgpu_compute_multipipe != -1) {
- DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+ dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n",
amdgpu_compute_multipipe);
return amdgpu_compute_multipipe == 1;
}
@@ -238,8 +240,8 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
- int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
- adev->gfx.me.num_queue_per_pipe;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+ int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
if (multipipe_policy) {
/* policy: amdgpu owns the first queue per pipe at this stage
@@ -247,9 +249,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
for (i = 0; i < max_queues_per_me; i++) {
pipe = i % adev->gfx.me.num_pipe_per_me;
queue = (i / adev->gfx.me.num_pipe_per_me) %
- adev->gfx.me.num_queue_per_pipe;
+ num_queue_per_pipe;
- set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
+ set_bit(pipe * num_queue_per_pipe + queue,
adev->gfx.me.queue_bitmap);
}
} else {
@@ -258,8 +260,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
}
/* update the number of active graphics rings */
- adev->gfx.num_gfx_rings =
- bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
+ if (adev->gfx.num_gfx_rings)
+ adev->gfx.num_gfx_rings =
+ bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
@@ -671,7 +674,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
if (WARN_ON(i > (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+ dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
break;
}
@@ -680,15 +683,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
amdgpu_device_flush_hdp(adev, NULL);
- DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
- kiq_ring->queue);
+ dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
+ kiq_ring->pipe, kiq_ring->queue);
spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -709,7 +712,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ dev_err(adev->dev, "KCQ enable failed\n");
return r;
}
@@ -731,7 +734,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_mes_map_legacy_queue(adev,
&adev->gfx.gfx_ring[j]);
if (r) {
- DRM_ERROR("failed to map gfx queue\n");
+ dev_err(adev->dev, "failed to map gfx queue\n");
return r;
}
}
@@ -745,7 +748,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_gfx_rings);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -766,7 +769,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KGQ enable failed\n");
+ dev_err(adev->dev, "KGQ enable failed\n");
return r;
}
@@ -1027,7 +1030,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
ih_data.head = *ras_if;
- DRM_ERROR("CP ECC ERROR IRQ\n");
+ dev_err(adev->dev, "CP ECC ERROR IRQ\n");
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@@ -1351,6 +1354,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int mode;
+ /* Only minimal precaution taken to reject requests while in reset. */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE);
@@ -1394,8 +1401,14 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
return -EINVAL;
}
+ /* Don't allow a switch while under reset */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EPERM;
+
ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
+ up_read(&adev->reset_domain->sem);
+
if (ret)
return ret;
@@ -1466,6 +1479,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
job->enforce_isolation = true;
+ /* always run the cleaner shader */
+ job->run_cleaner_shader = true;
ib = &job->ibs[0];
for (i = 0; i <= ring->funcs->align_mask; ++i)
@@ -1552,6 +1567,9 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ if (adev->gfx.disable_kq)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1594,7 +1612,8 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
* Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
* feature for each GPU partition. Reading from the 'enforce_isolation'
* sysfs file returns the isolation settings for all partitions, where '0'
- * indicates disabled and '1' indicates enabled.
+ * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy
+ * mode, and '3' indicates enabled without the cleaner shader.
*
* Return: The number of bytes read from the sysfs file.
*/
@@ -1629,9 +1648,12 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
* @count: The size of the input data
*
* This function allows control over the 'enforce_isolation' feature, which
- * serializes access to the graphics engine. Writing '1' or '0' to the
- * 'enforce_isolation' sysfs file enables or disables process isolation for
- * each partition. The input should specify the setting for all partitions.
+ * serializes access to the graphics engine. Writing to the 'enforce_isolation'
+ * sysfs file sets the isolation mode for each partition: '0' disables
+ * isolation, '1' enables isolation with the cleaner shader, '2' enables legacy
+ * isolation without the cleaner shader, and '3' enables process isolation
+ * without submitting the cleaner shader. The input should specify the setting
+ * for all partitions.
*
* Return: The number of bytes written to the sysfs file.
*/
@@ -1668,13 +1690,34 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
return -EINVAL;
for (i = 0; i < num_partitions; i++) {
- if (partition_values[i] != 0 && partition_values[i] != 1)
+ if (partition_values[i] != 0 &&
+ partition_values[i] != 1 &&
+ partition_values[i] != 2 &&
+ partition_values[i] != 3)
return -EINVAL;
}
mutex_lock(&adev->enforce_isolation_mutex);
- for (i = 0; i < num_partitions; i++)
- adev->enforce_isolation[i] = partition_values[i];
+ for (i = 0; i < num_partitions; i++) {
+ switch (partition_values[i]) {
+ case 0:
+ default:
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
mutex_unlock(&adev->enforce_isolation_mutex);
amdgpu_mes_update_enforce_isolation(adev);
@@ -1923,39 +1966,41 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
bool enable)
{
- mutex_lock(&adev->gfx.kfd_sch_mutex);
+ mutex_lock(&adev->gfx.userq_sch_mutex);
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
* Note that the bug may be in a different caller than the one which triggers the
* WARN_ON_ONCE.
*/
- if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+ if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
goto unlock;
}
- adev->gfx.kfd_sch_req_count[idx]--;
+ adev->gfx.userq_sch_req_count[idx]--;
- if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
- adev->gfx.kfd_sch_inactive[idx]) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0 &&
+ adev->gfx.userq_sch_inactive[idx]) {
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
}
} else {
- if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0) {
cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
- if (!adev->gfx.kfd_sch_inactive[idx]) {
- amdgpu_amdkfd_stop_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = true;
+ if (!adev->gfx.userq_sch_inactive[idx]) {
+ amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = true;
}
}
- adev->gfx.kfd_sch_req_count[idx]++;
+ adev->gfx.userq_sch_req_count[idx]++;
}
unlock:
- mutex_unlock(&adev->gfx.kfd_sch_mutex);
+ mutex_unlock(&adev->gfx.userq_sch_mutex);
}
/**
@@ -2000,12 +2045,13 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
msecs_to_jiffies(1));
} else {
/* Tell KFD to resume the runqueue */
- if (adev->kfd.init_complete) {
- WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
- WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
+ WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
+
+ amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
amdgpu_amdkfd_start_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = false;
- }
+ adev->gfx.userq_sch_inactive[idx] = false;
}
mutex_unlock(&adev->enforce_isolation_mutex);
}
@@ -2029,7 +2075,7 @@ amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
bool wait = false;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
/* set the initial values if nothing is set */
if (!adev->gfx.enforce_isolation_jiffies[idx]) {
adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
@@ -2096,7 +2142,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
sched_work = true;
}
@@ -2133,7 +2179,7 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
return;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
sched_work = true;
}
@@ -2182,6 +2228,9 @@ void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
enum PP_SMC_POWER_PROFILE profile;
int r;
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
if (adev->gfx.num_gfx_rings)
profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
else
@@ -2212,11 +2261,84 @@ void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
atomic_dec(&ring->adev->gfx.total_submission_cnt);
schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
}
+/**
+ * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
+ *
+ * @buffer: Output buffer that receives the CSB PACKET3 preamble setup.
+ *
+ * Return:
+ * The next free index into @buffer.
+ */
+u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
+{
+ u32 count = 0;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_data_parser - Parse clear-state (CS) data into the CSB
+ *
+ * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
+ * @buffer: Output buffer that receives the SET_CONTEXT_REG packets.
+ * @count: Index into @buffer at which to start writing.
+ *
+ * Return:
+ * The next free index into @buffer.
+ */
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
+{
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ u32 i;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ }
+ }
+ }
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
+ *
+ * @buffer: Output buffer that receives the PACKET3 preamble end.
+ * @count: Index into @buffer at which to write the preamble end.
+ */
+void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
+{
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
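
The three CSB helpers above are meant to be composed by each GFX IP version's get_csb_buffer callback. A minimal sketch of that composition (the function name here is hypothetical; the per-IP users are not shown in this hunk):

	static void example_get_csb_buffer(struct amdgpu_device *adev,
					   volatile u32 *buffer)
	{
		u32 count;

		if (adev->gfx.rlc.cs_data == NULL || buffer == NULL)
			return;

		count = amdgpu_gfx_csb_preamble_start(buffer);
		count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
		amdgpu_gfx_csb_preamble_end(buffer, count);
	}
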
/*
* debugfs for to enable/disable gfx job submission to specific core.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 87e862188766..08f268dab8f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -170,10 +170,46 @@ struct amdgpu_kiq {
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2
+/**
+ * amdgpu_rb_config - Configure a single Render Backend (RB)
+ *
+ * Bad RBs are fused off; the driver reads a harvest register to determine
+ * which RB(s) are fused off so that it can configure the hardware state to
+ * send nothing to them. There are also user
+ * harvest registers that the driver can program to disable additional RBs,
+ * etc., for testing purposes.
+ */
struct amdgpu_rb_config {
+ /**
+ * @rb_backend_disable:
+ *
+ * The value captured from register RB_BACKEND_DISABLE indicates if the
+ * RB backend is disabled or not.
+ */
uint32_t rb_backend_disable;
+
+ /**
+ * @user_rb_backend_disable:
+ *
+ * The value captured from register USER_RB_BACKEND_DISABLE indicates
+ * if the User RB backend is disabled or not.
+ */
uint32_t user_rb_backend_disable;
+
+ /**
+ * @raster_config:
+ *
+ * Two registers are needed to hold the full raster configuration
+ * state. This field holds the first one.
+ */
uint32_t raster_config;
+
+ /**
+ * @raster_config_1:
+ *
+ * Two registers are needed to hold the full raster configuration
+ * state. This field holds the second one.
+ */
uint32_t raster_config_1;
};
@@ -221,6 +257,13 @@ struct amdgpu_gfx_config {
uint32_t macrotile_mode_array[16];
struct gb_addr_config gb_addr_config_fields;
+
+ /**
+ * @rb_config:
+ *
+ * Matrix that keeps all the Render Backend (color and depth buffer
+ * handling) configuration on the 3D engine.
+ */
struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
/* gfx configure feature */
@@ -305,7 +348,8 @@ struct amdgpu_gfx_funcs {
void (*init_spm_golden)(struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
- struct amdgpu_gfx_shadow_info *shadow_info);
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check);
enum amdgpu_gfx_partition
(*query_partition_mode)(struct amdgpu_device *adev);
int (*switch_partition_mode)(struct amdgpu_device *adev,
@@ -474,9 +518,9 @@ struct amdgpu_gfx {
bool enable_cleaner_shader;
struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
/* Mutex for synchronizing KFD scheduler operations */
- struct mutex kfd_sch_mutex;
- u64 kfd_sch_req_count[MAX_XCP];
- bool kfd_sch_inactive[MAX_XCP];
+ struct mutex userq_sch_mutex;
+ u64 userq_sch_req_count[MAX_XCP];
+ bool userq_sch_inactive[MAX_XCP];
unsigned long enforce_isolation_jiffies[MAX_XCP];
unsigned long enforce_isolation_time[MAX_XCP];
@@ -484,6 +528,9 @@ struct amdgpu_gfx {
struct delayed_work idle_work;
bool workload_profile_active;
struct mutex workload_profile_mutex;
+
+ bool disable_kq;
+ bool disable_uq;
};
struct amdgpu_gfx_ras_reg_entry {
@@ -503,7 +550,7 @@ struct amdgpu_gfx_ras_mem_id_entry {
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
-#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si)))
+#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si), false))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
@@ -550,8 +597,6 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
- int pipe, int queue);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
@@ -597,6 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
+u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index ecb74ccf1d90..97b562a79ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -38,6 +38,13 @@
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
+static const u64 four_gb = 0x100000000ULL;
+
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
+{
+ return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev);
+}
+
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
*
@@ -251,10 +258,20 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
- mc->gart_start = hive_vram_end + 1;
+ /* node_segment_size may not be 4GB-aligned on SRIOV, so align up. */
+ mc->gart_start = ALIGN(hive_vram_end + 1, four_gb);
mc->gart_end = mc->gart_start + mc->gart_size - 1;
- mc->fb_start = hive_vram_start;
- mc->fb_end = hive_vram_end;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ /* set mc->vram_start to 0 to switch the returned GPU address of
+ * amdgpu_bo_create_reserved() from FB aperture to GART aperture.
+ */
+ mc->vram_start = 0;
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size);
+ } else {
+ mc->fb_start = hive_vram_start;
+ mc->fb_end = hive_vram_end;
+ }
dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
@@ -276,7 +293,6 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
enum amdgpu_gart_placement gart_placement)
{
- const uint64_t four_gb = 0x100000000ULL;
u64 size_af, size_bf;
/*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
@@ -1041,9 +1057,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
*/
u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
- u64 vram_addr = adev->vm_manager.vram_base_offset -
- adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- u64 vram_end = vram_addr + vram_size;
+ u64 vram_addr, vram_end;
u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
int idx;
@@ -1056,6 +1070,11 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
flags |= AMDGPU_PDE_PTE_FLAG(adev);
+ vram_addr = adev->vm_manager.vram_base_offset;
+ if (!amdgpu_virt_xgmi_migrate_enabled(adev))
+ vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ vram_end = vram_addr + vram_size;
+
/* The first n PDE0 entries are used as PTE,
* pointing to vram
*/
@@ -1230,6 +1249,10 @@ static ssize_t current_memory_partition_show(
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amdgpu_memory_partition mode;
+ /* Only minimal precaution taken to reject requests while in reset */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
if ((mode >= ARRAY_SIZE(nps_desc)) ||
(BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
@@ -1425,3 +1448,232 @@ bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev)
+{
+ switch (adev->gmc.num_mem_partitions) {
+ case 0:
+ return UNKNOWN_MEMORY_PARTITION_MODE;
+ case 1:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ case 2:
+ return AMDGPU_NPS2_PARTITION_MODE;
+ case 4:
+ return AMDGPU_NPS4_PARTITION_MODE;
+ case 8:
+ return AMDGPU_NPS8_PARTITION_MODE;
+ default:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ }
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
+{
+ enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->get_memory_partition_mode)
+ mode = adev->nbio.funcs->get_memory_partition_mode(adev,
+ supp_modes);
+ else
+ dev_warn(adev->dev, "memory partition mode query is not supported\n");
+
+ return mode;
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return amdgpu_gmc_get_vf_memory_partition(adev);
+ else
+ return amdgpu_gmc_get_memory_partition(adev, NULL);
+}
+
+static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition mode;
+ u32 supp_modes;
+ bool valid;
+
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware not present in supported modes */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
+ !(BIT(mode - 1) & supp_modes))
+ return false;
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ case AMDGPU_NPS1_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 1);
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 2);
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 3 ||
+ adev->gmc.num_mem_partitions == 4);
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 8);
+ break;
+ default:
+ valid = false;
+ }
+
+ return valid;
+}
+
+static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid)
+{
+ int i;
+
+ /* Check if node with id 'nid' is present in 'node_ids' array */
+ for (i = 0; i < num_ids; ++i)
+ if (node_ids[i] == nid)
+ return true;
+
+ return false;
+}
+
+static void
+amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ struct amdgpu_numa_info numa_info;
+ int node_ids[AMDGPU_MAX_MEM_RANGES];
+ int num_ranges = 0, ret;
+ int num_xcc, xcc_id;
+ uint32_t xcc_mask;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ xcc_mask = (1U << num_xcc) - 1;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+ if (ret)
+ continue;
+
+ if (numa_info.nid == NUMA_NO_NODE) {
+ mem_ranges[0].size = numa_info.size;
+ mem_ranges[0].numa.node = numa_info.nid;
+ num_ranges = 1;
+ break;
+ }
+
+ if (amdgpu_gmc_is_node_present(node_ids, num_ranges,
+ numa_info.nid))
+ continue;
+
+ node_ids[num_ranges] = numa_info.nid;
+ mem_ranges[num_ranges].numa.node = numa_info.nid;
+ mem_ranges[num_ranges].size = numa_info.size;
+ ++num_ranges;
+ }
+
+ adev->gmc.num_mem_partitions = num_ranges;
+}
+
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ enum amdgpu_memory_partition mode;
+ u32 start_addr = 0, size;
+ int i, r, l;
+
+ mode = amdgpu_gmc_query_memory_partition(adev);
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 0;
+ break;
+ case AMDGPU_NPS1_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 2;
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ if (adev->flags & AMD_IS_APU)
+ adev->gmc.num_mem_partitions = 3;
+ else
+ adev->gmc.num_mem_partitions = 4;
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 8;
+ break;
+ default:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ }
+
+ /* Use NPS range info, if populated */
+ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
+ &adev->gmc.num_mem_partitions);
+ if (!r) {
+ l = 0;
+ for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
+ if (mem_ranges[i].range.lpfn >
+ mem_ranges[i - 1].range.lpfn)
+ l = i;
+ }
+
+ } else {
+ if (!adev->gmc.num_mem_partitions) {
+ dev_warn(adev->dev,
+ "Not able to detect NPS mode, fall back to NPS1\n");
+ adev->gmc.num_mem_partitions = 1;
+ }
+ /* Fall back to SW-based calculation */
+ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
+ size /= adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ mem_ranges[i].range.fpfn = start_addr;
+ mem_ranges[i].size =
+ ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
+ mem_ranges[i].range.lpfn = start_addr + size - 1;
+ start_addr += size;
+ }
+
+ l = adev->gmc.num_mem_partitions - 1;
+ }
+
+ /* Adjust the last one */
+ mem_ranges[l].range.lpfn =
+ (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
+ mem_ranges[l].size =
+ adev->gmc.real_vram_size -
+ ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
+}
+
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
+{
+ bool valid;
+
+ adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES,
+ sizeof(struct amdgpu_mem_partition_info),
+ GFP_KERNEL);
+ if (!adev->gmc.mem_partitions)
+ return -ENOMEM;
+
+ if (adev->gmc.is_app_apu)
+ amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
+ else
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+
+ if (amdgpu_sriov_vf(adev))
+ valid = true;
+ else
+ valid = amdgpu_gmc_validate_partition_info(adev);
+ if (!valid) {
+ /* TODO: handle invalid case */
+ dev_warn(adev->dev,
+ "Mem ranges not matching with hardware config\n");
+ }
+
+ return 0;
+}
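
For reference, the SW fallback above splits the padded VRAM size evenly and then clamps the last range to the real VRAM size. A self-contained sketch of the same arithmetic, assuming a hypothetical 64 GiB dGPU in NPS4 mode (the page-shift and 16M pad mirror AMDGPU_GPU_PAGE_SHIFT and SZ_16M; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12			/* mirrors AMDGPU_GPU_PAGE_SHIFT */
#define PAD_16M (16ULL * 1024 * 1024)		/* mirrors SZ_16M */

int main(void)
{
	uint64_t real_vram_size = 64ULL << 30;	/* hypothetical 64 GiB board */
	unsigned int num_parts = 4;		/* NPS4 on a dGPU */
	uint64_t size, start = 0;

	/* Same steps as the fallback path in amdgpu_gmc_init_sw_mem_ranges():
	 * pad by 16M, convert to pages, divide evenly, then clamp the
	 * last range to the real VRAM size.
	 */
	size = (real_vram_size + PAD_16M) >> GPU_PAGE_SHIFT;
	size /= num_parts;

	for (unsigned int i = 0; i < num_parts; i++) {
		uint64_t fpfn = start, lpfn = start + size - 1;

		if (i == num_parts - 1)	/* adjust the last one */
			lpfn = (real_vram_size >> GPU_PAGE_SHIFT) - 1;
		printf("range %u: fpfn=%llu lpfn=%llu\n", i,
		       (unsigned long long)fpfn, (unsigned long long)lpfn);
		start += size;
	}
	return 0;
}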
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index bd7fc123b8f9..397c6ccdb903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -62,6 +62,9 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+/* XNACK flags */
+#define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0)
+
struct firmware;
enum amdgpu_memory_partition {
@@ -81,6 +84,8 @@ enum amdgpu_memory_partition {
#define AMDGPU_GMC_INIT_RESET_NPS BIT(0)
+#define AMDGPU_MAX_MEM_RANGES 8
+
/*
* GMC page fault information
*/
@@ -301,6 +306,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
int noretry;
+ uint32_t xnack_flags;
uint32_t vmid0_page_table_block_size;
uint32_t vmid0_page_table_depth;
@@ -390,6 +396,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
return addr;
}
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev);
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
@@ -451,5 +458,13 @@ int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
int nps_mode);
void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev);
bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev);
-
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev);
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes);
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev);
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev);
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index b6cf801939aa..6e02fb9ac2f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -22,6 +22,7 @@
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
+#include <uapi/linux/kfd_ioctl.h>
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
{
@@ -46,3 +47,22 @@ int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
/* hdp ras follows amdgpu_ras_block_late_init_default for late init */
return 0;
}
+
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32((adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ } else {
+ amdgpu_ring_emit_wreg(ring,
+ (adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
+}
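
The generic flush writes the remapped HDP_MEM_FLUSH_CNTL register, either directly over MMIO (with a get_memsize readback to order the write) or via the ring when it can emit register writes. A minimal sketch of how a per-IP HDP backend might adopt it; the table name here is hypothetical, while the .flush_hdp callback follows the existing amdgpu_hdp_funcs layout:

/* Hypothetical backend table; only the generic flush is wired up. */
static const struct amdgpu_hdp_funcs example_hdp_funcs = {
	.flush_hdp = amdgpu_hdp_generic_flush,
	/* .invalidate_hdp and friends elided */
};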
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 7b8a6152dc8d..4cfd932b7e91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -44,4 +44,6 @@ struct amdgpu_hdp {
};
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 8179d0814db9..57101d24422f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 2ea98ec60220..7d9bcb72e8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -128,6 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
+ struct amdgpu_fence *af;
bool need_ctx_switch;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -138,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
int vmid = AMDGPU_JOB_GET_VMID(job);
bool need_pipe_sync = false;
unsigned int cond_exec;
-
unsigned int i;
int r = 0;
@@ -154,6 +154,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = job->csa_va;
gds_va = job->gds_va;
init_shadow = job->init_shadow;
+ af = &job->hw_fence;
+ /* Save the context of the job for reset handling.
+ * The driver needs this so it can skip the ring
+ * contents for guilty contexts.
+ */
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
} else {
vm = NULL;
fence_ctx = 0;
@@ -161,14 +167,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
+ af = NULL;
}
- if (!ring->sched.ready && !ring->is_mes_queue) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
- if (vm && !job->vmid && !ring->is_mes_queue) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
@@ -282,7 +289,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
- r = amdgpu_fence_emit(ring, f, job, fence_flags);
+ r = amdgpu_fence_emit(ring, f, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -304,8 +311,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
ring->funcs->emit_wave_limit(ring, false);
+ /* Save the wptr associated with this fence.
+ * This must be done last: after a queue reset,
+ * the driver uses the saved wptr to determine
+ * which ring contents to back up.
+ */
+ amdgpu_fence_save_wptr(*f);
+
amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
+
return 0;
}
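
A hedged sketch of what the saved context and wptr enable (this is not the driver's actual reset path): after a per-queue reset, submissions whose fence context matches the guilty context can be dropped, while ring contents up to each innocent fence's saved wptr are backed up for replay. The wptr field on amdgpu_fence is assumed to be the one filled in by amdgpu_fence_save_wptr():

static void example_filter_submission(struct amdgpu_ring *ring,
				      struct amdgpu_fence *af,
				      u64 guilty_context)
{
	if (af->context == guilty_context)
		return;	/* skip ring contents of the guilty context */

	/* innocent: back up contents up to af->wptr for re-emission */
}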
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 4c4e087230ac..5dd78a9cb12d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -576,8 +576,16 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
INIT_LIST_HEAD(&id_mgr->ids_lru);
id_mgr->reserved_use_count = 0;
- /* manage only VMIDs not used by KFD */
- id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ else if (AMDGPU_IS_MMHUB0(i) || AMDGPU_IS_MMHUB1(i))
+ id_mgr->num_ids = 16;
+ else
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
/* skip over VMID 0, since it is the system VM */
for (j = 1; j < id_mgr->num_ids; ++j) {
@@ -588,7 +596,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
}
/* alloc a default reserved vmid to enforce isolation */
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
+ if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE)
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 901f8b12c672..a6419246e9c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_ih.h"
+#include "amdgpu_reset.h"
/**
* amdgpu_ih_ring_init - initialize the IH state
@@ -217,7 +218,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
restart_ih:
count = AMDGPU_IH_MAX_NUM_IVS;
- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
+ dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
@@ -227,13 +228,23 @@ restart_ih:
ih->rptr &= ih->ptr_mask;
}
- amdgpu_ih_set_rptr(adev, ih);
+ if (!ih->overflow)
+ amdgpu_ih_set_rptr(adev, ih);
+
wake_up_all(&ih->wait_process);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev, ih);
- if (wptr != ih->rptr)
- goto restart_ih;
+ if (wptr != ih->rptr && !ih->overflow)
+ goto restart_ih;
+
+ if (ih->overflow && amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue FLR work in %s",
+ __func__);
return IRQ_HANDLED;
}
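
On the producer side, ih->overflow is expected to be set by the IH get_wptr backend when the ring-buffer overflow bit is observed; the processing loop above then stops advancing the rptr and, under SR-IOV, schedules FLR work. A hedged sketch of such a backend; the overflow bit name is illustrative, not the real register field:

#define EXAMPLE_IH_WPTR_OVERFLOW_BIT (1U << 31)	/* illustrative */

static u32 example_ih_get_wptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	u32 wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & EXAMPLE_IH_WPTR_OVERFLOW_BIT) {
		wptr &= ~EXAMPLE_IH_WPTR_OVERFLOW_BIT;
		ih->overflow = true;	/* consumed by amdgpu_ih_process() */
	}
	return wptr & ih->ptr_mask;
}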
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index b0a88f92cd82..7f7ea046e209 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -72,6 +72,7 @@ struct amdgpu_ih_ring {
/* For waiting on IH processing at checkpoint. */
wait_queue_head_t wait_process;
uint64_t processed_timestamp;
+ bool overflow;
};
/* return true if time stamp t2 is after t1 with 48bit wrap around */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
new file mode 100644
index 000000000000..99e1cf4fc955
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ip.h"
+
+static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst)
+{
+ int8_t dev_inst;
+
+ switch (block) {
+ case GC_HWIP:
+ case SDMA0_HWIP:
+ /* Both JPEG and VCN as JPEG is only alias of VCN */
+ case VCN_HWIP:
+ dev_inst = adev->ip_map.dev_inst[block][inst];
+ break;
+ default:
+ /* For the rest of the IPs, no lookup is required.
+ * Assume 'logical instance == physical instance'
+ * for all configs.
+ */
+ dev_inst = inst;
+ break;
+ }
+
+ return dev_inst;
+}
+
+static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask)
+{
+ uint32_t dev_mask = 0;
+ int8_t log_inst, dev_inst;
+
+ while (mask) {
+ log_inst = ffs(mask) - 1;
+ dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst);
+ dev_mask |= (1 << dev_inst);
+ mask &= ~(1 << log_inst);
+ }
+
+ return dev_mask;
+}
+
+static void amdgpu_populate_ip_map(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type ip_block,
+ uint32_t inst_mask)
+{
+ int l = 0, i;
+
+ while (inst_mask) {
+ i = ffs(inst_mask) - 1;
+ adev->ip_map.dev_inst[ip_block][l++] = i;
+ inst_mask &= ~(1 << i);
+ }
+ for (; l < HWIP_MAX_INSTANCE; l++)
+ adev->ip_map.dev_inst[ip_block][l] = -1;
+}
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev)
+{
+ u32 ip_map[][2] = {
+ { GC_HWIP, adev->gfx.xcc_mask },
+ { SDMA0_HWIP, adev->sdma.sdma_mask },
+ { VCN_HWIP, adev->vcn.inst_mask },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
+ amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
+
+ adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst;
+ adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask;
+}
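
As a worked example of the mapping above: on a part where only physical VCN instances 1 and 3 survive harvesting, inst_mask = 0b1010, so dev_inst[VCN][0] = 1 and dev_inst[VCN][1] = 3; a logical mask of 0b11 then translates to a device mask of 0b1010. A self-contained re-implementation of the same bit walk, for illustration only:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

static uint32_t to_dev_mask(const int8_t *dev_inst, uint32_t mask)
{
	uint32_t dev_mask = 0;

	while (mask) {
		int log_inst = ffs(mask) - 1;

		dev_mask |= 1U << dev_inst[log_inst];
		mask &= ~(1U << log_inst);
	}
	return dev_mask;
}

int main(void)
{
	int8_t dev_inst[] = { 1, 3 };	/* harvested: logical 0->1, 1->3 */

	printf("0x%x\n", to_dev_mask(dev_inst, 0x3));	/* prints 0xa */
	return 0;
}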
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
index bea255529993..2490fd322aec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +18,12 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-static const struct nvkm_engine_func
-ga102_ofa = {
- .sclass = {
- { -1, -1, NVC7FA_VIDEO_OFA },
- {}
- }
-};
+#ifndef __AMDGPU_IP_H__
+#define __AMDGPU_IP_H__
-int
-ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga102_ofa, device, type, inst, pengine);
+void amdgpu_ip_map_init(struct amdgpu_device *adev);
- return -ENODEV;
-}
+#endif /* __AMDGPU_IP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 19ce4da285e8..8112ffc85995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -142,8 +142,9 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
r = src->funcs->set(adev, src, k,
AMDGPU_IRQ_STATE_DISABLE);
if (r)
- DRM_ERROR("error disabling interrupt (%d)\n",
- r);
+ dev_err(adev->dev,
+ "error disabling interrupt (%d)\n",
+ r);
}
}
}
@@ -242,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
-static void amdgpu_restore_msix(struct amdgpu_device *adev)
+void amdgpu_restore_msix(struct amdgpu_device *adev)
{
u16 ctrl;
@@ -315,7 +316,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.irq = irq;
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
- DRM_DEBUG("amdgpu: irq initialized.\n");
+ dev_dbg(adev->dev, "amdgpu: irq initialized.\n");
return 0;
free_vectors:
@@ -461,10 +462,10 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
src_id = entry.src_id;
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
- DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+ dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id);
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
- DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
+ dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id);
} else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
(client_id == SOC15_IH_CLIENTID_ISP)) &&
@@ -472,18 +473,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
generic_handle_domain_irq(adev->irq.domain, src_id);
} else if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
+ dev_dbg(adev->dev,
+ "Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
} else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, &entry);
if (r < 0)
- DRM_ERROR("error processing interrupt (%d)\n", r);
+ dev_err(adev->dev, "error processing interrupt (%d)\n",
+ r);
else if (r)
handled = true;
} else {
- DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n",
+ dev_dbg(adev->dev,
+ "Unregistered interrupt src_id: %d of client_id:%d\n",
src_id, client_id);
}
@@ -619,6 +623,10 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
+ /* When the threshold is reached, the interrupt source may not
+ * be enabled; return -EINVAL in that case.
+ */
+ if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
+ return -EINVAL;
+
if (!adev->irq.installed)
return -ENOENT;
@@ -725,10 +733,10 @@ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
- adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
- &amdgpu_hw_irqdomain_ops, adev);
+ adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
+ &amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
- DRM_ERROR("GPU irq add domain failed\n");
+ dev_err(adev->dev, "GPU irq add domain failed\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 04c0b4fa17a4..9f0417456abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -146,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id);
+void amdgpu_restore_msix(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 43fc941dfa57..9cddbf50442a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -33,6 +33,8 @@
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"
+#define ISP_MC_ADDR_ALIGN (1024 * 32)
+
/**
* isp_hw_init - start and test isp block
*
@@ -141,6 +143,179 @@ static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
+static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
+{
+ if (isp_parent != amdgpu_dev)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * isp_user_buffer_alloc - create user buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @dmabuf: DMABUF handle for isp buffer allocated in system memory
+ * @buf_obj: GPU buffer object handle to initialize
+ * @buf_addr: GPU addr of the pinned BO to initialize
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal
+ * use. It performs a GART allocation to generate a GPU address for the
+ * BO, making it accessible to the ISP HW through the GART aperture.
+ *
+ * This function is exported to allow the V4L2 isp device, which is
+ * external to the drm device, to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ struct amdgpu_bo *bo;
+ u64 gpu_addr;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!buf_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_isp_user(adev, dmabuf,
+ AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret);
+ return ret;
+ }
+
+ *buf_obj = (void *)bo;
+ *buf_addr = gpu_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_user_buffer_alloc);
+
+/**
+ * isp_user_buffer_free - free isp user buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp user BO to free
+ *
+ * Unpins and unrefs a user BO used internally by isp.
+ *
+ * This function is exported to allow the V4L2 isp device, which is
+ * external to the drm device, to free the isp user BO.
+ */
+void isp_user_buffer_free(void *buf_obj)
+{
+ amdgpu_bo_free_isp_user(buf_obj);
+}
+EXPORT_SYMBOL(isp_user_buffer_free);
+
+/**
+ * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @size: size for the new BO
+ * @buf_obj: GPU BO handle to initialize
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: CPU address mapping of BO
+ *
+ * Allocates and pins a kernel BO for internal isp firmware use.
+ *
+ * This function is exported to allow the V4L2 isp device, which is
+ * external to the drm device, to create and access the kernel BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!gpu_addr))
+ return -EINVAL;
+
+ if (WARN_ON(!cpu_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ size,
+ ISP_MC_ADDR_ALIGN,
+ AMDGPU_GEM_DOMAIN_GTT,
+ bo,
+ gpu_addr,
+ cpu_addr);
+ if (!cpu_addr || ret) {
+ drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_kernel_buffer_alloc);
+
+/**
+ * isp_kernel_buffer_free - free isp kernel buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp kernel BO to free
+ * @gpu_addr: GPU addr of isp kernel BO
+ * @cpu_addr: CPU addr of isp kernel BO
+ *
+ * Unmaps and unpins an isp kernel BO.
+ *
+ * This function is exported to allow the V4L2 isp device, which is
+ * external to the drm device, to free the kernel BO.
+ */
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+
+ amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
+}
+EXPORT_SYMBOL(isp_kernel_buffer_free);
+
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
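
A hedged usage sketch from the consumer side: a hypothetical V4L2 isp driver holding the MFD platform device would pair the alloc/free helpers like this (SZ_1M from linux/sizes.h; function and variable names are illustrative):

static int example_map_fw_buffer(struct device *isp_dev)
{
	void *bo = NULL, *cpu = NULL;
	u64 gpu = 0;
	int ret;

	ret = isp_kernel_buffer_alloc(isp_dev, SZ_1M, &bo, &gpu, &cpu);
	if (ret)
		return ret;

	/* program ISP firmware with 'gpu', fill contents via 'cpu' */

	isp_kernel_buffer_free(&bo, &gpu, &cpu);
	return 0;
}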
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index 4f3b7b5d9c1f..d6f4ffa4c97c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -28,16 +28,13 @@
#ifndef __AMDGPU_ISP_H__
#define __AMDGPU_ISP_H__
+#include <drm/amd/isp.h>
+#include <linux/pm_domain.h>
+
#define ISP_REGS_OFFSET_END 0x629A4
struct amdgpu_isp;
-struct isp_platform_data {
- void *adev;
- u32 asic_type;
- resource_size_t base_rmmio_size;
-};
-
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
@@ -54,6 +51,7 @@ struct amdgpu_isp {
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
+ struct generic_pm_domain ispgpd;
};
extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index acb21fc8b3ce..9b1c55115921 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,10 +89,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
- struct amdgpu_task_info *ti;
+ struct drm_wedge_task_info *info = NULL;
+ struct amdgpu_task_info *ti = NULL;
struct amdgpu_device *adev = ring->adev;
- int idx;
- int r;
+ int idx, r;
if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
@@ -112,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_job_core_dump(adev, job);
if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
s_job->sched->name);
@@ -124,53 +125,30 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
if (ti) {
- dev_err(adev->dev,
- "Process information: process %s pid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
- amdgpu_vm_put_task_info(ti);
+ amdgpu_vm_print_task_info(adev, ti);
+ info = &ti->task;
}
/* attempt a per ring reset */
if (unlikely(adev->debug_disable_gpu_ring_reset)) {
dev_err(adev->dev, "Ring reset disabled by debug mask\n");
- } else if (amdgpu_gpu_recovery && ring->funcs->reset) {
- bool is_guilty;
-
- dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name);
- /* stop the scheduler, but don't mess with the
- * bad job yet because if ring reset fails
- * we'll fall back to full GPU reset.
- */
- drm_sched_wqueue_stop(&ring->sched);
-
- /* for engine resets, we need to reset the engine,
- * but individual queues may be unaffected.
- * check here to make sure the accounting is correct.
- */
- if (ring->funcs->is_guilty)
- is_guilty = ring->funcs->is_guilty(ring);
- else
- is_guilty = true;
-
- if (is_guilty)
- dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
-
- r = amdgpu_ring_reset(ring, job->vmid);
+ } else if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
+ ring->funcs->reset) {
+ dev_err(adev->dev, "Starting %s ring reset\n",
+ s_job->sched->name);
+ r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
if (!r) {
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_stop(&ring->sched, s_job);
- if (is_guilty) {
- atomic_inc(&ring->adev->gpu_reset_counter);
- amdgpu_fence_driver_force_completion(ring);
- }
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_start(&ring->sched, 0);
- dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name);
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ dev_err(adev->dev, "Ring %s reset succeeded\n",
+ ring->sched.name);
+ drm_dev_wedged_event(adev_to_drm(adev),
+ DRM_WEDGE_RECOVERY_NONE, info);
goto exit;
}
- dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
+ dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
}
+
dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
if (amdgpu_device_should_recover_gpu(ring->adev)) {
@@ -198,13 +176,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
+ amdgpu_vm_put_task_info(ti);
drm_dev_exit(idx);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job)
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id)
{
if (num_ibs == 0)
return -EINVAL;
@@ -222,7 +202,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, 1, owner);
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner,
+ drm_client_id);
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
@@ -232,7 +213,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0);
if (r)
return r;
@@ -272,8 +253,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 /* Check if any fences were initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
- else if (job->hw_fence.ops)
- f = &job->hw_fence;
+ else if (job->hw_fence.base.ops)
+ f = &job->hw_fence.base;
else
f = NULL;
@@ -290,10 +271,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
/* only put the hw fence if has embedded fence */
- if (!job->hw_fence.ops)
+ if (!job->hw_fence.base.ops)
kfree(job);
else
- dma_fence_put(&job->hw_fence);
+ dma_fence_put(&job->hw_fence.base);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -322,10 +303,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (!job->hw_fence.ops)
+ if (!job->hw_fence.base.ops)
kfree(job);
else
- dma_fence_put(&job->hw_fence);
+ dma_fence_put(&job->hw_fence.base);
}
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
@@ -384,13 +365,6 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
- /*
- * The VM structure might be released after the VMID is
- * assigned, we had multiple problems with people trying to use
- * the VM pointer so better set it to NULL.
- */
- if (!fence)
- job->vm = NULL;
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index ce6b9ba967ff..2f302266662b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -48,7 +48,7 @@ struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
- struct dma_fence hw_fence;
+ struct amdgpu_fence hw_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
@@ -78,6 +78,7 @@ struct amdgpu_job {
/* enforce isolation */
bool enforce_isolation;
+ bool run_cleaner_shader;
uint32_t num_ibs;
struct amdgpu_ib ibs[];
@@ -90,7 +91,8 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job);
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
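
Callers now thread a DRM client id through job allocation so the scheduler can attribute work to a client. A hypothetical call site; taking the id from the submitting drm_file's client_id field is an assumption modeled on how the CS path obtains it:

static int example_alloc_job(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm,
			     struct drm_sched_entity *entity,
			     struct drm_file *filp)
{
	struct amdgpu_job *job;

	/* one IB; id taken from the submitting drm_file (assumed field) */
	return amdgpu_job_alloc(adev, vm, entity, filp, 1, &job,
				filp->client_id);
}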
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index dda29132dfb2..82d58ac7afb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -463,7 +463,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count,
sizeof(uint32_t), GFP_KERNEL);
if (!adev->jpeg.ip_dump) {
- DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n");
+ dev_err(adev->dev,
+ "Failed to allocate memory for JPEG IP Dump\n");
return -ENOMEM;
}
adev->jpeg.reg_list = reg;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 27bfe9c8af06..8a76960803c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -45,6 +45,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amd_pcie.h"
+#include "amdgpu_userq.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -90,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
return;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD))
DRM_WARN("smart shift update failed\n");
amdgpu_acpi_fini(adev);
@@ -160,7 +161,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (acpi_status)
dev_dbg(dev->dev, "Error during ACPI methods call\n");
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
out:
@@ -370,6 +371,26 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
return 0;
}
+static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev,
+ struct drm_amdgpu_info *info,
+ struct drm_amdgpu_info_uq_metadata_gfx *meta)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow = {};
+
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true);
+ meta->shadow_size = shadow.shadow_size;
+ meta->shadow_alignment = shadow.shadow_alignment;
+ meta->csa_size = shadow.csa_size;
+ meta->csa_alignment = shadow.csa_alignment;
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
struct drm_amdgpu_info *info,
struct drm_amdgpu_info_hw_ip *result)
@@ -378,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
uint32_t ib_size_alignment = 0;
enum amd_ip_block_type type;
unsigned int num_rings = 0;
+ uint32_t num_slots = 0;
unsigned int i, j;
if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
@@ -387,24 +409,45 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- if (adev->gfx.gfx_ring[i].sched.ready)
+ if (adev->gfx.gfx_ring[i].sched.ready &&
+ !adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
+ num_slots += hweight32(adev->mes.gfx_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- if (adev->gfx.compute_ring[i].sched.ready)
+ if (adev->gfx.compute_ring[i].sched.ready &&
+ !adev->gfx.compute_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->sdma.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++)
+ num_slots += hweight32(adev->mes.compute_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (adev->sdma.instance[i].ring.sched.ready)
+ if (adev->sdma.instance[i].ring.sched.ready &&
+ !adev->sdma.instance[i].ring.no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++)
+ num_slots += hweight32(adev->mes.sdma_hqd_mask[i]);
+ }
+
ib_start_alignment = 256;
ib_size_alignment = 4;
break;
@@ -414,7 +457,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->uvd.harvest_config & (1 << i))
continue;
- if (adev->uvd.inst[i].ring.sched.ready)
+ if (adev->uvd.inst[i].ring.sched.ready &&
+ !adev->uvd.inst[i].ring.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -423,7 +467,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- if (adev->vce.ring[i].sched.ready)
+ if (adev->vce.ring[i].sched.ready &&
+ !adev->vce.ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -435,7 +480,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- if (adev->uvd.inst[i].ring_enc[j].sched.ready)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
+ !adev->uvd.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -447,7 +493,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->vcn.inst[i].ring_dec.sched.ready)
+ if (adev->vcn.inst[i].ring_dec.sched.ready &&
+ !adev->vcn.inst[i].ring_dec.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -460,7 +507,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
- if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+ if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
+ !adev->vcn.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -475,7 +523,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
- if (adev->jpeg.inst[i].ring_dec[j].sched.ready)
+ if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
+ !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -483,7 +532,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VPE:
type = AMD_IP_BLOCK_TYPE_VPE;
- if (adev->vpe.ring.sched.ready)
+ if (adev->vpe.ring.sched.ready &&
+ !adev->vpe.ring.no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -539,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
}
result->capabilities_flags = 0;
result->available_rings = (1 << num_rings) - 1;
+ result->userq_num_slots = num_slots;
result->ib_start_alignment = ib_start_alignment;
result->ib_size_alignment = ib_size_alignment;
return 0;
@@ -978,6 +1029,8 @@ out:
}
}
+ dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
@@ -1293,6 +1346,22 @@ out:
return copy_to_user(out, &gpuvm_fault,
min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_UQ_FW_AREAS: {
+ struct drm_amdgpu_info_uq_metadata meta_info = {};
+
+ switch (info->query_hw_ip.type) {
+ case AMDGPU_HW_IP_GFX:
+ ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(out, &meta_info,
+ min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1346,6 +1415,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (r)
goto error_pasid;
+ amdgpu_debugfs_vm_init(file_priv);
+
r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
if (r)
goto error_pasid;
@@ -1376,6 +1447,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
+ r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+ if (r)
+ DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
+
+ r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
+ if (r)
+ goto error_vm;
+
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
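
From userspace, the new AMDGPU_INFO_UQ_FW_AREAS query can be issued through the standard amdgpu INFO ioctl. A hedged sketch using libdrm-style plumbing; the struct layout follows include/uapi/drm/amdgpu_drm.h after this change:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int query_uq_fw_areas(int fd, struct drm_amdgpu_info_uq_metadata *meta)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)meta;	/* filled by the kernel */
	request.return_size = sizeof(*meta);
	request.query = AMDGPU_INFO_UQ_FW_AREAS;
	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;	/* only GFX handled above */

	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request,
			       sizeof(request));
}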
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fb212f0a1136..135598502c8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -39,42 +39,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
PAGE_SIZE);
}
-static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
- int ip_type, uint64_t *doorbell_index)
-{
- unsigned int offset, found;
- struct amdgpu_mes *mes = &adev->mes;
-
- if (ip_type == AMDGPU_RING_TYPE_SDMA)
- offset = adev->doorbell_index.sdma_engine[0];
- else
- offset = 0;
-
- found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
- if (found >= mes->num_mes_dbs) {
- DRM_WARN("No doorbell available\n");
- return -ENOSPC;
- }
-
- set_bit(found, mes->doorbell_bitmap);
-
- /* Get the absolute doorbell index on BAR */
- *doorbell_index = mes->db_start_dw_offset + found * 2;
- return 0;
-}
-
-static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
- uint32_t doorbell_index)
-{
- unsigned int old, rel_index;
- struct amdgpu_mes *mes = &adev->mes;
-
- /* Find the relative index of the doorbell in this object */
- rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
- old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
- WARN_ON(!old);
-}
-
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
int i;
@@ -83,7 +47,7 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
/* Bitmap for dynamic allocation of kernel doorbells */
mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
if (!mes->doorbell_bitmap) {
- DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
+ dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n");
return -ENOMEM;
}
@@ -126,7 +90,7 @@ static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
int amdgpu_mes_init(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, num_pipes;
adev->mes.adev = adev;
@@ -142,19 +106,52 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = 0xffffff00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+
+ num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
+ if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
+ dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_GFX_PIPES);
+
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
+ if (i >= num_pipes)
+ break;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(12, 0, 0))
+ /*
+ * GFX v12 has only one GFX pipe, but 8 queues in it.
+ * GFX pipe 0 queue 0 is used by the kernel queue.
+ * Set GFX pipe 0 queues 1-7 for MES scheduling:
+ * mask = 1111 1110b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
+ else
+ /*
+ * GFX pipe 0 queue 0 is used by the kernel queue.
+ * Set GFX pipe 0 queue 1 for MES scheduling:
+ * mask = 10b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
+ }
+
+ num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
+ if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
+ dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
- if (i >= (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec))
+ if (i >= num_pipes)
break;
- adev->mes.compute_hqd_mask[i] = 0xc;
+ adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
}
- for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
- adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
+ num_pipes = adev->sdma.num_instances;
+ if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
+ dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
- if (i >= adev->sdma.num_instances)
+ if (i >= num_pipes)
break;
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
@@ -240,244 +237,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->mes.mutex_hidden);
}
-static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
-{
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
-}
-
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm)
-{
- struct amdgpu_mes_process *process;
- int r;
-
- /* allocate the mes process buffer */
- process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
- if (!process) {
- DRM_ERROR("no more memory to create mes process\n");
- return -ENOMEM;
- }
-
- /* allocate the process context bo and map it */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_memory;
- }
- memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* add the mes process to idr list */
- r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to lock pasid=%d\n", pasid);
- goto clean_up_ctx;
- }
-
- INIT_LIST_HEAD(&process->gang_list);
- process->vm = vm;
- process->pasid = pasid;
- process->process_quantum = adev->mes.default_process_quantum;
- process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
-clean_up_memory:
- kfree(process);
- return r;
-}
-
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang, *tmp1;
- struct amdgpu_mes_queue *queue, *tmp2;
- struct mes_remove_queue_input queue_input;
- unsigned long flags;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_WARN("pasid %d doesn't exist\n", pasid);
- amdgpu_mes_unlock(&adev->mes);
- return;
- }
-
- /* Remove all queues from hardware */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes,
- &queue_input);
- if (r)
- DRM_WARN("failed to remove hardware queue\n");
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- }
-
- idr_remove(&adev->mes.pasid_idr, pasid);
- amdgpu_mes_unlock(&adev->mes);
-
- /* free all memory allocated by the process */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- /* free all queues in the gang */
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- amdgpu_mes_queue_free_mqd(queue);
- list_del(&queue->list);
- kfree(queue);
- }
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- list_del(&gang->list);
- kfree(gang);
-
- }
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- kfree(process);
-}
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
- int r;
-
- /* allocate the mes gang buffer */
- gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
- if (!gang) {
- return -ENOMEM;
- }
-
- /* allocate the gang context bo and map it to cpu space */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_mem;
- }
- memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_ERROR("pasid %d doesn't exist\n", pasid);
- r = -EINVAL;
- goto clean_up_ctx;
- }
-
- /* add the mes gang to idr list */
- r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to allocate idr for gang\n");
- goto clean_up_ctx;
- }
-
- gang->gang_id = r;
- *gang_id = r;
-
- INIT_LIST_HEAD(&gang->queue_list);
- gang->process = process;
- gang->priority = gprops->priority;
- gang->gang_quantum = gprops->gang_quantum ?
- gprops->gang_quantum : adev->mes.default_gang_quantum;
- gang->global_priority_level = gprops->global_priority_level;
- gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
- list_add_tail(&gang->list, &process->gang_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-clean_up_mem:
- kfree(gang);
- return r;
-}
-
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
-{
- struct amdgpu_mes_gang *gang;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
-
- if (!list_empty(&gang->queue_list)) {
- DRM_ERROR("queue list is not empty\n");
- amdgpu_mes_unlock(&adev->mes);
- return -EBUSY;
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- list_del(&gang->list);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-
- kfree(gang);
-
- return 0;
-}
-
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
struct mes_suspend_gang_input input;
@@ -497,7 +256,7 @@ int amdgpu_mes_suspend(struct amdgpu_device *adev)
r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to suspend all gangs");
+ dev_err(adev->dev, "failed to suspend all gangs");
return r;
}
@@ -521,306 +280,8 @@ int amdgpu_mes_resume(struct amdgpu_device *adev)
r = adev->mes.funcs->resume_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to resume all gangs");
-
- return r;
-}
-
-static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- u32 mqd_size = mqd_mgr->mqd_size;
- int r;
-
- r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &q->mqd_obj,
- &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
- if (r) {
- dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
- return r;
- }
- memset(q->mqd_cpu_ptr, 0, mqd_size);
-
- r = amdgpu_bo_reserve(q->mqd_obj, false);
- if (unlikely(r != 0))
- goto clean_up;
-
- return 0;
-
-clean_up:
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
- return r;
-}
-
-static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- struct amdgpu_mqd_prop mqd_prop = {0};
-
- mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
- mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
- mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
- mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
- mqd_prop.queue_size = p->queue_size;
- mqd_prop.use_doorbell = true;
- mqd_prop.doorbell_index = p->doorbell_off;
- mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
- mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
- mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
- mqd_prop.hqd_active = false;
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- mutex_lock(&adev->srbm_mutex);
- amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
- }
-
- mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- }
-
- amdgpu_bo_unreserve(q->mqd_obj);
-}
-
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id)
-{
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_add_queue_input queue_input;
- unsigned long flags;
- int r;
-
- memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
-
- /* allocate the mes queue buffer */
- queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
- if (!queue) {
- DRM_ERROR("Failed to allocate memory for queue\n");
- return -ENOMEM;
- }
-
- /* Allocate the queue mqd */
- r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
- if (r)
- goto clean_up_memory;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- r = -EINVAL;
- goto clean_up_mqd;
- }
-
- /* add the mes gang to idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
- GFP_ATOMIC);
- if (r < 0) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- goto clean_up_mqd;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- *queue_id = queue->queue_id = r;
-
- /* allocate a doorbell index for the queue */
- r = amdgpu_mes_kernel_doorbell_get(adev,
- qprops->queue_type,
- &qprops->doorbell_off);
- if (r)
- goto clean_up_queue_id;
-
- /* initialize the queue mqd */
- amdgpu_mes_queue_init_mqd(adev, queue, qprops);
-
- /* add hw queue to mes */
- queue_input.process_id = gang->process->pasid;
-
- queue_input.page_table_base_addr =
- adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
- adev->gmc.vram_start;
-
- queue_input.process_va_start = 0;
- queue_input.process_va_end =
- (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
- queue_input.process_quantum = gang->process->process_quantum;
- queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
- queue_input.gang_quantum = gang->gang_quantum;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
- queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
- queue_input.gang_global_priority_level = gang->global_priority_level;
- queue_input.doorbell_offset = qprops->doorbell_off;
- queue_input.mqd_addr = queue->mqd_gpu_addr;
- queue_input.wptr_addr = qprops->wptr_gpu_addr;
- queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
- queue_input.queue_type = qprops->queue_type;
- queue_input.paging = qprops->paging;
- queue_input.is_kfd_process = 0;
-
- r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
- if (r) {
- DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
- qprops->doorbell_off);
- goto clean_up_doorbell;
- }
-
- DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
- "queue type=%d, doorbell=0x%llx\n",
- gang->process->pasid, gang_id, qprops->queue_type,
- qprops->doorbell_off);
-
- queue->ring = qprops->ring;
- queue->doorbell_off = qprops->doorbell_off;
- queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
- queue->queue_type = qprops->queue_type;
- queue->paging = qprops->paging;
- queue->gang = gang;
- queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
- list_add_tail(&queue->list, &gang->queue_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_doorbell:
- amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
-clean_up_queue_id:
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-clean_up_mqd:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_mes_queue_free_mqd(queue);
-clean_up_memory:
- kfree(queue);
- return r;
-}
-
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_remove_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
-
- idr_remove(&adev->mes.queue_id_idr, queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
- queue_id);
-
- list_del(&queue->list);
- amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_mes_queue_free_mqd(queue);
- kfree(queue);
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_reset_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
- queue_id);
+ dev_err(adev->dev, "failed to resume all gangs");
- amdgpu_mes_unlock(&adev->mes);
-
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid)
-{
- struct mes_reset_queue_input queue_input;
- int r;
-
- queue_input.queue_type = queue_type;
- queue_input.use_mmio = true;
- queue_input.me_id = me_id;
- queue_input.pipe_id = pipe_id;
- queue_input.queue_id = queue_id;
- queue_input.vmid = vmid;
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
- queue_id);
return r;
}
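
The hunks that follow apply the same two-part cleanup over and over: calls into adev->mes.funcs are now serialized by taking the MES lock around just the firmware call, and DRM_ERROR reporting becomes device-qualified dev_err. A minimal sketch of the resulting call shape — the function name and error text here are illustrative, not from the patch:

static int amdgpu_mes_misc_op_sketch(struct amdgpu_device *adev,
                                     struct mes_misc_op_input *input)
{
        int r;

        /* hold the MES lock only across the firmware call itself */
        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->misc_op(&adev->mes, input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "MES misc op failed (%d)\n", r);

        return r;
}
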
@@ -839,9 +300,11 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
queue_input.wptr_addr = ring->wptr_gpu_addr;
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to map legacy queue\n");
+ dev_err(adev->dev, "failed to map legacy queue\n");
return r;
}
@@ -862,9 +325,11 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
queue_input.trail_fence_addr = gpu_addr;
queue_input.trail_fence_data = seq;
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to unmap legacy queue\n");
+ dev_err(adev->dev, "failed to unmap legacy queue\n");
return r;
}
@@ -874,7 +339,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio)
{
- struct mes_reset_legacy_queue_input queue_input;
+ struct mes_reset_queue_input queue_input;
int r;
memset(&queue_input, 0, sizeof(queue_input));
@@ -888,10 +353,15 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
queue_input.wptr_addr = ring->wptr_gpu_addr;
queue_input.vmid = vmid;
queue_input.use_mmio = use_mmio;
+ queue_input.is_kq = true;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ queue_input.legacy_gfx = true;
- r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reset legacy queue\n");
+ dev_err(adev->dev, "failed to reset legacy queue\n");
return r;
}
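
Reassembled from the interleaved -/+ lines above, the tail of amdgpu_mes_reset_legacy_queue() now reads as below: the dedicated reset_legacy_queue hook is gone, and kernel ("legacy") queues are routed through reset_hw_queue() with the new is_kq/legacy_gfx flags defined in the amdgpu_mes.h hunks further down:

        queue_input.is_kq = true;
        if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
                queue_input.legacy_gfx = true;

        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to reset legacy queue\n");

        return r;
}
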
@@ -905,7 +375,7 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
uint32_t *read_val_ptr;
if (amdgpu_device_wb_get(adev, &addr_offset)) {
- DRM_ERROR("critical bug! too many mes readers\n");
+ dev_err(adev->dev, "critical bug! too many mes readers\n");
goto error;
}
read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
@@ -915,13 +385,15 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
op_input.read_reg.buffer_addr = read_val_gpu_addr;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes rreg is not supported!\n");
+ dev_err(adev->dev, "mes rreg is not supported!\n");
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to read reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
else
val = *(read_val_ptr);
@@ -942,14 +414,16 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
op_input.write_reg.reg_value = val;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes wreg is not supported!\n");
+ dev_err(adev->dev, "mes wreg is not supported!\n");
r = -EINVAL;
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to write reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);
error:
return r;
@@ -969,39 +443,16 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
op_input.wrm_reg.mask = mask;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
- r = -EINVAL;
- goto error;
- }
-
- r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
- if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
-
-error:
- return r;
-}
-
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- struct mes_misc_op_input op_input;
- int r;
-
- op_input.op = MES_MISC_OP_WRM_REG_WAIT;
- op_input.wrm_reg.reg0 = reg;
- op_input.wrm_reg.ref = val;
- op_input.wrm_reg.mask = mask;
-
- if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg wait is not supported!\n");
+ dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
r = -EINVAL;
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
+ dev_err(adev->dev, "failed to reg_write_reg_wait\n");
error:
return r;
@@ -1018,7 +469,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes set shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes set shader debugger is not supported!\n");
return -EINVAL;
}
@@ -1042,7 +494,7 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
@@ -1056,7 +508,8 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes flush shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes flush shader debugger is not supported!\n");
return -EINVAL;
}
@@ -1068,515 +521,19 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
-
- amdgpu_mes_unlock(&adev->mes);
-
- return r;
-}
-
-static void
-amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_mes_queue_properties *props)
-{
- props->queue_type = ring->funcs->type;
- props->hqd_base_gpu_addr = ring->gpu_addr;
- props->rptr_gpu_addr = ring->rptr_gpu_addr;
- props->wptr_gpu_addr = ring->wptr_gpu_addr;
- props->wptr_mc_addr =
- ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
- props->queue_size = ring->ring_size;
- props->eop_gpu_addr = ring->eop_gpu_addr;
- props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
- props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
- props->paging = false;
- props->ring = ring;
-}
-
-#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
-do { \
- if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].slots[id_offs]); \
- else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].ring); \
- else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].ib); \
- else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].padding); \
-} while(0)
-
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
-{
- switch (ring->funcs->type) {
- case AMDGPU_RING_TYPE_GFX:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
- break;
- case AMDGPU_RING_TYPE_SDMA:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
- break;
- default:
- break;
- }
-
- WARN_ON(1);
- return -EINVAL;
-}
-
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang *gang;
- struct amdgpu_mes_queue_properties qprops = {0};
- int r, queue_id, pasid;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
- pasid = gang->process->pasid;
-
- ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
- if (!ring) {
- amdgpu_mes_unlock(&adev->mes);
- return -ENOMEM;
- }
-
- ring->ring_obj = NULL;
- ring->use_doorbell = true;
- ring->is_mes_queue = true;
- ring->mes_ctx = ctx_data;
- ring->idx = idx;
- ring->no_scheduler = true;
-
- if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- compute[ring->idx].mec_hpd);
- ring->eop_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- }
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
- switch (queue_type) {
- case AMDGPU_RING_TYPE_GFX:
- ring->funcs = adev->gfx.gfx_ring[0].funcs;
- ring->me = adev->gfx.gfx_ring[0].me;
- ring->pipe = adev->gfx.gfx_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- ring->funcs = adev->gfx.compute_ring[0].funcs;
- ring->me = adev->gfx.compute_ring[0].me;
- ring->pipe = adev->gfx.compute_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ring->funcs = adev->sdma.instance[0].ring.funcs;
- break;
- default:
- BUG();
- }
-
- r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r) {
- amdgpu_mes_unlock(&adev->mes);
- goto clean_up_memory;
- }
-
- amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
-
- dma_fence_wait(gang->process->vm->last_update, false);
- dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
amdgpu_mes_unlock(&adev->mes);
- r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
- if (r)
- goto clean_up_ring;
-
- ring->hw_queue_id = queue_id;
- ring->doorbell_index = qprops.doorbell_off;
-
- if (queue_type == AMDGPU_RING_TYPE_GFX)
- sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
- sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
- queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_SDMA)
- sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
- queue_id);
- else
- BUG();
-
- *out = ring;
- return 0;
-
-clean_up_ring:
- amdgpu_ring_fini(ring);
-clean_up_memory:
- kfree(ring);
return r;
}
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring)
- return;
-
- amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
- timer_delete_sync(&ring->fence_drv.fallback_timer);
- amdgpu_ring_fini(ring);
- kfree(ring);
-}
-
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio)
{
return adev->mes.aggregated_doorbells[prio];
}
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- int r;
-
- r = amdgpu_bo_create_kernel(adev,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
- return r;
- }
-
- if (!ctx_data->meta_data_obj)
- return -ENOMEM;
-
- memset(ctx_data->meta_data_ptr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data));
-
- return 0;
-}
-
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
-{
- if (ctx_data->meta_data_obj)
- amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
-}
-
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va;
- struct amdgpu_sync sync;
- struct drm_exec exec;
- int r;
-
- amdgpu_sync_create(&sync);
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
- }
-
- bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
- if (!bo_va) {
- DRM_ERROR("failed to create bo_va for meta data BO\n");
- r = -ENOMEM;
- goto error_fini_exec;
- }
-
- r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
- goto error_del_bo_va;
- }
-
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- DRM_ERROR("failed to do vm_bo_update on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, bo_va->last_pt_update, GFP_KERNEL);
-
- r = amdgpu_vm_update_pdes(adev, vm, false);
- if (r) {
- DRM_ERROR("failed to update pdes on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, vm->last_update, GFP_KERNEL);
-
- amdgpu_sync_wait(&sync, false);
- drm_exec_fini(&exec);
-
- amdgpu_sync_free(&sync);
- ctx_data->meta_data_va = bo_va;
- return 0;
-
-error_del_bo_va:
- amdgpu_vm_bo_del(adev, bo_va);
-
-error_fini_exec:
- drm_exec_fini(&exec);
- amdgpu_sync_free(&sync);
- return r;
-}
-
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
- struct amdgpu_bo *bo = ctx_data->meta_data_obj;
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct dma_fence *fence;
- struct drm_exec exec;
- long r;
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
- }
-
- amdgpu_vm_bo_del(adev, bo_va);
- if (!amdgpu_vm_ready(vm))
- goto out_unlock;
-
- r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
- &fence);
- if (r)
- goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- fence = NULL;
- }
-
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (r || !fence)
- goto out_unlock;
-
- dma_fence_wait(fence, false);
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
-
-out_unlock:
- if (unlikely(r < 0))
- dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
- drm_exec_fini(&exec);
-
- return r;
-}
-
-static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
- int pasid, int *gang_id,
- int queue_type, int num_queue,
- struct amdgpu_ring **added_rings,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang_properties gprops = {0};
- int r, j;
-
- /* create a gang for the process */
- gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.gang_quantum = adev->mes.default_gang_quantum;
- gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
-
- r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
- if (r) {
- DRM_ERROR("failed to add gang\n");
- return r;
- }
-
- /* create queues for the gang */
- for (j = 0; j < num_queue; j++) {
- r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
- ctx_data, &ring);
- if (r) {
- DRM_ERROR("failed to add ring\n");
- break;
- }
-
- DRM_INFO("ring %s was added\n", ring->name);
- added_rings[j] = ring;
- }
-
- return 0;
-}
-
-static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
-{
- struct amdgpu_ring *ring;
- int i, r;
-
- for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
- ring = added_rings[i];
- if (!ring)
- continue;
-
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
-
- r = amdgpu_ring_test_ib(ring, 1000 * 10);
- if (r) {
- DRM_DEV_ERROR(ring->adev->dev,
- "ring %s ib test failed (%d)\n",
- ring->name, r);
- return r;
- } else
- DRM_INFO("ring %s ib test pass\n", ring->name);
- }
-
- return 0;
-}
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev)
-{
- struct amdgpu_vm *vm = NULL;
- struct amdgpu_mes_ctx_data ctx_data = {0};
- struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
- int gang_ids[3] = {0};
- int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
- { AMDGPU_RING_TYPE_COMPUTE, 1 },
- { AMDGPU_RING_TYPE_SDMA, 1} };
- int i, r, pasid, k = 0;
-
- pasid = amdgpu_pasid_alloc(16);
- if (pasid < 0) {
- dev_warn(adev->dev, "No more PASIDs available!");
- pasid = 0;
- }
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm) {
- r = -ENOMEM;
- goto error_pasid;
- }
-
- r = amdgpu_vm_init(adev, vm, -1);
- if (r) {
- DRM_ERROR("failed to initialize vm\n");
- goto error_pasid;
- }
-
- r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
- if (r) {
- DRM_ERROR("failed to alloc ctx meta data\n");
- goto error_fini;
- }
-
- ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
- r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
- if (r) {
- DRM_ERROR("failed to map ctx meta data\n");
- goto error_vm;
- }
-
- r = amdgpu_mes_create_process(adev, pasid, vm);
- if (r) {
- DRM_ERROR("failed to create MES process\n");
- goto error_vm;
- }
-
- for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
- /* On GFX v10.3, fw hasn't supported to map sdma queue. */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
- IP_VERSION(10, 3, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) <
- IP_VERSION(11, 0, 0) &&
- queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
- continue;
-
- r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
- &gang_ids[i],
- queue_types[i][0],
- queue_types[i][1],
- &added_rings[k],
- &ctx_data);
- if (r)
- goto error_queues;
-
- k += queue_types[i][1];
- }
-
- /* start ring test and ib test for MES queues */
- amdgpu_mes_test_queues(added_rings);
-
-error_queues:
- /* remove all queues */
- for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
- if (!added_rings[i])
- continue;
- amdgpu_mes_remove_ring(adev, added_rings[i]);
- }
-
- for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
- if (!gang_ids[i])
- continue;
- amdgpu_mes_remove_gang(adev, gang_ids[i]);
- }
-
- amdgpu_mes_destroy_process(adev, pasid);
-
-error_vm:
- amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
-
-error_fini:
- amdgpu_vm_fini(adev, vm);
-
-error_pasid:
- if (pasid)
- amdgpu_pasid_free(pasid);
-
- amdgpu_mes_ctx_free_meta_data(&ctx_data);
- kfree(vm);
- return 0;
-}
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -1690,7 +647,9 @@ static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
dev_err(adev->dev, "failed to change_config.\n");
@@ -1705,7 +664,7 @@ int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
mutex_lock(&adev->enforce_isolation_mutex);
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
+ if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
else
r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index da2c9a8cb3e0..c0d2c195fe2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -111,8 +111,8 @@ struct amdgpu_mes {
uint32_t vmid_mask_gfxhub;
uint32_t vmid_mask_mmhub;
- uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
+ uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
@@ -149,19 +149,6 @@ struct amdgpu_mes {
};
-struct amdgpu_mes_process {
- int pasid;
- struct amdgpu_vm *vm;
- uint64_t pd_gpu_addr;
- struct amdgpu_bo *proc_ctx_bo;
- uint64_t proc_ctx_gpu_addr;
- void *proc_ctx_cpu_ptr;
- uint64_t process_quantum;
- struct list_head gang_list;
- uint32_t doorbell_index;
- struct mutex doorbell_lock;
-};
-
struct amdgpu_mes_gang {
int gang_id;
int priority;
@@ -248,18 +235,6 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
-struct mes_reset_queue_input {
- uint32_t doorbell_offset;
- uint64_t gang_context_addr;
- bool use_mmio;
- uint32_t queue_type;
- uint32_t me_id;
- uint32_t pipe_id;
- uint32_t queue_id;
- uint32_t xcc_id;
- uint32_t vmid;
-};
-
struct mes_map_legacy_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
@@ -291,7 +266,7 @@ struct mes_resume_gang_input {
uint64_t gang_context_addr;
};
-struct mes_reset_legacy_queue_input {
+struct mes_reset_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
bool use_mmio;
@@ -301,6 +276,8 @@ struct mes_reset_legacy_queue_input {
uint64_t mqd_addr;
uint64_t wptr_addr;
uint32_t vmid;
+ bool legacy_gfx;
+ bool is_kq;
};
enum mes_misc_opcode {
@@ -388,9 +365,6 @@ struct amdgpu_mes_funcs {
int (*misc_op)(struct amdgpu_mes *mes,
struct mes_misc_op_input *input);
- int (*reset_legacy_queue)(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input);
-
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
};
@@ -398,32 +372,13 @@ struct amdgpu_mes_funcs {
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm);
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid);
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id);
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id);
-
int amdgpu_mes_suspend(struct amdgpu_device *adev);
int amdgpu_mes_resume(struct amdgpu_device *adev);
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id);
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid);
-
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
@@ -438,8 +393,6 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t val);
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
- uint32_t val, uint32_t mask);
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
@@ -451,27 +404,10 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
bool trap_en);
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr);
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out);
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio);
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev);
-
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index d085687a47ea..a974265837f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -53,6 +53,16 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
return 0;
}
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev) || !adev->asic_funcs ||
+ !adev->asic_funcs->get_pcie_replay_count ||
+ (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
+ return false;
+
+ return true;
+}
+
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 79c2f807b9fe..b528de6a01f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -119,4 +119,6 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev);
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev);
+
#endif
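
The new predicate bundles the SR-IOV and callback checks that gate the PCIe replay counter. A hedged sketch of a typical caller — the sysfs show hook below is illustrative, not part of this patch:

static ssize_t pcie_replay_count_show_sketch(struct amdgpu_device *adev,
                                             char *buf)
{
        /* skip SR-IOV guests and ASICs without either callback wired up */
        if (!amdgpu_nbio_is_replay_cnt_supported(adev))
                return -EOPNOTSUPP;

        return sysfs_emit(buf, "%llu\n",
                          amdgpu_nbio_get_pcie_replay_count(adev));
}
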
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0b9987781f76..122a88294883 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
@@ -62,7 +63,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_kunmap(bo);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
drm_gem_object_release(&bo->tbo.base);
amdgpu_bo_unref(&bo->parent);
@@ -351,7 +352,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
return 0;
}
-EXPORT_SYMBOL(amdgpu_bo_create_kernel);
/**
* amdgpu_bo_create_isp_user - create user BO for isp
@@ -420,7 +420,6 @@ error_unreserve:
return r;
}
-EXPORT_SYMBOL(amdgpu_bo_create_isp_user);
/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
@@ -524,7 +523,6 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (cpu_addr)
*cpu_addr = NULL;
}
-EXPORT_SYMBOL(amdgpu_bo_free_kernel);
/**
* amdgpu_bo_free_isp_user - free BO for isp use
@@ -547,7 +545,6 @@ void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
}
amdgpu_bo_unref(&bo);
}
-EXPORT_SYMBOL(amdgpu_bo_free_isp_user);
/* Validate bo size is bit bigger than the request domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
@@ -939,7 +936,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
domain = bo->preferred_domains & domain;
/* A shared bo cannot be migrated to VRAM */
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@@ -967,7 +964,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
*/
domain = amdgpu_bo_get_preferred_domain(adev, domain);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_pin(bo->tbo.base.import_attach);
/* force to pin into visible video ram */
@@ -1018,7 +1015,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->tbo.pin_count)
return;
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_unpin(bo->tbo.base.import_attach);
if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
@@ -1044,7 +1041,8 @@ static const char * const amdgpu_vram_names[] = {
"GDDR6",
"DDR5",
"LPDDR4",
- "LPDDR5"
+ "LPDDR5",
+ "HBM3E"
};
/**
@@ -1262,7 +1260,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
- if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
@@ -1472,6 +1470,26 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
+ * @bo: amdgpu VRAM buffer object for which we query the offset
+ *
+ * Returns:
+ * current FB aperture GPU offset of the object.
+ */
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset, fb_base;
+
+ WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);
+
+ fb_base = adev->gmc.fb_start;
+ fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
+ return amdgpu_gmc_sign_extend(offset);
+}
+
+/**
* amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
@@ -1644,7 +1662,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
-
+ /* Add the gem obj resv fence dump */
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ dma_resv_describe(bo->tbo.base.resv, m);
+ dma_resv_unlock(bo->tbo.base.resv);
+ }
seq_puts(m, "\n");
return size;
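
amdgpu_bo_fb_aper_addr() rebases a VRAM BO's page offset onto the framebuffer aperture, shifted by this node's XGMI segment, before sign-extending the result via amdgpu_gmc_sign_extend(). A standalone illustration of the arithmetic with invented values (sign extension omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* all values below are made up for illustration */
        uint64_t fb_start = 0x8000000000ull;  /* adev->gmc.fb_start */
        uint64_t node_id  = 1;                /* xgmi.physical_node_id */
        uint64_t seg_size = 0x400000000ull;   /* xgmi.node_segment_size */
        uint64_t bo_page  = 0x1234;           /* bo->tbo.resource->start */

        uint64_t fb_base = fb_start + node_id * seg_size;
        uint64_t addr    = (bo_page << 12) + fb_base; /* PAGE_SHIFT == 12 */

        printf("FB aperture address: 0x%llx\n", (unsigned long long)addr);
        return 0;
}
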
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 375448627f7b..c316920f3450 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -304,6 +304,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index df5d5dbd7f0f..23484317a5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -252,6 +252,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
psp_v14_0_set_psp_funcs(psp);
break;
case IP_VERSION(14, 0, 5):
@@ -574,9 +575,11 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
-int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t reg_val, uint32_t mask, bool check_changed)
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
+ uint32_t mask, uint32_t flags)
{
+ bool check_changed = flags & PSP_WAITREG_CHANGED;
+ bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
uint32_t val;
int i;
struct amdgpu_device *adev = psp->adev;
@@ -596,6 +599,11 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
udelay(1);
}
+ if (verbose)
+ dev_err(adev->dev,
+ "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
+ reg_index, mask, val, reg_val);
+
return -ETIME;
}
@@ -654,6 +662,10 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "BOOT_CFG";
case GFX_CMD_ID_CONFIG_SQ_PERFMON:
return "CONFIG_SQ_PERFMON";
+ case GFX_CMD_ID_FB_FW_RESERV_ADDR:
+ return "FB_FW_RESERV_ADDR";
+ case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
+ return "FB_FW_RESERV_EXT_ADDR";
default:
return "UNKNOWN CMD";
}
@@ -871,6 +883,8 @@ static int psp_tmr_init(struct psp_context *psp)
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
+ psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
return ret;
}
@@ -984,6 +998,106 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
+static int psp_get_fw_reservation_info(struct psp_context *psp,
+ uint32_t cmd_id,
+ uint64_t *addr,
+ uint32_t *size)
+{
+ int ret;
+ uint32_t status;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = cmd_id;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (ret) {
+ release_psp_cmd_buf(psp);
+ return ret;
+ }
+
+ status = cmd->resp.status;
+ if (status == PSP_ERR_UNKNOWN_COMMAND) {
+ release_psp_cmd_buf(psp);
+ *addr = 0;
+ *size = 0;
+ return 0;
+ }
+
+ *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
+ cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
+ *size = cmd->resp.uresp.fw_reserve_info.reserve_size;
+
+ release_psp_cmd_buf(psp);
+
+ return 0;
+}
+
+int psp_update_fw_reservation(struct psp_context *psp)
+{
+ int ret;
+ uint64_t reserv_addr, reserv_addr_ext;
+ uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
+ struct amdgpu_device *adev = psp->adev;
+
+ mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ switch (mp0_ip_ver) {
+ case IP_VERSION(14, 0, 2):
+ if (adev->psp.sos.fw_version < 0x3b0e0d)
+ return 0;
+ break;
+
+ case IP_VERSION(14, 0, 3):
+ if (adev->psp.sos.fw_version < 0x3a0e14)
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
+ if (ret)
+ return ret;
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
+ if (ret)
+ return ret;
+
+ if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
+ dev_warn(adev->dev, "reserve fw region is not valid!\n");
+ return 0;
+ }
+
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+
+ reserv_size = roundup(reserv_size, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+ return ret;
+ }
+
+ reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
+ &adev->mman.fw_reserved_memory_extend, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
struct psp_context *psp = &adev->psp;
@@ -1270,6 +1384,11 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context)
psp_copy_fw(psp, context->bin_desc.start_addr,
context->bin_desc.size_bytes);
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
+ context->mem_context.shared_bo)
+ context->mem_context.shared_mc_addr =
+ amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
+
psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
@@ -2214,7 +2333,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
- dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+ dev_info(psp->adev->dev,
+ "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
return 0;
}
@@ -2336,11 +2456,27 @@ bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
return false;
}
+static void psp_update_gpu_addresses(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
+ psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
+ psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
+ psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
+ }
+ if (adev->firmware.rbuf && psp->km_ring.ring_mem)
+ psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ psp_update_gpu_addresses(adev);
+
if (!amdgpu_sriov_vf(adev)) {
if ((is_psp_fw_valid(psp->kdb)) &&
(psp->funcs->bootloader_load_kdb != NULL)) {
@@ -2439,6 +2575,14 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ ret = psp_update_fw_reservation(psp);
+ if (ret) {
+ dev_err(adev->dev, "update fw reservation failed!\n");
+ return ret;
+ }
+ }
+
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
@@ -3521,8 +3665,12 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_sos.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3798,8 +3946,12 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
struct amdgpu_device *adev = psp->adev;
int err;
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_ta.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
@@ -4116,8 +4268,8 @@ rel_buf:
static const struct bin_attribute psp_vbflash_bin_attr = {
.attr = {.name = "psp_vbflash", .mode = 0660},
.size = 0,
- .write_new = amdgpu_psp_vbflash_write,
- .read_new = amdgpu_psp_vbflash_read,
+ .write = amdgpu_psp_vbflash_write,
+ .read = amdgpu_psp_vbflash_read,
};
/**
@@ -4180,11 +4332,115 @@ static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
const struct attribute_group amdgpu_flash_attr_group = {
.attrs = flash_attrs,
- .bin_attrs_new = bin_flash_attrs,
+ .bin_attrs = bin_flash_attrs,
.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
.is_visible = amdgpu_flash_attr_is_visible,
};
+#if defined(CONFIG_DEBUG_FS)
+static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet;
+ int ret;
+
+ /* serialize open() calls on this file */
+ if (!mutex_trylock(&adev->psp.mutex))
+ return -EBUSY;
+
+ /*
+ * make sure only one userspace process is alive for dumping, so that
+ * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed:
+ * consider the case where one process tries to open the file while
+ * another has already proceeded to read or release. This also removes
+ * the need for a mutex in the read() and release() callbacks.
+ */
+ if (adev->psp.spirom_dump_trip) {
+ mutex_unlock(&adev->psp.mutex);
+ return -EBUSY;
+ }
+
+ bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
+ if (!bo_triplet) {
+ mutex_unlock(&adev->psp.mutex);
+ return -ENOMEM;
+ }
+
+ ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo_triplet->bo,
+ &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ if (ret)
+ goto rel_trip;
+
+ ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
+ if (ret)
+ goto rel_bo;
+
+ adev->psp.spirom_dump_trip = bo_triplet;
+ mutex_unlock(&adev->psp.mutex);
+ return 0;
+rel_bo:
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+rel_trip:
+ kfree(bo_triplet);
+ mutex_unlock(&adev->psp.mutex);
+ dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
+ return ret;
+}
+
+static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (!bo_triplet)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf,
+ size,
+ pos, bo_triplet->cpu_addr,
+ AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+}
+
+static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (bo_triplet) {
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ kfree(bo_triplet);
+ }
+
+ adev->psp.spirom_dump_trip = NULL;
+ return 0;
+}
+
+static const struct file_operations psp_dump_spirom_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psp_read_spirom_debugfs_open,
+ .read = psp_read_spirom_debugfs_read,
+ .release = psp_read_spirom_debugfs_release,
+ .llseek = default_llseek,
+};
+#endif
+
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
+ adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+#endif
+}
+
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
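
Once registered, the psp_spirom_dump node reads like any other debugfs file, up to AMD_VBIOS_FILE_MAX_SIZE_B * 2 bytes. A userspace sketch; the debugfs mount point and DRI minor number are assumptions about the local setup:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/dri/0/psp_spirom_dump", O_RDONLY);

        if (fd < 0) {
                perror("open psp_spirom_dump");
                return 1;
        }
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, (size_t)n, stdout);
        close(fd);
        return 0;
}
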
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8d5acc415d38..237b624aa51c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -39,6 +39,29 @@
#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0x0000FFFF
+#define MBOX_COMMAND_MASK 0x00FF0000
+#define MBOX_READY_FLAG 0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO 0xf
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
+#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+
+/* Command register bit 31 set to indicate readiness */
+#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
+/* Values to check for a successful GFX_CMD response wait. Check against
+ * both status bits and response state - helps to detect a command failure
+ * or other unexpected cases like a dropped device reading back all 0xFFs
+ */
+#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -107,9 +130,13 @@ enum psp_reg_prog_id {
PSP_REG_IH_RB_CNTL = 0, /* register IH_RB_CNTL */
PSP_REG_IH_RB_CNTL_RING1 = 1, /* register IH_RB_CNTL_RING1 */
PSP_REG_IH_RB_CNTL_RING2 = 2, /* register IH_RB_CNTL_RING2 */
+ PSP_REG_MMHUB_L1_TLB_CNTL = 25,
PSP_REG_LAST
};
+#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */
+#define PSP_WAITREG_NOVERBOSE BIT(1) /* do not log the timeout error */
+
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
int (*wait_for_bootloader)(struct psp_context *psp);
@@ -137,11 +164,14 @@ struct psp_funcs {
int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+ int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
bool (*is_aux_sos_load_required)(struct psp_context *psp);
bool (*is_reload_needed)(struct psp_context *psp);
+ int (*reg_program_no_ring)(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
};
struct ta_funcs {
@@ -319,6 +349,14 @@ struct psp_runtime_scpm_entry {
enum psp_runtime_scpm_authentication scpm_status;
};
+#if defined(CONFIG_DEBUG_FS)
+struct spirom_bo {
+ struct amdgpu_bo *bo;
+ uint64_t mc_addr;
+ void *cpu_addr;
+};
+#endif
+
struct psp_context {
struct amdgpu_device *adev;
struct psp_ring km_ring;
@@ -406,6 +444,9 @@ struct psp_context {
char *vbflash_tmp_buf;
size_t vbflash_image_size;
bool vbflash_done;
+#if defined(CONFIG_DEBUG_FS)
+ struct spirom_bo *spirom_dump_trip;
+#endif
};
struct amdgpu_psp_funcs {
@@ -464,6 +505,10 @@ struct amdgpu_psp_funcs {
((psp)->funcs->update_spirom ? \
(psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+#define psp_dump_spirom(psp, fw_pri_mc_addr) \
+ ((psp)->funcs->dump_spirom ? \
+ (psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+
#define psp_vbflash_status(psp) \
((psp)->funcs->vbflash_stat ? \
(psp)->funcs->vbflash_stat((psp)) : -EINVAL)
@@ -475,6 +520,10 @@ struct amdgpu_psp_funcs {
#define psp_is_aux_sos_load_required(psp) \
((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0)
+#define psp_reg_program_no_ring(psp, val, id) \
+ ((psp)->funcs->reg_program_no_ring ? \
+ (psp)->funcs->reg_program_no_ring((psp), val, id) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -486,8 +535,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
-extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t field_val, uint32_t mask, bool check_changed);
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t flags);
extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
@@ -553,7 +602,7 @@ int psp_init_cap_microcode(struct psp_context *psp,
const char *chip_name);
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr);
-
+int psp_update_fw_reservation(struct psp_context *psp);
int psp_load_fw_list(struct psp_context *psp,
struct amdgpu_firmware_info **ucode_list, int ucode_count);
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
@@ -569,5 +618,9 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
+int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev);
+
#endif
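
With psp_wait_for() taking a flags word instead of the old bool check_changed, existing callers map true onto PSP_WAITREG_CHANGED, and the new PSP_WAITREG_NOVERBOSE bit opts out of the timeout message added in the .c hunk above. A migration sketch; the wrapper name is illustrative:

/* before: r = psp_wait_for(psp, reg_index, val, mask, true); */
static int psp_wait_changed_quietly(struct psp_context *psp,
                                    uint32_t reg_index, uint32_t val,
                                    uint32_t mask)
{
        /* wait for the value to change, suppressing the timeout dev_err() */
        return psp_wait_for(psp, reg_index, val, mask,
                            PSP_WAITREG_CHANGED | PSP_WAITREG_NOVERBOSE);
}
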
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 443409d4f4b0..540817e296da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1107,6 +1107,9 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info->de_count, blk_name);
}
} else {
+ if (adev->debug_disable_ce_logs)
+ return;
+
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
@@ -1498,6 +1501,9 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
!amdgpu_ras_get_aca_debug_mode(adev))
return -EOPNOTSUPP;
+ if (amdgpu_sriov_vf(adev))
+ return -EOPNOTSUPP;
+
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
((smu_funcs && smu_funcs->set_debug_mode) ||
@@ -2121,7 +2127,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
con->badpages_attr = bin_attr_gpu_vram_bad_pages;
sysfs_bin_attr_init(&con->badpages_attr);
bin_attrs[0] = &con->badpages_attr;
- group.bin_attrs_new = bin_attrs;
+ group.bin_attrs = bin_attrs;
}
r = sysfs_create_group(&adev->dev->kobj, &group);
@@ -2161,7 +2167,7 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
/* Fatal error events are handled on host side */
if (amdgpu_sriov_vf(adev))
return;
- /**
+ /*
* If the current interrupt is caused by a non-fatal RAS error, skip
* check for fatal error. For fatal errors, FED status of all devices
* in XGMI hive gets set when the first device gets fatal error
@@ -2851,11 +2857,27 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
if (amdgpu_umc_pages_in_a_row(adev, err_data,
bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps[0].address;
+ err_data->err_addr[i].mem_channel = bps[0].mem_channel;
+ err_data->err_addr[i].bank = bps[0].bank;
+ err_data->err_addr[i].err_type = bps[0].err_type;
+ err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
+ }
} else {
if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
return -EINVAL;
}
} else {
+ if (bps[0].address == 0) {
+ /* for certain old eeprom data the mca address is not stored;
+ * calculate it from the pa
+ */
+ if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+ }
+
if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
if (nps == AMDGPU_NPS1_PARTITION_MODE)
memcpy(err_data->err_addr, bps,
@@ -2873,6 +2895,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
struct eeprom_table_record *bps, struct ras_err_data *err_data,
enum amdgpu_memory_partition nps)
{
+ int i = 0;
enum amdgpu_memory_partition save_nps;
save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
@@ -2882,10 +2905,30 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
if (amdgpu_umc_pages_in_a_row(adev, err_data,
bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps->address;
+ err_data->err_addr[i].mem_channel = bps->mem_channel;
+ err_data->err_addr[i].bank = bps->bank;
+ err_data->err_addr[i].err_type = bps->err_type;
+ err_data->err_addr[i].mcumc_id = bps->mcumc_id;
+ }
} else {
- if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
- return -EINVAL;
+ if (bps->address) {
+ if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
+ return -EINVAL;
+ } else {
+ /* for certain old eeprom data the mca address is not stored;
+ * calculate it from the pa
+ */
+ if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps->address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+
+ if (amdgpu_ras_mca2pa(adev, bps, err_data))
+ return -EOPNOTSUPP;
+ }
}
+
return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
adev->umc.retire_unit);
}
@@ -2900,7 +2943,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
&adev->psp.ras_context.ras->eeprom_control;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i;
+ uint32_t i = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
@@ -2921,34 +2964,36 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
if (from_rom) {
- for (i = 0; i < pages; i++) {
- if (control->ras_num_recs - i >= adev->umc.retire_unit) {
- if ((bps[i].address == bps[i + 1].address) &&
- (bps[i].mem_channel == bps[i + 1].mem_channel)) {
- //deal with retire_unit records a time
- ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
- &bps[i], &err_data, nps);
- if (ret)
- goto free;
- i += (adev->umc.retire_unit - 1);
+ /* there are no pa recs in V3, so skip pa recs processing */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < pages; i++) {
+ if (control->ras_num_recs - i >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ /* deal with retire_unit records a time */
+ ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ control->ras_num_bad_pages -= adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ break;
+ }
} else {
break;
}
- } else {
- break;
}
}
for (; i < pages; i++) {
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- goto free;
+ control->ras_num_bad_pages -= adev->umc.retire_unit;
}
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
-free:
if (from_rom)
kfree(err_data.err_addr);
mutex_unlock(&con->recovery_lock);
@@ -2976,6 +3021,15 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
return 0;
}
+ if (!con->eeprom_control.is_eeprom_valid) {
+ dev_warn(adev->dev,
+ "Failed to save EEPROM table data because of EEPROM data corruption!");
+ if (new_cnt)
+ *new_cnt = 0;
+
+ return 0;
+ }
+
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
@@ -3037,21 +3091,28 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
dev_err(adev->dev, "Failed to load EEPROM table records!");
} else {
if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
- for (i = 0; i < control->ras_num_recs; i++) {
- if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
- if ((bps[i].address == bps[i + 1].address) &&
- (bps[i].mem_channel == bps[i + 1].mem_channel)) {
- control->ras_num_pa_recs += adev->umc.retire_unit;
- i += (adev->umc.retire_unit - 1);
+ /* In V3 there are no pa recs, and some records (when address == 0)
+ * may be parsed as pa recs, so add a version check to avoid it.
+ */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < control->ras_num_recs; i++) {
+ if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ control->ras_num_pa_recs += adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ control->ras_num_mca_recs +=
+ (control->ras_num_recs - i);
+ break;
+ }
} else {
- control->ras_num_mca_recs +=
- (control->ras_num_recs - i);
+ control->ras_num_mca_recs += (control->ras_num_recs - i);
break;
}
- } else {
- control->ras_num_mca_recs += (control->ras_num_recs - i);
- break;
}
+ } else {
+ control->ras_num_mca_recs = control->ras_num_recs;
}
}
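The record split maintained here feeds the bad-page accounting done later in amdgpu_ras_eeprom_check(): PA records map one-to-one to retired pages, while each MCA record fans out to retire_unit pages. A worked example with made-up numbers:

	/* illustrative values only */
	uint32_t ras_num_pa_recs = 16;
	uint32_t ras_num_mca_recs = 3;
	uint32_t retire_unit = 8;

	uint32_t ras_num_bad_pages =
		ras_num_pa_recs + ras_num_mca_recs * retire_unit; /* = 40 */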
@@ -3260,7 +3321,6 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
uint64_t de_queried_count;
uint32_t new_detect_count, total_detect_count;
uint32_t need_query_count = poison_creation_count;
- bool query_data_timeout = false;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
@@ -3289,21 +3349,13 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
if (timeout) {
- if (!--timeout) {
- query_data_timeout = true;
+ if (!--timeout)
break;
- }
msleep(1);
}
}
} while (total_detect_count < need_query_count);
- if (query_data_timeout) {
- dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
- (need_query_count - total_detect_count));
- return -ENOENT;
- }
-
if (total_detect_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
@@ -3454,16 +3506,21 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
control = &con->eeprom_control;
ret = amdgpu_ras_eeprom_init(control);
- if (ret)
- return ret;
+ control->is_eeprom_valid = !ret;
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
control->ras_num_pa_recs = control->ras_num_recs;
- if (control->ras_num_recs) {
+ if (adev->umc.ras &&
+ adev->umc.ras->get_retire_flip_bits)
+ adev->umc.ras->get_retire_flip_bits(adev);
+
+ if (control->ras_num_recs && control->is_eeprom_valid) {
ret = amdgpu_ras_load_bad_pages(adev);
- if (ret)
- return ret;
+ if (ret) {
+ control->is_eeprom_valid = false;
+ return 0;
+ }
amdgpu_dpm_send_hbm_bad_pages_num(
adev, control->ras_num_bad_pages);
@@ -3482,7 +3539,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
}
- return ret;
+ return 0;
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
@@ -3691,7 +3748,8 @@ static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev
*/
if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
- amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
1 << AMDGPU_RAS_BLOCK__JPEG);
else
@@ -3793,10 +3851,12 @@ init_ras_enabled_flag:
adev->ras_hw_enabled & amdgpu_ras_mask;
/* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
- adev->aca.is_enabled =
- (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
- amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
- amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->aca.is_enabled =
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ }
/* bad page feature is not applicable to specific app platform */
if (adev->gmc.is_app_apu &&
@@ -4373,8 +4433,10 @@ void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
struct amdgpu_ras *ras;
ras = amdgpu_ras_get_context(adev);
- if (ras)
+ if (ras) {
ras->ras_err_state = 0;
+ ras->gpu_reset_flags = 0;
+ }
}
void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
@@ -4479,8 +4541,11 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
u64 event_id;
- if (amdgpu_ras_mark_ras_event(adev, type))
+ if (amdgpu_ras_mark_ras_event(adev, type)) {
+ dev_err(adev->dev,
+ "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
return;
+ }
event_id = amdgpu_ras_acquire_event_id(adev, type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 0ea7cfaf3587..9bda9ad13f88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -277,10 +277,11 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table header:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table header:%d",
+ res);
} else if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_HEADER_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_HEADER_SIZE);
res = -EIO;
} else {
res = 0;
@@ -323,7 +324,8 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to write table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to write table ras info\n");
return -ENOMEM;
}
@@ -338,10 +340,11 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table ras info:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table ras info:%d",
+ res);
} else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_V2_1_INFO_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_V2_1_INFO_SIZE);
res = -EIO;
} else {
res = 0;
@@ -418,6 +421,7 @@ static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control
hdr->version = RAS_TABLE_VER_V2_1;
return;
case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
hdr->version = RAS_TABLE_VER_V3;
return;
default:
@@ -475,6 +479,8 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
control->ras_num_recs = 0;
control->ras_num_bad_pages = 0;
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
control->ras_fri = 0;
amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
@@ -606,13 +612,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Writing %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Writing %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short write, return error.
*/
- DRM_ERROR("Wrote %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Wrote %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -760,18 +766,17 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
- control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
- if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
- control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
- control->tbl_rai.health_percent = 0;
- }
-
if ((amdgpu_bad_page_threshold != -1) &&
- (amdgpu_bad_page_threshold != -2))
+ (amdgpu_bad_page_threshold != -2)) {
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
ras->is_rma = true;
-
- /* ignore the -ENOTSUPP return value */
- amdgpu_dpm_send_rma_reason(adev);
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
+ }
}
if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
@@ -786,8 +791,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("allocating memory for table of size %d bytes failed\n",
- control->tbl_hdr.tbl_size);
+ dev_err(adev->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
res = -ENOMEM;
goto Out;
}
@@ -799,12 +805,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("EEPROM failed reading records:%d\n",
- res);
+ dev_err(adev->dev, "EEPROM failed reading records:%d\n", res);
goto Out;
} else if (res < buf_size) {
- DRM_ERROR("EEPROM read %d out of %d bytes\n",
- res, buf_size);
+ dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res,
+ buf_size);
res = -EIO;
goto Out;
}
@@ -865,11 +870,12 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
return 0;
if (num == 0) {
- DRM_ERROR("will not append 0 records\n");
+ dev_err(adev->dev, "will not append 0 records\n");
return -EINVAL;
} else if (num > control->ras_max_record_count) {
- DRM_ERROR("cannot append %d records than the size of table %d\n",
- num, control->ras_max_record_count);
+ dev_err(adev->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
return -EINVAL;
}
@@ -923,13 +929,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Reading %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Reading %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short read, return error.
*/
- DRM_ERROR("Read %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Read %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -963,11 +969,11 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
return 0;
if (num == 0) {
- DRM_ERROR("will not read 0 records\n");
+ dev_err(adev->dev, "will not read 0 records\n");
return -EINVAL;
} else if (num > control->ras_num_recs) {
- DRM_ERROR("too many records to read:%d available:%d\n",
- num, control->ras_num_recs);
+ dev_err(adev->dev, "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
return -EINVAL;
}
@@ -1299,7 +1305,8 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Out of memory checking RAS table checksum.\n");
+ dev_err(adev->dev,
+ "Out of memory checking RAS table checksum.\n");
return -ENOMEM;
}
@@ -1308,7 +1315,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
control->ras_header_offset,
buf, buf_size);
if (res < buf_size) {
- DRM_ERROR("Partial read for checksum, res:%d\n", res);
+ dev_err(adev->dev, "Partial read for checksum, res:%d\n", res);
/* On partial reads, return -EIO.
*/
if (res >= 0)
@@ -1333,7 +1340,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
return -ENOMEM;
}
@@ -1345,7 +1353,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_info_offset,
buf, RAS_TABLE_V2_1_INFO_SIZE);
if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res);
+ dev_err(adev->dev,
+ "Failed to read EEPROM table ras info, res:%d", res);
res = res >= 0 ? -EIO : res;
goto Out;
}
@@ -1386,23 +1395,46 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_header_offset,
buf, RAS_TABLE_HEADER_SIZE);
if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Failed to read EEPROM table header, res:%d", res);
+ dev_err(adev->dev, "Failed to read EEPROM table header, res:%d",
+ res);
return res >= 0 ? -EIO : res;
}
__decode_table_header_from_buf(hdr, buf);
- if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ dev_info(adev->dev, "Creating a new EEPROM table");
+ return amdgpu_ras_eeprom_reset_table(control);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
control->ras_record_offset = RAS_RECORD_START_V2_1;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
- } else {
+ break;
+ case RAS_TABLE_VER_V1:
control->ras_num_recs = RAS_NUM_RECS(hdr);
control->ras_record_offset = RAS_RECORD_START;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
+ }
+
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ dev_err(adev->dev,
+ "RAS header invalid, records in header: %u max allowed :%u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
}
- control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
control->ras_num_mca_recs = 0;
control->ras_num_pa_recs = 0;
return 0;
@@ -1413,7 +1445,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- int res;
+ int res = 0;
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1429,8 +1461,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
control->ras_num_mca_recs * adev->umc.retire_unit;
if (hdr->header == RAS_TABLE_HDR_VAL) {
- DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->ras_num_bad_pages);
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
@@ -1494,11 +1527,35 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
"User defined threshold is set, runtime service will be halt when threshold is reached\n");
}
}
- } else {
- DRM_INFO("Creating a new EEPROM table");
-
- res = amdgpu_ras_eeprom_reset_table(control);
}
return res < 0 ? res : 0;
}
+
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev) || !ras)
+ return;
+ control = &ras->eeprom_control;
+ if (!control->is_eeprom_valid)
+ return;
+ res = __verify_ras_table_checksum(control);
+ if (res) {
+ dev_warn(adev->dev,
+ "RAS table incorrect checksum or error:%d, try to recover\n",
+ res);
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (!amdgpu_ras_save_bad_pages(adev, NULL))
+ if (!__verify_ras_table_checksum(control)) {
+ dev_info(adev->dev, "RAS table recovery succeed\n");
+ return;
+ }
+ dev_err(adev->dev, "RAS table recovery failed\n");
+ control->is_eeprom_valid = false;
+ }
+ return;
+} \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ec6d7ea37ad0..ebfca4cb5688 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -114,6 +114,8 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
+
+ bool is_eeprom_valid;
};
/*
@@ -159,6 +161,8 @@ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 59acdbfe28d8..6379bb25bf5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -99,6 +99,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
return 0;
}
+/**
+ * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Doesn't check the max_dw limit, as we may be re-emitting
+ * several submissions.
+ */
+static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
+{
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+}
+
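The mask-based round-up at the top of the helper is the usual power-of-two alignment idiom; for example, with a 64-dword alignment (align_mask = 0x3f, an illustrative value), a request for 100 dwords is padded to 128:

	unsigned int align_mask = 0x3f;		/* 64-dword alignment */
	unsigned int ndw = 100;
	unsigned int aligned = (ndw + align_mask) & ~align_mask; /* 128 */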
/** amdgpu_ring_insert_nop - insert NOP packets
*
* @ring: amdgpu_ring structure holding ring information
@@ -187,14 +210,10 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
#define amdgpu_ring_get_gpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : \
- (ring->adev->wb.gpu_addr + offset * 4))
+ (ring->adev->wb.gpu_addr + offset * 4)
#define amdgpu_ring_get_cpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- (&ring->adev->wb.wb[offset]))
+ (&ring->adev->wb.wb[offset])
/**
* amdgpu_ring_init - init driver ring struct.
@@ -243,57 +262,42 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->sched_score = sched_score;
ring->vmid_wait = dma_fence_get_stub();
- if (!ring->is_mes_queue) {
- ring->idx = adev->num_rings++;
- adev->rings[ring->idx] = ring;
- }
+ ring->idx = adev->num_rings++;
+ adev->rings[ring->idx] = ring;
r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
}
- if (ring->is_mes_queue) {
- ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RPTR_OFFS);
- ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_WPTR_OFFS);
- ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_FENCE_OFFS);
- ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
- ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_COND_EXE_OFFS);
- } else {
- r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+ return r;
}
ring->fence_gpu_addr =
@@ -352,19 +356,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Initialize cached_rptr to 0 */
ring->cached_rptr = 0;
- /* Allocate ring buffer */
- if (ring->is_mes_queue) {
- int offset = 0;
-
- BUG_ON(ring->ring_size > PAGE_SIZE*4);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RING_OFFS);
- ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- amdgpu_ring_clear_ring(ring);
+ if (!ring->ring_backup) {
+ ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
+ if (!ring->ring_backup)
+ return -ENOMEM;
+ }
- } else if (ring->ring_obj == NULL) {
+ /* Allocate ring buffer */
+ if (ring->ring_obj == NULL) {
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
@@ -372,6 +371,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
(void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
+ kvfree(ring->ring_backup);
return r;
}
amdgpu_ring_clear_ring(ring);
@@ -401,32 +401,28 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
/* Not to finish a ring which is not initialized */
- if (!(ring->adev) ||
- (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
ring->sched.ready = false;
- if (!ring->is_mes_queue) {
- amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_device_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
- amdgpu_bo_free_kernel(&ring->ring_obj,
- &ring->gpu_addr,
- (void **)&ring->ring);
- } else {
- kfree(ring->fence_drv.fences);
- }
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
+ kvfree(ring->ring_backup);
+ ring->ring_backup = NULL;
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
- if (!ring->is_mes_queue)
- ring->adev->rings[ring->idx] = NULL;
+ ring->adev->rings[ring->idx] = NULL;
}
/**
@@ -463,6 +459,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
unsigned long flags;
ktime_t deadline;
+ bool ret;
if (unlikely(ring->adev->debug_disable_soft_recovery))
return false;
@@ -477,12 +474,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
dma_fence_set_error(fence, -ENODATA);
spin_unlock_irqrestore(fence->lock, flags);
- atomic_inc(&ring->adev->gpu_reset_counter);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
ring->funcs->soft_recovery(ring, vmid);
- return dma_fence_is_signaled(fence);
+ ret = dma_fence_is_signaled(fence);
+ /* increment the counter only if soft reset worked */
+ if (ret)
+ atomic_inc(&ring->adev->gpu_reset_counter);
+
+ return ret;
}
/*
@@ -718,6 +719,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
prop->eop_gpu_addr = ring->eop_gpu_addr;
prop->use_doorbell = ring->use_doorbell;
prop->doorbell_index = ring->doorbell_index;
+ prop->kernel_queue = true;
/* map_queues packet doesn't need to activate the queue,
* so only kiq needs to set this field.
@@ -789,3 +791,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
return true;
}
+
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ /* Stop the scheduler to prevent anybody else from touching the ring buffer. */
+ drm_sched_wqueue_stop(&ring->sched);
+ /* back up the non-guilty commands */
+ amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
+}
+
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ unsigned int i;
+ int r;
+
+ /* verify that the ring is functional */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+
+ /* signal the fence of the bad job */
+ if (guilty_fence)
+ amdgpu_fence_driver_guilty_force_completion(guilty_fence);
+ /* Re-emit the non-guilty commands */
+ if (ring->ring_backup_entries_to_copy) {
+ amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
+ for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
+ amdgpu_ring_write(ring, ring->ring_backup[i]);
+ amdgpu_ring_commit(ring);
+ }
+ /* Start the scheduler again */
+ drm_sched_wqueue_start(&ring->sched);
+ return 0;
+}
+
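A hypothetical IP-specific reset callback showing how the two helpers are meant to bracket the hardware reset; my_ip_hw_reset() is a made-up stand-in for the engine-specific reset sequence, and the signature matches the new reset hook in amdgpu_ring_funcs:

static int my_ip_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
			    struct amdgpu_fence *timedout_fence)
{
	int r;

	/* stop the scheduler and back up the non-guilty commands */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	r = my_ip_hw_reset(ring, vmid);		/* engine-specific reset */
	if (r)
		return r;

	/* test the ring, complete the guilty fence, re-emit, restart */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}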
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type)
+{
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (ring->adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (ring->adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (ring->adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (ring->adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (ring->adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bb2b66385223..7670f5d82b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -118,6 +118,7 @@ struct amdgpu_fence_driver {
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
+ u64 signalled_wptr;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
@@ -127,11 +128,35 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+/*
+ * Fences mark an event in the GPUs pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+struct amdgpu_fence {
+ struct dma_fence base;
+
+ /* RB, DMA, etc. */
+ struct amdgpu_ring *ring;
+ ktime_t start_timestamp;
+
+ /* wptr for the fence for resets */
+ u64 wptr;
+ /* fence context for resets */
+ u64 context;
+ uint32_t seq;
+};
+
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence);
+void amdgpu_fence_save_wptr(struct dma_fence *fence);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -141,8 +166,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
- unsigned flags);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
@@ -164,8 +189,24 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
+ /**
+ * @type:
+ *
+ * GFX, Compute, SDMA, UVD, VCE, VCN, VPE, KIQ, MES, UMSCH, and CPER
+ * use ring buffers. The type field just identifies which component the
+ * ring buffer is associated with.
+ */
enum amdgpu_ring_type type;
uint32_t align_mask;
+
+ /**
+ * @nop:
+ *
+ * Every block in the amdgpu has no-op instructions (e.g., GFX 10
+ * uses PACKET3(PACKET3_NOP, 0x3FFF), VCN 5 uses VCN_ENC_CMD_NO_OP,
+ * etc). This field receives the specific no-op for the component
+ * that initializes the ring.
+ */
u32 nop;
bool support_64bit_ptrs;
bool no_user_fence;
@@ -236,11 +277,14 @@ struct amdgpu_ring_funcs {
void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
- int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid,
+ struct amdgpu_fence *timedout_fence);
void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
- bool (*is_guilty)(struct amdgpu_ring *ring);
};
+/**
+ * amdgpu_ring - Holds ring information
+ */
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
@@ -249,16 +293,67 @@ struct amdgpu_ring {
struct amdgpu_bo *ring_obj;
uint32_t *ring;
+ /* backups for resets */
+ uint32_t *ring_backup;
+ unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
volatile u32 *rptr_cpu_addr;
+
+ /**
+ * @wptr:
+ *
+ * This is part of the ring buffer implementation and represents the
+ * write pointer. The wptr indicates where the host has written.
+ */
u64 wptr;
+
+ /**
+ * @wptr_old:
+ *
+ * Before wptr is updated with a new value, the old value is
+ * stored in wptr_old.
+ */
u64 wptr_old;
unsigned ring_size;
+
+ /**
+ * @max_dw:
+ *
+ * Maximum number of DWords for ring allocation. This information is
+ * provided at ring initialization time, and each IP block can
+ * specify its own value. Check the callers of
+ * amdgpu_ring_init() to see the maximum size per block.
+ */
unsigned max_dw;
+
+ /**
+ * @count_dw:
+ *
+ * This value starts at the maximum number of DWords supported by the
+ * ring and is updated as the ring is written.
+ */
int count_dw;
uint64_t gpu_addr;
+
+ /**
+ * @ptr_mask:
+ *
+ * Some IPs support 64-bit pointers and others only 32-bit; this
+ * behavior is component-specific and defined by the field
+ * support_64bit_ptrs. If the IP block supports 64 bits, the mask
+ * 0xffffffffffffffff is set; otherwise, this value equals buf_mask.
+ * Note that this field is used to keep wptr within a valid range.
+ */
uint64_t ptr_mask;
+
+ /**
+ * @buf_mask:
+ *
+ * Buffer mask is a value used to keep the wptr count within the
+ * ring's bounds. The buffer mask is set at ring buffer
+ * initialization time and is defined as (ring_size / 4) - 1.
+ */
uint32_t buf_mask;
u32 idx;
u32 xcc_id;
@@ -276,6 +371,13 @@ struct amdgpu_ring {
bool use_pollmem;
unsigned wptr_offs;
u64 wptr_gpu_addr;
+
+ /**
+ * @wptr_cpu_addr:
+ *
+ * This is the CPU pointer to the writeback slot. It is used
+ * to commit changes to the GPU.
+ */
volatile u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
@@ -297,20 +399,15 @@ struct amdgpu_ring {
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
bool no_scheduler;
+ bool no_user_submission;
int hw_prio;
unsigned num_hw_submission;
atomic_t *sched_score;
- /* used for mes */
- bool is_mes_queue;
- uint32_t hw_queue_id;
- struct amdgpu_mes_ctx_data *mes_ctx;
-
bool is_sw_ring;
unsigned int entry_index;
/* store the cached rptr to restore after reset */
uint64_t cached_rptr;
-
};
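A worked example of the mask fields documented above, with illustrative numbers: a 4 KiB ring holds 1024 dwords, so buf_mask = (4096 / 4) - 1 = 1023, and indexing wraps by masking:

	uint32_t ring_size = 4096;			/* bytes, illustrative */
	uint32_t buf_mask = (ring_size / 4) - 1;	/* 0x3ff */
	uint64_t wptr = 1030;
	uint32_t slot = wptr & buf_mask;		/* wraps to 6 */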
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
@@ -340,7 +437,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
-#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
+#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f))
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
@@ -435,15 +532,6 @@ static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
ring->ring[offset] = cur - offset;
}
-#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)
-
-#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- NULL)
-
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
@@ -474,4 +562,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index 03ed14663107..7e7d6c3865bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -135,7 +135,8 @@ static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
{
- struct amdgpu_ring_mux *mux = from_timer(mux, t, resubmit_timer);
+ struct amdgpu_ring_mux *mux = timer_container_of(mux, t,
+ resubmit_timer);
if (!spin_trylock(&mux->lock)) {
amdgpu_ring_mux_schedule_resubmit(mux);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index fce22d3f816b..c210625be220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -237,6 +237,20 @@ struct amdgpu_rlc_funcs {
void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
int (*init)(struct amdgpu_device *adev);
u32 (*get_csb_size)(struct amdgpu_device *adev);
+
+ /**
+ * @get_csb_buffer: Get the clear state to be put into the hardware.
+ *
+ * The adev parameter is used to get the CS data and other gfx info,
+ * and buffer is the RLC CS pointer.
+ *
+ * Sometimes, the user space puts a request to clear the state in the
+ * command buffer; this function provides the clear state that gets put
+ * into the hardware. Note that the driver programs Clear State
+ * Indirect Buffer (CSB) explicitly when it sets up the kernel rings,
+ * and it also provides a pointer to it which is used by the firmware
+ * to load the clear state in some cases.
+ */
void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 529c9696c2f3..8b8a04138711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -26,6 +26,8 @@
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_3_0_sh_mask.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -76,22 +78,14 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
return 0;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
+ r = amdgpu_sdma_get_index_from_ring(ring, &index);
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- sdma[ring->idx].sdma_meta_data);
- csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- r = amdgpu_sdma_get_index_from_ring(ring, &index);
-
- if (r || index > 31)
- csa_mc_addr = 0;
- else
- csa_mc_addr = amdgpu_csa_vaddr(adev) +
- AMDGPU_CSA_SDMA_OFFSET +
- index * AMDGPU_CSA_SDMA_SIZE;
- }
+ if (r || index > 31)
+ csa_mc_addr = 0;
+ else
+ csa_mc_addr = amdgpu_csa_vaddr(adev) +
+ AMDGPU_CSA_SDMA_OFFSET +
+ index * AMDGPU_CSA_SDMA_SIZE;
return csa_mc_addr;
}
@@ -537,110 +531,78 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin
return false;
}
-/**
- * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks
- * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions
- *
- * This function allows KFD and AMDGPU to register their own callbacks for handling
- * pre-reset and post-reset operations for engine reset. These are needed because engine
- * reset will stop all queues on that engine.
- */
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs)
+static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
{
- if (!funcs)
- return;
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
- /* Ensure the reset_callback_list is initialized */
- if (!adev->sdma.reset_callback_list.next) {
- INIT_LIST_HEAD(&adev->sdma.reset_callback_list);
- }
- /* Initialize the list node in the callback structure */
- INIT_LIST_HEAD(&funcs->list);
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
- /* Add the callback structure to the global list */
- list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
+ return -EOPNOTSUPP;
}
/**
* amdgpu_sdma_reset_engine - Reset a specific SDMA engine
* @adev: Pointer to the AMDGPU device
- * @instance_id: ID of the SDMA engine instance to reset
- *
- * This function performs the following steps:
- * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state.
- * 2. Resets the specified SDMA engine instance.
- * 3. Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state.
+ * @instance_id: Logical ID of the SDMA engine instance to reset
+ * @caller_handles_kernel_queues: Skip kernel queue processing. Caller
+ * will handle it.
*
* Returns: 0 on success, or a negative error code on failure.
*/
-int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues)
{
- struct sdma_on_reset_funcs *funcs;
int ret = 0;
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
struct amdgpu_ring *page_ring = &sdma_instance->page;
- bool gfx_sched_stopped = false, page_sched_stopped = false;
mutex_lock(&sdma_instance->engine_reset_mutex);
- /* Stop the scheduler's work queue for the GFX and page rings if they are running.
- * This ensures that no new tasks are submitted to the queues while
- * the reset is in progress.
- */
- if (!amdgpu_ring_sched_ready(gfx_ring)) {
+
+ if (!caller_handles_kernel_queues) {
+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
drm_sched_wqueue_stop(&gfx_ring->sched);
- gfx_sched_stopped = true;
- }
- if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
- drm_sched_wqueue_stop(&page_ring->sched);
- page_sched_stopped = true;
+ if (adev->sdma.has_page_queue)
+ drm_sched_wqueue_stop(&page_ring->sched);
}
- /* Invoke all registered pre_reset callbacks */
- list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
- if (funcs->pre_reset) {
- ret = funcs->pre_reset(adev, instance_id);
- if (ret) {
- dev_err(adev->dev,
- "beforeReset callback failed for instance %u: %d\n",
- instance_id, ret);
- goto exit;
- }
- }
+ if (sdma_instance->funcs->stop_kernel_queue) {
+ sdma_instance->funcs->stop_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->stop_kernel_queue(page_ring);
}
/* Perform the SDMA reset for the specified instance */
- ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+ ret = amdgpu_sdma_soft_reset(adev, instance_id);
if (ret) {
- dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
+ dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id);
goto exit;
}
- /* Invoke all registered post_reset callbacks */
- list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
- if (funcs->post_reset) {
- ret = funcs->post_reset(adev, instance_id);
- if (ret) {
- dev_err(adev->dev,
- "afterReset callback failed for instance %u: %d\n",
- instance_id, ret);
- goto exit;
- }
- }
+ if (sdma_instance->funcs->start_kernel_queue) {
+ sdma_instance->funcs->start_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->start_kernel_queue(page_ring);
}
exit:
- /* Restart the scheduler's work queue for the GFX and page rings
- * if they were stopped by this function. This allows new tasks
- * to be submitted to the queues after the reset is complete.
- */
- if (!ret) {
- if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
+ if (!caller_handles_kernel_queues) {
+ /* Restart the scheduler's work queue for the GFX and page rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ if (!ret) {
+ amdgpu_fence_driver_force_completion(gfx_ring);
drm_sched_wqueue_start(&gfx_ring->sched);
- }
- if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
- drm_sched_wqueue_start(&page_ring->sched);
+ if (adev->sdma.has_page_queue) {
+ amdgpu_fence_driver_force_completion(page_ring);
+ drm_sched_wqueue_start(&page_ring->sched);
+ }
}
}
mutex_unlock(&sdma_instance->engine_reset_mutex);
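With the callback list gone, each SDMA version now publishes its queue hooks through a per-instance ops table. A hypothetical wiring (the sdma_vX_Y_* names are illustrative; only the struct and the funcs field come from amdgpu_sdma.h):

static const struct amdgpu_sdma_funcs sdma_vX_Y_funcs = {
	.stop_kernel_queue = &sdma_vX_Y_stop_queue,
	.start_kernel_queue = &sdma_vX_Y_start_queue,
	.soft_reset_kernel_queue = &sdma_vX_Y_soft_reset_queue,
};

/* assigned once per instance, typically at sw_init time */
adev->sdma.instance[i].funcs = &sdma_vX_Y_funcs;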
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 47d56fd0589f..34311f32be4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -50,6 +50,12 @@ enum amdgpu_sdma_irq {
#define NUM_SDMA(x) hweight32(x)
+struct amdgpu_sdma_funcs {
+ int (*stop_kernel_queue)(struct amdgpu_ring *ring);
+ int (*start_kernel_queue)(struct amdgpu_ring *ring);
+ int (*soft_reset_kernel_queue)(struct amdgpu_device *adev, u32 instance_id);
+};
+
struct amdgpu_sdma_instance {
/* SDMA firmware */
const struct firmware *fw;
@@ -68,7 +74,7 @@ struct amdgpu_sdma_instance {
/* track guilty state of GFX and PAGE queues */
bool gfx_guilty;
bool page_guilty;
-
+ const struct amdgpu_sdma_funcs *funcs;
};
enum amdgpu_sdma_ras_memory_id {
@@ -103,17 +109,11 @@ struct amdgpu_sdma_ras {
struct amdgpu_ras_block_object ras_block;
};
-struct sdma_on_reset_funcs {
- int (*pre_reset)(struct amdgpu_device *adev, uint32_t instance_id);
- int (*post_reset)(struct amdgpu_device *adev, uint32_t instance_id);
- /* Linked list node to store this structure in a list; */
- struct list_head list;
-};
-
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
+ struct amdgpu_irq_src fence_irq;
struct amdgpu_irq_src ecc_irq;
struct amdgpu_irq_src vm_hole_irq;
struct amdgpu_irq_src doorbell_invalid_irq;
@@ -131,6 +131,8 @@ struct amdgpu_sdma {
uint32_t *ip_dump;
uint32_t supported_reset;
struct list_head reset_callback_list;
+ bool no_user_submission;
+ bool disable_uq;
};
/*
@@ -170,8 +172,8 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs);
-int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id);
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues);
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index e22cb2b5cd92..d45ebfb642ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -45,7 +45,11 @@
*/
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
- return AMDGPU_VA_RESERVED_SEQ64_START(adev);
+ u64 addr = AMDGPU_VA_RESERVED_SEQ64_START(adev);
+
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
}
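A minimal sketch of what the sign extension does, assuming the usual 48-bit canonical VA split (the driver expresses this through its AMDGPU_GMC_HOLE_START/AMDGPU_GMC_HOLE_END constants): a VA with bit 47 set has its upper bits filled so it lands above the hole.

static inline uint64_t sign_extend_va48(uint64_t addr)
{
	/* illustrative: mirror bit 47 into bits 63..48 */
	if (addr & (1ULL << 47))
		addr |= ~((1ULL << 48) - 1);

	return addr;
}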
/**
@@ -63,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
+ u64 seq64_addr, va_flags;
struct amdgpu_bo *bo;
struct drm_exec exec;
- u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -88,9 +92,11 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error;
}
- seq64_addr = amdgpu_seq64_get_va_base(adev);
+ seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- AMDGPU_PTE_READABLE);
+ va_flags);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
@@ -133,7 +139,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
vm = &fpriv->vm;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
@@ -156,6 +162,7 @@ error:
*
* @adev: amdgpu_device pointer
* @va: VA to access the seq in process address space
+ * @gpu_addr: GPU address to access the seq
* @cpu_addr: CPU address to access the seq
*
* Alloc a 64 bit memory from seq64 pool.
@@ -163,7 +170,8 @@ error:
* Returns:
* 0 on success or a negative error code on failure
*/
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va,
+ u64 *gpu_addr, u64 **cpu_addr)
{
unsigned long bit_pos;
@@ -172,7 +180,12 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
return -ENOSPC;
__set_bit(bit_pos, adev->seq64.used);
+
*va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+
+ if (gpu_addr)
+ *gpu_addr = bit_pos * sizeof(u64) + adev->seq64.gpu_addr;
+
*cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
return 0;
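The slot arithmetic above is the same for both address spaces: every seq64 slot is one u64, so slot N lives at byte offset N * 8 from the reserved VA base and from the kernel BO's GPU address alike. A worked example with made-up bases:

	unsigned long bit_pos = 5;			/* slot index */
	uint64_t va_base = 0x7fff00000000ULL;		/* illustrative */
	uint64_t gpu_base = 0x200000ULL;		/* illustrative */

	uint64_t va = bit_pos * sizeof(uint64_t) + va_base;	/* base + 40 */
	uint64_t gpu_addr = bit_pos * sizeof(uint64_t) + gpu_base;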
@@ -233,7 +246,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
*/
r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &adev->seq64.sbo, NULL,
+ &adev->seq64.sbo, &adev->seq64.gpu_addr,
(void **)&adev->seq64.cpu_base_addr);
if (r) {
dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
index 4203b2ab318d..26a249aaaee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -32,13 +32,14 @@
struct amdgpu_seq64 {
struct amdgpu_bo *sbo;
u32 num_sem;
+ u64 gpu_addr;
u64 *cpu_base_addr;
DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS);
};
void amdgpu_seq64_fini(struct amdgpu_device *adev);
int amdgpu_seq64_init(struct amdgpu_device *adev);
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 *gpu_addr, u64 **cpu_addr);
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5576ed0b508f..d6ae9974c952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -249,9 +249,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (resv == NULL)
return -EINVAL;
-
- /* TODO: Use DMA_RESV_USAGE_READ here */
- dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
+ /* Implicitly sync only to KERNEL, WRITE and READ */
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
dma_fence_chain_for_each(f, f) {
struct dma_fence *tmp = dma_fence_chain_contained(f);
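For context on the change above: the dma_resv usage levels are ordered, and the fence iterator returns every fence whose usage is at or below the requested level, so moving the cursor from BOOKKEEP to READ keeps kernel and implicit read/write fences while skipping bookkeeping-only ones.

/* excerpt of enum dma_resv_usage from include/linux/dma-resv.h;
 * the cursor level acts as a cut-off */
enum dma_resv_usage {
	DMA_RESV_USAGE_KERNEL,		/* always returned */
	DMA_RESV_USAGE_WRITE,		/* implicit write sync */
	DMA_RESV_USAGE_READ,		/* new cut-off used above */
	DMA_RESV_USAGE_BOOKKEEP,	/* no longer visited */
};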
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 11dd2e0f7979..d13e64a69e25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -167,25 +167,23 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__field(struct dma_fence *, fence)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -193,24 +191,22 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -551,23 +547,19 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
__string(ring, sched_job->base.sched->name)
- __field(uint64_t, id)
__field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
__assign_str(ring);
- __entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __get_str(ring), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("job ring=%s need pipe sync to fence=%llu:%llu",
+ __get_str(ring), __entry->ctx, __entry->seqno)
);
TRACE_EVENT(amdgpu_reset_reg_dumps,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 53b71e9d8076..27ab4e754b2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -299,7 +299,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_bo *abo_src, *abo_dst;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -934,7 +935,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
- DRM_ERROR("failed to pin userptr\n");
+ dev_err(adev->dev, "failed to pin userptr\n");
return r;
}
} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
@@ -1060,7 +1061,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
- } else if (ttm->sg && gtt->gobj->import_attach) {
+ } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
struct dma_buf_attachment *attach;
attach = gtt->gobj->import_attach;
@@ -1781,7 +1782,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
&ctx->c2p_bo,
NULL);
if (ret) {
- DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
amdgpu_ttm_training_reserve_vram_fini(adev);
return ret;
}
@@ -1793,7 +1794,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
adev, adev->gmc.real_vram_size - reserve_size,
reserve_size, &adev->mman.fw_reserved_memory, NULL);
if (ret) {
- DRM_ERROR("alloc tmr failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
NULL, NULL);
return ret;
@@ -1864,13 +1865,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
adev->need_swiotlb,
dma_addressing_limited(adev->dev));
if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ dev_err(adev->dev,
+ "failed initializing buffer object driver(%d).\n", r);
return r;
}
r = amdgpu_ttm_pools_init(adev);
if (r) {
- DRM_ERROR("failed to init ttm pools(%d).\n", r);
+ dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
return r;
}
adev->mman.initialized = true;
@@ -1878,7 +1880,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize VRAM pool with all of VRAM divided into pages */
r = amdgpu_vram_mgr_init(adev);
if (r) {
- DRM_ERROR("Failed initializing VRAM heap.\n");
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
return r;
}
@@ -1958,7 +1960,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
}
- DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n",
(unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
/* Compute GTT size, either based on TTM limit
@@ -1981,10 +1983,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
+ dev_err(adev->dev, "Failed initializing GTT heap.\n");
return r;
}
- DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
+ dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n",
(unsigned int)(gtt_size / (1024 * 1024)));
if (adev->flags & AMD_IS_APU) {
@@ -1995,40 +1997,40 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize doorbell pool on PCI BAR */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
if (r) {
- DRM_ERROR("Failed initializing doorbell heap.\n");
+ dev_err(adev->dev, "Failed initializing doorbell heap.\n");
return r;
}
/* Create a doorbell page for kernel usage */
r = amdgpu_doorbell_create_kernel_doorbells(adev);
if (r) {
- DRM_ERROR("Failed to initialize kernel doorbells.\n");
+ dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
return r;
}
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
- DRM_ERROR("Failed initializing PREEMPT heap.\n");
+ dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
return r;
}
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
- DRM_ERROR("Failed initializing GDS heap.\n");
+ dev_err(adev->dev, "Failed initializing GDS heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
if (r) {
- DRM_ERROR("Failed initializing gws heap.\n");
+ dev_err(adev->dev, "Failed initializing gws heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
if (r) {
- DRM_ERROR("Failed initializing oa heap.\n");
+ dev_err(adev->dev, "Failed initializing oa heap.\n");
return r;
}
if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
@@ -2060,6 +2062,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
/* return the FW reserved memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
NULL);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
+ NULL);
if (adev->mman.stolen_reserved_size)
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
NULL, NULL);
@@ -2081,13 +2085,15 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
+ amdgpu_doorbell_fini(adev);
+
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
- DRM_INFO("amdgpu: ttm finalized\n");
+ dev_info(adev->dev, "amdgpu: ttm finalized\n");
}
/**
@@ -2119,8 +2125,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
return;
}
@@ -2128,8 +2135,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
goto error_free_entity;
}
} else {
@@ -2200,7 +2208,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
int r;
if (!direct_submit && !ring->sched.ready) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -2235,7 +2244,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
error_free:
amdgpu_job_free(job);
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
return r;
}
@@ -2354,7 +2363,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
int r;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to clear memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to clear memory with ring turned off.\n");
return -EINVAL;
}
@@ -2414,7 +2424,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
man = ttm_manager_type(&adev->mman.bdev, mem_type);
break;
default:
- DRM_ERROR("Trying to evict invalid memory type\n");
+ dev_err(adev->dev, "Trying to evict invalid memory type\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 208b7d1d8a27..2309df3f68a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -86,6 +86,7 @@ struct amdgpu_mman {
uint32_t discovery_tmr_size;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
+ struct amdgpu_bo *fw_reserved_memory_extend;
/* firmware VRAM reservation */
u64 fw_vram_usage_start_offset;
@@ -154,6 +155,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size);
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3d9e9fdc10b4..e96f24e9ad57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -30,6 +30,11 @@
#define AMDGPU_UCODE_NAME_MAX (128)
+static const struct kicker_device kicker_device_list[] = {
+ {0x744B, 0x00},
+ {0x7551, 0xC8}
+};
+
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -765,8 +770,10 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version);
FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
static struct attribute *fw_attrs[] = {
&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -779,8 +786,9 @@ static struct attribute *fw_attrs[] = {
&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
- &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
- &dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
+ &dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr,
+ &dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr,
+ &dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr,
NULL
};
@@ -1152,6 +1160,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf)
+ adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf);
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
@@ -1384,6 +1395,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return NULL;
}
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+ if (adev->pdev->device == kicker_device_list[i].device &&
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
+ }
+
+ return false;
+}
+
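+/*
+ * Illustrative sketch of how a caller might consume amdgpu_is_kicker_fw():
+ * select an alternate "kicker" firmware image when the device matches an
+ * entry in kicker_device_list. The helper name and the firmware file names
+ * are hypothetical.
+ */
+static inline int example_request_rlc_fw(struct amdgpu_device *adev,
+					 const char *ucode_prefix)
+{
+	if (amdgpu_is_kicker_fw(adev))
+		return amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+					    AMDGPU_UCODE_REQUIRED,
+					    "amdgpu/%s_rlc_kicker.bin",
+					    ucode_prefix);
+
+	return amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+				    AMDGPU_UCODE_REQUIRED,
+				    "amdgpu/%s_rlc.bin", ucode_prefix);
+}
+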
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4eedd92f000b..6349aad6da35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -25,6 +25,8 @@
#include "amdgpu_socbb.h"
+#define RS64_FW_UC_START_ADDR_LO 0x3000
+
struct common_firmware_header {
uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
uint32_t header_size_bytes; /* size of just the header in bytes */
@@ -600,6 +602,12 @@ struct amdgpu_firmware {
void *fw_buf_ptr;
uint64_t fw_buf_mc;
+ uint32_t pldm_version;
+};
+
+struct kicker_device {
+ unsigned short device;
+ u8 revision;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
@@ -629,5 +637,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 0a1ef95b2866..c92b8794aa73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -529,6 +529,7 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
pfns[i] = err_data.err_addr[i].retired_page;
}
ret = i;
+ adev->umc.err_addr_cnt = err_data.err_addr_cnt;
out:
kfree(err_data.err_addr);
@@ -561,3 +562,26 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
return 0;
}
+
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps)
+{
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+ int ret;
+
+	/* nps: the memory partition mode the pa belongs to */
+ addr_in.pa.pa = pa | ((uint64_t)nps << 58);
+ addr_in.addr_type = TA_RAS_PA_TO_MCA;
+ ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out);
+ if (ret) {
+ dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx",
+ pa);
+
+ return ret;
+ }
+
+ *mca = addr_out.ma.err_addr;
+
+ return 0;
+}
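+
+/*
+ * Illustrative sketch (hypothetical helper): translate a retired page back
+ * to its MCA address before logging it. The nps argument must be the
+ * memory partition mode the physical address was computed under, since
+ * amdgpu_umc_pa2mca() encodes it into bits 58+ of the TA query input.
+ */
+static inline int example_log_bad_page_mca(struct amdgpu_device *adev,
+					   uint64_t pa,
+					   enum amdgpu_memory_partition nps)
+{
+	uint64_t mca;
+	int ret;
+
+	ret = amdgpu_umc_pa2mca(adev, pa, &mca, nps);
+	if (ret)
+		return ret;
+
+	dev_info(adev->dev, "pa 0x%llx maps to mca 0x%llx (nps %d)\n",
+		 pa, mca, nps);
+	return 0;
+}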
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 857693bcd8d4..ec203f9e5ffa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -78,6 +78,18 @@
#define UMC_NPS_SHIFT 40
#define UMC_NPS_MASK 0xffULL
+/* three column bits and one row bit of the MCA address are flipped
+ * during bad page retirement
+ */
+#define RETIRE_FLIP_BITS_NUM 4
+
+struct amdgpu_umc_flip_bits {
+ uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
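+
+/*
+ * Hedged sketch of how these flip bits are typically consumed (hypothetical
+ * helper): with bit_num flip bits there are 1 << bit_num aliased physical
+ * addresses, and bad page retirement walks every combination.
+ */
+static inline void example_walk_flip_combinations(const struct amdgpu_umc_flip_bits *fb,
+						  uint64_t pa)
+{
+	uint32_t combo, i;
+
+	for (combo = 0; combo < (1U << fb->bit_num); combo++) {
+		uint64_t candidate = pa;
+
+		for (i = 0; i < fb->bit_num; i++)
+			if (combo & (1U << i))
+				candidate ^= 1ULL << fb->flip_bits_in_pa[i];
+
+		/* each candidate page would be queued for retirement here */
+	}
+}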
+
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -100,6 +112,7 @@ struct amdgpu_umc_ras {
bool dump_addr);
uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
uint64_t mca_addr, uint64_t retired_page);
+ void (*get_retire_flip_bits)(struct amdgpu_device *adev);
};
struct amdgpu_umc_funcs {
@@ -130,6 +143,10 @@ struct amdgpu_umc {
/* active mask for umc node instance */
unsigned long active_mask;
+
+ struct amdgpu_umc_flip_bits flip_bits;
+
+ unsigned long err_addr_cnt;
};
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
@@ -172,4 +189,6 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
uint64_t err_addr, uint32_t ch, uint32_t umc,
uint32_t node, uint32_t socket,
struct ta_ras_query_address_output *addr_out, bool dump_addr);
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
new file mode 100644
index 000000000000..c3ace8030530
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drm_auth.h>
+#include <drm/drm_exec.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
+{
+ int i;
+ u32 userq_ip_mask = 0;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ userq_ip_mask |= (1 << i);
+ }
+
+ return userq_ip_mask;
+}
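+
+/*
+ * Usage sketch (hypothetical helper): support for a given IP can be gated
+ * on this mask, e.g. before advertising user queues to userspace.
+ */
+static inline bool example_userq_ip_supported(struct amdgpu_device *adev,
+					      u32 ip_type)
+{
+	return amdgpu_userq_get_supported_ip_mask(adev) & (1 << ip_type);
+}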
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->unmap(uq_mgr, queue);
+ if (r)
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ else
+ queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
+ return r;
+}
+
+static int
+amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+ r = userq_funcs->map(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+ return r;
+}
+
+static void
+amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct dma_fence *f = queue->last_fence;
+ int ret;
+
+ if (f && !dma_fence_is_signaled(f)) {
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0)
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ }
+}
+
+static void
+amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ int queue_id)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ amdgpu_userq_fence_driver_free(queue);
+ idr_remove(&uq_mgr->userq_idr, queue_id);
+ kfree(queue);
+}
+
+int
+amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ /* Resume all the queues for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
+ ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
+
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return ret;
+}
+
+static struct amdgpu_usermode_queue *
+amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
+{
+ return idr_find(&uq_mgr->userq_idr, qid);
+}
+
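+/*
+ * Note: returns with uq_mgr->userq_mutex held and a valid, unsignaled
+ * eviction fence in place; the caller is responsible for dropping the
+ * mutex when it is done with the fence.
+ */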
+void
+amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+retry:
+ /* Flush any pending resume work to create ev_fence */
+ flush_delayed_work(&uq_mgr->resume_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+ mutex_unlock(&uq_mgr->userq_mutex);
+ /*
+ * Looks like there was no pending resume work,
+ * add one now to create a valid eviction fence
+ */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+ goto retry;
+ }
+}
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ bp.type = ttm_bo_type_kernel;
+ bp.size = size;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(userq_obj->obj, true);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
+ goto free_obj;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
+ goto unresv;
+ }
+
+ r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
+ goto unresv;
+ }
+
+ userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
+ amdgpu_bo_unreserve(userq_obj->obj);
+ memset(userq_obj->cpu_ptr, 0, size);
+ return 0;
+
+unresv:
+ amdgpu_bo_unreserve(userq_obj->obj);
+
+free_obj:
+ amdgpu_bo_unref(&userq_obj->obj);
+ return r;
+}
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj)
+{
+ amdgpu_bo_kunmap(userq_obj->obj);
+ amdgpu_bo_unref(&userq_obj->obj);
+}
+
+uint64_t
+amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp)
+{
+ uint64_t index;
+ struct drm_gem_object *gobj;
+ struct amdgpu_userq_obj *db_obj = db_info->db_obj;
+ int r, db_size;
+
+ gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
+ if (gobj == NULL) {
+ drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ drm_gem_object_put(gobj);
+
+ r = amdgpu_bo_reserve(db_obj->obj, true);
+ if (r) {
+		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
+ goto unref_bo;
+ }
+
+ /* Pin the BO before generating the index, unpin in queue destroy */
+ r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
+ if (r) {
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
+ goto unresv_bo;
+ }
+
+ switch (db_info->queue_type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_HW_IP_COMPUTE:
+ case AMDGPU_HW_IP_DMA:
+ db_size = sizeof(u64);
+ break;
+
+ case AMDGPU_HW_IP_VCN_ENC:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
+ break;
+
+ case AMDGPU_HW_IP_VPE:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
+ break;
+
+ default:
+		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
+ db_info->queue_type);
+ r = -EINVAL;
+ goto unpin_bo;
+ }
+
+ index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
+ db_info->doorbell_offset, db_size);
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev),
+ "[Usermode queues] doorbell index=%lld\n", index);
+ amdgpu_bo_unreserve(db_obj->obj);
+ return index;
+
+unpin_bo:
+ amdgpu_bo_unpin(db_obj->obj);
+unresv_bo:
+ amdgpu_bo_unreserve(db_obj->obj);
+unref_bo:
+ amdgpu_bo_unref(&db_obj->obj);
+ return r;
+}
+
+static int
+amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ int r = 0;
+
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ queue = amdgpu_userq_find(uq_mgr, queue_id);
+ if (!queue) {
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return -EINVAL;
+ }
+ amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
+ r = amdgpu_bo_reserve(queue->db_obj.obj, true);
+ if (!r) {
+ amdgpu_bo_unpin(queue->db_obj.obj);
+ amdgpu_bo_unreserve(queue->db_obj.obj);
+ }
+ amdgpu_bo_unref(&queue->db_obj.obj);
+
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove_recursive(queue->debugfs_queue);
+#endif
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
+ mutex_unlock(&uq_mgr->userq_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+static int amdgpu_userq_priority_permit(struct drm_file *filp,
+ int priority)
+{
+ if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
+{
+ struct amdgpu_usermode_queue *queue = m->private;
+ struct amdgpu_bo *bo;
+ int r;
+
+ if (!queue || !queue->mqd.obj)
+ return -EINVAL;
+
+ bo = amdgpu_bo_ref(queue->mqd.obj);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "queue_type %d\n", queue->queue_type);
+ seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
+
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+}
+
+static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_mqd_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_mqd_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_mqd_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static int
+amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_db_info db_info;
+ char *queue_name;
+ bool skip_map_queue;
+ uint64_t index;
+ int qid, r = 0;
+ int priority =
+ (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
+
+	/* Usermode queues are only supported for GFX, compute and SDMA IPs for now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ r = amdgpu_userq_priority_permit(filp, priority);
+ if (r)
+ return r;
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ /*
+	 * We may be creating a new queue while the other queues under this
+	 * uq_mgr are suspended, so wait for any pending resume work to
+	 * finish first.
+ *
+ * This will also make sure we have a valid eviction fence ready to be used.
+ */
+ mutex_lock(&adev->userq_mutex);
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ uq_funcs = adev->userq_funcs[args->in.ip_type];
+ if (!uq_funcs) {
+ drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
+ args->in.ip_type);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
+ if (!queue) {
+ drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
+ r = -ENOMEM;
+ goto unlock;
+ }
+ queue->doorbell_handle = args->in.doorbell_handle;
+ queue->queue_type = args->in.ip_type;
+ queue->vm = &fpriv->vm;
+ queue->priority = priority;
+
+ db_info.queue_type = queue->queue_type;
+ db_info.doorbell_handle = queue->doorbell_handle;
+ db_info.db_obj = &queue->db_obj;
+ db_info.doorbell_offset = args->in.doorbell_offset;
+
+ /* Convert relative doorbell offset into absolute doorbell index */
+ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
+ if (index == (uint64_t)-EINVAL) {
+ drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
+ kfree(queue);
+ goto unlock;
+ }
+
+ queue->doorbell_index = index;
+ xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
+ r = amdgpu_userq_fence_driver_alloc(adev, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
+ goto unlock;
+ }
+
+ r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to create Queue\n");
+ amdgpu_userq_fence_driver_free(queue);
+ kfree(queue);
+ goto unlock;
+ }
+
+ qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
+ if (qid < 0) {
+ drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+ /* don't map the queue if scheduling is halted */
+ if (adev->userq_halt_for_enforce_isolation &&
+ ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
+ skip_map_queue = true;
+ else
+ skip_map_queue = false;
+ if (!skip_map_queue) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map Queue\n");
+ idr_remove(&uq_mgr->userq_idr, qid);
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ goto unlock;
+ }
+ }
+
+ queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
+ if (!queue_name) {
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ /* Queue dentry per client to hold MQD information */
+ queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
+ debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
+#endif
+ kfree(queue_name);
+
+ args->out.queue_id = qid;
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+
+ return r;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
+ return -EINVAL;
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
+ if (args->in.ip_type ||
+ args->in.doorbell_handle ||
+ args->in.doorbell_offset ||
+ args->in.flags ||
+ args->in.queue_va ||
+ args->in.queue_size ||
+ args->in.rptr_va ||
+ args->in.wptr_va ||
+ args->in.mqd ||
+ args->in.mqd_size)
+ return -EINVAL;
+ r = amdgpu_userq_destroy(filp, args->in.queue_id);
+ if (r)
+ drm_file_err(filp, "Failed to destroy usermode queue\n");
+ break;
+
+ default:
+ drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Resume all the queues for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int ret;
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ DRM_ERROR("Fail to validate\n");
+
+ return ret;
+}
+
+static int
+amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_va *bo_va;
+ struct ww_acquire_ctx *ticket;
+ struct drm_exec exec;
+ struct amdgpu_bo *bo;
+ struct dma_resv *resv;
+ bool clear, unlock;
+ int ret = 0;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = amdgpu_vm_lock_pd(vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret)) {
+ drm_file_err(uq_mgr->file, "Failed to lock PD\n");
+ goto unlock_all;
+ }
+
+ /* Lock the done list */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status) {
+ bo = bo_va->base.bo;
+ if (!bo)
+ continue;
+
+ ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+ }
+ }
+
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+		/* Per VM BOs never need to be cleared in the page tables */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ goto unlock_all;
+ spin_lock(&vm->status_lock);
+ }
+
+ ticket = &exec.ticket;
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
+ base.vm_status);
+ resv = bo_va->base.bo->tbo.base.resv;
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ ret = amdgpu_userq_validate_vm_bo(NULL, bo);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BO\n");
+ goto unlock_all;
+ }
+
+ /* Try to reserve the BO to avoid clearing its ptes */
+ if (!adev->debug_vm && dma_resv_trylock(resv)) {
+ clear = false;
+ unlock = true;
+ /* The caller is already holding the reservation lock */
+ } else if (dma_resv_locking_ctx(resv) == ticket) {
+ clear = false;
+ unlock = false;
+ /* Somebody else is using the BO right now */
+ } else {
+ clear = true;
+ unlock = false;
+ }
+
+ ret = amdgpu_vm_bo_update(adev, bo_va, clear);
+
+ if (unlock)
+ dma_resv_unlock(resv);
+ if (ret)
+ goto unlock_all;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
+
+unlock_all:
+ drm_exec_fini(&exec);
+ return ret;
+}
+
+static void amdgpu_userq_restore_worker(struct work_struct *work)
+{
+ struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ int ret;
+
+ flush_delayed_work(&fpriv->evf_mgr.suspend_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ ret = amdgpu_userq_validate_bos(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
+ goto unlock;
+ }
+
+ ret = amdgpu_userq_restore_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static int
+amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Try to unmap all the queues in this process ctx */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id, ret;
+
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ struct dma_fence *f = queue->last_fence;
+
+ if (!f || dma_fence_is_signaled(f))
+ continue;
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0) {
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+void
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ int ret;
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+
+ /* Wait for any pending userqueue fence work to finish */
+ ret = amdgpu_userq_wait_for_signal(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
+ return;
+ }
+
+ ret = amdgpu_userq_evict_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
+ return;
+ }
+
+ /* Signal current eviction fence */
+ amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
+
+ if (evf_mgr->fd_closing) {
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ return;
+ }
+
+ /* Schedule a resume work */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+}
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev)
+{
+ mutex_init(&userq_mgr->userq_mutex);
+ idr_init_base(&userq_mgr->userq_idr, 1);
+ userq_mgr->adev = adev;
+ userq_mgr->file = file_priv;
+
+ mutex_lock(&adev->userq_mutex);
+ list_add(&userq_mgr->list, &adev->userq_mgr_list);
+ mutex_unlock(&adev->userq_mutex);
+
+ INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
+ return 0;
+}
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
+{
+ struct amdgpu_device *adev = userq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ uint32_t queue_id;
+
+ cancel_delayed_work_sync(&userq_mgr->resume_work);
+
+ mutex_lock(&adev->userq_mutex);
+ mutex_lock(&userq_mgr->userq_mutex);
+ idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
+ amdgpu_userq_unmap_helper(userq_mgr, queue);
+ amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
+ }
+
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ if (uqm == userq_mgr) {
+ list_del(&uqm->list);
+ break;
+ }
+ }
+ idr_destroy(&userq_mgr->userq_idr);
+ mutex_unlock(&userq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+ mutex_destroy(&userq_mgr->userq_mutex);
+}
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_resume(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already stopped!\n");
+ adev->userq_halt_for_enforce_isolation = true;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+	/* only need to start gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (!adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already started!\n");
+ adev->userq_halt_for_enforce_isolation = false;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
new file mode 100644
index 000000000000..b1ca91b7cda4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_USERQ_H_
+#define AMDGPU_USERQ_H_
+#include "amdgpu_eviction_fence.h"
+
+#define AMDGPU_MAX_USERQ_COUNT 512
+
+#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
+#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
+#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+
+enum amdgpu_userq_state {
+ AMDGPU_USERQ_STATE_UNMAPPED = 0,
+ AMDGPU_USERQ_STATE_MAPPED,
+ AMDGPU_USERQ_STATE_PREEMPTED,
+ AMDGPU_USERQ_STATE_HUNG,
+};
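+
+/*
+ * The map/unmap helpers in amdgpu_userq.c move a queue between UNMAPPED
+ * and MAPPED, degrading it to HUNG when the firmware call fails;
+ * PREEMPTED is driven by the IP-specific backends (not shown here).
+ */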
+
+struct amdgpu_mqd_prop;
+
+struct amdgpu_userq_obj {
+ void *cpu_ptr;
+ uint64_t gpu_addr;
+ struct amdgpu_bo *obj;
+};
+
+struct amdgpu_usermode_queue {
+ int queue_type;
+ enum amdgpu_userq_state state;
+ uint64_t doorbell_handle;
+ uint64_t doorbell_index;
+ uint64_t flags;
+ struct amdgpu_mqd_prop *userq_prop;
+ struct amdgpu_userq_mgr *userq_mgr;
+ struct amdgpu_vm *vm;
+ struct amdgpu_userq_obj mqd;
+ struct amdgpu_userq_obj db_obj;
+ struct amdgpu_userq_obj fw_obj;
+ struct amdgpu_userq_obj wptr_obj;
+ struct xarray fence_drv_xa;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *last_fence;
+ u32 xcp_id;
+ int priority;
+ struct dentry *debugfs_queue;
+};
+
+struct amdgpu_userq_funcs {
+ int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args,
+ struct amdgpu_usermode_queue *queue);
+ void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *uq);
+ int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*map)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+};
+
+/* Usermode queues for gfx */
+struct amdgpu_userq_mgr {
+ struct idr userq_idr;
+ struct mutex userq_mutex;
+ struct amdgpu_device *adev;
+ struct delayed_work resume_work;
+ struct list_head list;
+ struct drm_file *file;
+};
+
+struct amdgpu_db_info {
+ uint64_t doorbell_handle;
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ struct amdgpu_userq_obj *db_obj;
+};
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev);
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size);
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj);
+
+void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
+
+void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp);
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev);
+int amdgpu_userq_resume(struct amdgpu_device *adev);
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
new file mode 100644
index 000000000000..c2a983ff23c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -0,0 +1,967 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_syncobj.h>
+
+#include "amdgpu.h"
+#include "amdgpu_userq_fence.h"
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops;
+static struct kmem_cache *amdgpu_userq_fence_slab;
+
+int amdgpu_userq_fence_slab_init(void)
+{
+ amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
+ sizeof(struct amdgpu_userq_fence),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!amdgpu_userq_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amdgpu_userq_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(amdgpu_userq_fence_slab);
+}
+
+static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
+{
+ if (!f || f->ops != &amdgpu_userq_fence_ops)
+ return NULL;
+
+ return container_of(f, struct amdgpu_userq_fence, base);
+}
+
+static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ return le64_to_cpu(*fence_drv->cpu_addr);
+}
+
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long flags;
+ int r;
+
+ fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
+ if (!fence_drv)
+ return -ENOMEM;
+
+ /* Acquire seq64 memory */
+ r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
+ &fence_drv->cpu_addr);
+ if (r)
+ goto free_fence_drv;
+
+ memset(fence_drv->cpu_addr, 0, sizeof(u64));
+
+ kref_init(&fence_drv->refcount);
+ INIT_LIST_HEAD(&fence_drv->fences);
+ spin_lock_init(&fence_drv->fence_list_lock);
+
+ fence_drv->adev = adev;
+ fence_drv->context = dma_fence_context_alloc(1);
+ get_task_comm(fence_drv->timeline_name, current);
+
+ xa_lock_irqsave(&adev->userq_xa, flags);
+ r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
+ fence_drv, GFP_KERNEL));
+ xa_unlock_irqrestore(&adev->userq_xa, flags);
+ if (r)
+ goto free_seq64;
+
+ userq->fence_drv = fence_drv;
+
+ return 0;
+
+free_seq64:
+ amdgpu_seq64_free(adev, fence_drv->va);
+free_fence_drv:
+ kfree(fence_drv);
+
+ return r;
+}
+
+static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long index;
+
+ if (xa_empty(xa))
+ return;
+
+ xa_lock(xa);
+ xa_for_each(xa, index, fence_drv) {
+ __xa_erase(xa, index);
+ amdgpu_userq_fence_driver_put(fence_drv);
+ }
+
+ xa_unlock(xa);
+}
+
+void
+amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
+{
+ amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
+ xa_destroy(&userq->fence_drv_xa);
+ /* Drop the fence_drv reference held by user queue */
+ amdgpu_userq_fence_driver_put(userq->fence_drv);
+}
+
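+/*
+ * Fences are kept on fence_drv->fences in submission order, so the scan
+ * below can stop at the first fence whose seqno is beyond the current
+ * rptr; everything earlier is signaled and released in a single pass.
+ */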
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ struct amdgpu_userq_fence *userq_fence, *tmp;
+ struct dma_fence *fence;
+ u64 rptr;
+ int i;
+
+ if (!fence_drv)
+ return;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+
+ spin_lock(&fence_drv->fence_list_lock);
+ list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
+ fence = &userq_fence->base;
+
+ if (rptr < fence->seqno)
+ break;
+
+ dma_fence_signal(fence);
+
+ for (i = 0; i < userq_fence->fence_drv_array_count; i++)
+ amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
+
+ list_del(&userq_fence->link);
+ dma_fence_put(fence);
+ }
+ spin_unlock(&fence_drv->fence_list_lock);
+}
+
+void amdgpu_userq_fence_driver_destroy(struct kref *ref)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
+ struct amdgpu_userq_fence_driver,
+ refcount);
+ struct amdgpu_userq_fence_driver *xa_fence_drv;
+ struct amdgpu_device *adev = fence_drv->adev;
+ struct amdgpu_userq_fence *fence, *tmp;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long index, flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
+ f = &fence->base;
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_set_error(f, -ECANCELED);
+ dma_fence_signal(f);
+ }
+
+ list_del(&fence->link);
+ dma_fence_put(f);
+ }
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ xa_lock_irqsave(xa, flags);
+ xa_for_each(xa, index, xa_fence_drv)
+ if (xa_fence_drv == fence_drv)
+ __xa_erase(xa, index);
+ xa_unlock_irqrestore(xa, flags);
+
+ /* Free seq64 memory */
+ amdgpu_seq64_free(adev, fence_drv->va);
+ kfree(fence_drv);
+}
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_get(&fence_drv->refcount);
+}
+
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
+}
+
+static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
+{
+ *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
+ return *userq_fence ? 0 : -ENOMEM;
+}
+
+static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
+ struct amdgpu_userq_fence *userq_fence,
+ u64 seq, struct dma_fence **f)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *fence;
+ unsigned long flags;
+
+ fence_drv = userq->fence_drv;
+ if (!fence_drv)
+ return -EINVAL;
+
+ spin_lock_init(&userq_fence->lock);
+ INIT_LIST_HEAD(&userq_fence->link);
+ fence = &userq_fence->base;
+ userq_fence->fence_drv = fence_drv;
+
+ dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+ dma_fence_get(fence);
+
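+	/*
+	 * fence_drv_xa holds deferred fence driver references that must
+	 * stay alive until this fence signals; they are moved into
+	 * fence_drv_array here and released one by one in
+	 * amdgpu_userq_fence_driver_process() once the fence signals.
+	 */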
+ if (!xa_empty(&userq->fence_drv_xa)) {
+ struct amdgpu_userq_fence_driver *stored_fence_drv;
+ unsigned long index, count = 0;
+ int i = 0;
+
+ xa_lock(&userq->fence_drv_xa);
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
+ count++;
+
+ userq_fence->fence_drv_array =
+ kvmalloc_array(count,
+ sizeof(struct amdgpu_userq_fence_driver *),
+ GFP_ATOMIC);
+
+ if (userq_fence->fence_drv_array) {
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
+ userq_fence->fence_drv_array[i] = stored_fence_drv;
+ __xa_erase(&userq->fence_drv_xa, index);
+ i++;
+ }
+ }
+
+ userq_fence->fence_drv_array_count = i;
+ xa_unlock(&userq->fence_drv_xa);
+ } else {
+ userq_fence->fence_drv_array = NULL;
+ userq_fence->fence_drv_array_count = 0;
+ }
+
+ /* Check if hardware has already processed the job */
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ if (!dma_fence_is_signaled_locked(fence))
+ list_add_tail(&userq_fence->link, &fence_drv->fences);
+ else
+ dma_fence_put(fence);
+
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ *f = fence;
+
+ return 0;
+}
+
+static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
+{
+ return "amdgpu_userq_fence";
+}
+
+static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+
+ return fence->fence_drv->timeline_name;
+}
+
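+/*
+ * Worked example: a fence created with wptr (seqno) 40 reports signaled
+ * once the hardware has written 40 or more to the seq64 slot; an rptr of
+ * 39 leaves it pending.
+ */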
+static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 rptr, wptr;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+ wptr = fence->base.seqno;
+
+ if (rptr >= wptr)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
+ struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
+ struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
+
+ /* Release the fence driver reference */
+ amdgpu_userq_fence_driver_put(fence_drv);
+
+ kvfree(userq_fence->fence_drv_array);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+}
+
+static void amdgpu_userq_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_userq_fence_free);
+}
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops = {
+ .get_driver_name = amdgpu_userq_fence_get_driver_name,
+ .get_timeline_name = amdgpu_userq_fence_get_timeline_name,
+ .signaled = amdgpu_userq_fence_signaled,
+ .release = amdgpu_userq_fence_release,
+};
+
+/**
+ * amdgpu_userq_fence_read_wptr - Read the userq wptr value
+ *
+ * @queue: user mode queue structure pointer
+ * @wptr: write pointer value
+ *
+ * Read the wptr value from userq's MQD. The userq signal IOCTL
+ * creates a dma_fence for the shared buffers that expects the
+ * RPTR value written to seq64 memory >= WPTR.
+ *
+ * Returns wptr value on success, error on failure.
+ */
+static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
+ u64 *wptr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ u64 addr, *ptr;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ addr = queue->userq_prop->wptr_gpu_addr;
+ addr &= AMDGPU_GMC_HOLE_MASK;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
+ if (!mapping) {
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
+ return -EINVAL;
+ }
+
+ bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("Failed to reserve userqueue wptr bo");
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, (void **)&ptr);
+ if (r) {
+ DRM_ERROR("Failed mapping the userqueue wptr bo");
+ goto map_error;
+ }
+
+ *wptr = le64_to_cpu(*ptr);
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+
+map_error:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return r;
+}
+
+static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
+{
+ dma_fence_put(fence);
+}
+
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct drm_amdgpu_userq_signal *args = data;
+ struct drm_gem_object **gobj_write = NULL;
+ struct drm_gem_object **gobj_read = NULL;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_fence *userq_fence;
+ struct drm_syncobj **syncobj = NULL;
+ u32 *bo_handles_write, num_write_bo_handles;
+ u32 *syncobj_handles, num_syncobj_handles;
+ u32 *bo_handles_read, num_read_bo_handles;
+ int r, i, entry, rentry, wentry;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ u64 wptr;
+
+ num_syncobj_handles = args->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ /* Array of pointers to the looked up syncobjs */
+ syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
+ if (!syncobj) {
+ r = -ENOMEM;
+ goto free_syncobj_handles;
+ }
+
+ for (entry = 0; entry < num_syncobj_handles; entry++) {
+ syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
+ if (!syncobj[entry]) {
+ r = -ENOENT;
+ goto free_syncobj;
+ }
+ }
+
+ num_read_bo_handles = args->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
+ sizeof(u32) * num_read_bo_handles);
+ if (IS_ERR(bo_handles_read)) {
+ r = PTR_ERR(bo_handles_read);
+ goto free_syncobj;
+ }
+
+ /* Array of pointers to the GEM read objects */
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_bo_handles_read;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ num_write_bo_handles = args->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
+ sizeof(u32) * num_write_bo_handles);
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto put_gobj_read;
+ }
+
+ /* Array of pointers to the GEM write objects */
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto free_bo_handles_write;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ /* Retrieve the user queue */
+ queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ if (!queue) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+
+ r = amdgpu_userq_fence_read_wptr(queue, &wptr);
+ if (r)
+ goto put_gobj_write;
+
+ r = amdgpu_userq_fence_alloc(&userq_fence);
+ if (r)
+ goto put_gobj_write;
+
+	/* Getting here means the UQ is active; make sure the eviction fence is valid */
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ /* Create a new fence */
+ r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
+ if (r) {
+ mutex_unlock(&userq_mgr->userq_mutex);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+ goto put_gobj_write;
+ }
+
+ dma_fence_put(queue->last_fence);
+ queue->last_fence = dma_fence_get(fence);
+ mutex_unlock(&userq_mgr->userq_mutex);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+ }
+
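+ /* Attach the new fence to the reservation object of every BO involved */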
+ for (i = 0; i < num_read_bo_handles; i++) {
+ if (!gobj_read || !gobj_read[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_read[i]->resv, fence,
+ DMA_RESV_USAGE_READ);
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ if (!gobj_write || !gobj_write[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_write[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+ /* Attach the created fence to each syncobj */
+ for (i = 0; i < num_syncobj_handles; i++)
+ drm_syncobj_replace_fence(syncobj[i], fence);
+
+ /* Drop the reference acquired by the fence creation function */
+ dma_fence_put(fence);
+
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+free_syncobj:
+ while (entry-- > 0)
+ if (syncobj[entry])
+ drm_syncobj_put(syncobj[entry]);
+ kfree(syncobj);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+
+ return r;
+}
+
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
+ u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
+ struct drm_amdgpu_userq_fence_info *fence_info = NULL;
+ struct drm_amdgpu_userq_wait *wait_info = data;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_usermode_queue *waitq;
+ struct drm_gem_object **gobj_write;
+ struct drm_gem_object **gobj_read;
+ struct dma_fence **fences = NULL;
+ u16 num_points, num_fences = 0;
+ int r, i, rentry, wentry, cnt;
+ struct drm_exec exec;
+
+ num_read_bo_handles = wait_info->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
+ size_mul(sizeof(u32), num_read_bo_handles));
+ if (IS_ERR(bo_handles_read))
+ return PTR_ERR(bo_handles_read);
+
+ num_write_bo_handles = wait_info->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
+ size_mul(sizeof(u32), num_write_bo_handles));
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto free_bo_handles_read;
+ }
+
+ num_syncobj = wait_info->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj));
+ if (IS_ERR(syncobj_handles)) {
+ r = PTR_ERR(syncobj_handles);
+ goto free_bo_handles_write;
+ }
+
+ num_points = wait_info->num_syncobj_timeline_handles;
+ timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_handles)) {
+ r = PTR_ERR(timeline_handles);
+ goto free_syncobj_handles;
+ }
+
+ timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_points)) {
+ r = PTR_ERR(timeline_points);
+ goto free_timeline_handles;
+ }
+
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_timeline_points;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto put_gobj_read;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+ }
+
+ if (!wait_info->num_fences) {
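+ /* Query pass: userspace only wants the total fence count */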
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ num_fences++;
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Count the syncobj fences */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ num_fences++;
+ dma_fence_put(fence);
+ }
+
+ /* Count the GEM objects' fences */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence)
+ num_fences++;
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence)
+ num_fences++;
+ }
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+ * retrieve userq_fence_info. If num_fences = 0 we skip filling
+ * userq_fence_info and return the actual number of fences in
+ * wait_info->num_fences.
+ */
+ wait_info->num_fences = num_fences;
+ } else {
+ /* Array of fence info */
+ fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
+ if (!fence_info) {
+ r = -ENOMEM;
+ goto exec_fini;
+ }
+
+ /* Array of fences */
+ fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ r = -ENOMEM;
+ goto free_fence_info;
+ }
+
+ /* Retrieve the GEM read objects' fences */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ /* Retrieve the GEM write objects' fences */
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ dma_fence_unwrap_for_each(f, &iter, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ dma_fence_get(f);
+ fences[num_fences++] = f;
+ }
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Retrieve the syncobj fences */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ }
+
+ /*
+ * Keep only the latest fences to reduce the number of values
+ * given back to userspace.
+ */
+ num_fences = dma_fence_dedup_array(fences, num_fences);
+
+ waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ if (!waitq) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
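+ /* Second pass over the fences: wait out foreign fences, report user-queue fences */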
+ for (i = 0, cnt = 0; i < num_fences; i++) {
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence *userq_fence;
+ u32 index;
+
+ userq_fence = to_amdgpu_userq_fence(fences[i]);
+ if (!userq_fence) {
+ /*
+ * Just waiting on other driver fences should
+ * be good for now
+ */
+ r = dma_fence_wait(fences[i], true);
+ /* Null the consumed fence so the free_fences path won't put it twice */
+ dma_fence_put(fences[i]);
+ fences[i] = NULL;
+ if (r)
+ goto free_fences;
+
+ continue;
+ }
+
+ fence_drv = userq_fence->fence_drv;
+ /*
+ * We need to make sure the user queue releases its references
+ * to the fence drivers at some point before queue destruction.
+ * Otherwise, those references would accumulate until we run
+ * out of space and crash.
+ */
+ r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
+ xa_limit_32b, GFP_KERNEL);
+ if (r)
+ goto free_fences;
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+
+ /* Store the fence driver's GPU VA and the fence's sequence number */
+ fence_info[cnt].va = fence_drv->va;
+ fence_info[cnt].value = fences[i]->seqno;
+
+ dma_fence_put(fences[i]);
+ fences[i] = NULL;
+ /* Increment the actual userq fence count */
+ cnt++;
+ }
+
+ wait_info->num_fences = cnt;
+ /* Copy userq fence info to user space */
+ if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
+ fence_info, wait_info->num_fences * sizeof(*fence_info))) {
+ r = -EFAULT;
+ goto free_fences;
+ }
+
+ kfree(fences);
+ kfree(fence_info);
+ }
+
+ drm_exec_fini(&exec);
+ for (i = 0; i < num_read_bo_handles; i++)
+ drm_gem_object_put(gobj_read[i]);
+ kfree(gobj_read);
+
+ for (i = 0; i < num_write_bo_handles; i++)
+ drm_gem_object_put(gobj_write[i]);
+ kfree(gobj_write);
+
+ kfree(timeline_points);
+ kfree(timeline_handles);
+ kfree(syncobj_handles);
+ kfree(bo_handles_write);
+ kfree(bo_handles_read);
+
+ return 0;
+
+free_fences:
+ while (num_fences-- > 0)
+ dma_fence_put(fences[num_fences]);
+ kfree(fences);
+free_fence_info:
+ kfree(fence_info);
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_timeline_points:
+ kfree(timeline_points);
+free_timeline_handles:
+ kfree(timeline_handles);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
new file mode 100644
index 000000000000..97a125ab8a78
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_USERQ_FENCE_H__
+#define __AMDGPU_USERQ_FENCE_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_userq.h"
+
+struct amdgpu_userq_fence {
+ struct dma_fence base;
+ /*
+ * This lock is necessary to synchronize the
+ * userqueue dma fence operations.
+ */
+ spinlock_t lock;
+ struct list_head link;
+ unsigned long fence_drv_array_count;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence_driver **fence_drv_array;
+};
+
+struct amdgpu_userq_fence_driver {
+ struct kref refcount;
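+ /* Location of the 64-bit fence value: GPU VA exposed to userspace, GPU and CPU addresses */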
+ u64 va;
+ u64 gpu_addr;
+ u64 *cpu_addr;
+ u64 context;
+ /*
+ * This lock is necessary to synchronize the access
+ * to the fences list by the fence driver.
+ */
+ spinlock_t fence_list_lock;
+ struct list_head fences;
+ struct amdgpu_device *adev;
+ char timeline_name[TASK_COMM_LEN];
+};
+
+int amdgpu_userq_fence_slab_init(void);
+void amdgpu_userq_fence_slab_fini(void);
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_destroy(struct kref *ref);
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 1991dd3d1056..f1f67521c29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
@@ -353,9 +354,9 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
- /* err_event_athub will corrupt VCPU buffer, so we need to
+ /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
- if (in_ras_intr)
+ if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
return 0;
return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
@@ -1451,3 +1452,78 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
return ret;
}
+
+/**
+ * amdgpu_vcn_reset_engine - Reset a specific VCN engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: VCN engine instance to reset
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
+ uint32_t instance_id)
+{
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
+ int r, i;
+
+ mutex_lock(&vinst->engine_reset_mutex);
+ /* Stop the scheduler's work queue for the dec and enc rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
+
+ /* Perform the VCN reset for the specified instance */
+ r = vinst->reset(vinst);
+ if (r)
+ goto unlock;
+ r = amdgpu_ring_test_ring(&vinst->ring_dec);
+ if (r)
+ goto unlock;
+ for (i = 0; i < vinst->num_enc_rings; i++) {
+ r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
+ if (r)
+ goto unlock;
+ }
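+ /* Force completion of any fences still pending on the rings after the reset */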
+ amdgpu_fence_driver_force_completion(&vinst->ring_dec);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
+
+ /* Restart the scheduler's work queue for the dec and enc rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ drm_sched_wqueue_start(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
+
+unlock:
+ mutex_unlock(&vinst->engine_reset_mutex);
+
+ return r;
+}
+
+/**
+ * amdgpu_vcn_ring_reset - Reset a VCN ring
+ * @ring: ring to reset
+ * @vmid: vmid of guilty job
+ * @timedout_fence: fence of timed out job
+ *
+ * This helper is for VCN blocks without unified queues because
+ * resetting the engine resets all queues in that case. With
+ * unified queues we have one queue per engine.
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ return -EINVAL;
+
+ return amdgpu_vcn_reset_engine(adev, ring->me);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index cdcdae7f71ce..0bc0a94d7cf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -66,7 +66,6 @@
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
-#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000
@@ -331,7 +330,9 @@ struct amdgpu_vcn_inst {
struct dpg_pause_state *new_state);
int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
+ int (*reset)(struct amdgpu_vcn_inst *vinst);
bool using_unified_queue;
+ struct mutex engine_reset_mutex;
};
struct amdgpu_vcn_ras {
@@ -553,5 +554,7 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
-
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 0bb8cbe0dcc0..13f0cdeb59c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -1323,6 +1323,9 @@ static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bo
{
struct amdgpu_virt *virt = &adev->virt;
+ if (!virt->ops || !virt->ops->req_ras_err_count)
+ return -EOPNOTSUPP;
+
/* Host allows 15 ras telemetry requests per 60 seconds. Afterwhich, the Host
* will ignore incoming guest messages. Ratelimit the guest messages to
* prevent guest self DOS.
@@ -1378,14 +1381,16 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
used_size = host_telemetry->header.used_size;
if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
- return 0;
+ return -EINVAL;
cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
if (!cper_dump)
return -ENOMEM;
- if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0))
+ if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) {
+ ret = -EINVAL;
goto out;
+ }
*more = cper_dump->more;
@@ -1425,7 +1430,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
int ret = 0;
uint32_t more = 0;
- if (!amdgpu_sriov_ras_cper_en(adev))
+ if (!virt->ops || !virt->ops->req_ras_cper_dump)
return -EOPNOTSUPP;
do {
@@ -1434,7 +1439,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
adev, virt->fw_reserve.ras_telemetry, &more);
else
ret = 0;
- } while (more);
+ } while (more && !ret);
return ret;
}
@@ -1444,6 +1449,9 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
struct amdgpu_virt *virt = &adev->virt;
int ret = 0;
+ if (!amdgpu_sriov_ras_cper_en(adev))
+ return -EOPNOTSUPP;
+
if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
down_read_trylock(&adev->reset_domain->sem)) {
mutex_lock(&virt->ras.ras_telemetry_mutex);
@@ -1480,3 +1488,16 @@ bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
return true;
}
+
+/**
+ * amdgpu_virt_request_bad_pages() - request bad pages
+ * @adev: amdgpu device.
+ *
+ * Send a command to the GPU hypervisor to write new bad pages into the
+ * shared PF2VF region.
+ */
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_bad_pages)
+ virt->ops->req_bad_pages(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index df03dba67ab8..3da3ebb1d9a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -97,6 +97,7 @@ struct amdgpu_virt_ops {
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
int (*req_ras_err_count)(struct amdgpu_device *adev);
int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
+ int (*req_bad_pages)(struct amdgpu_device *adev);
};
/*
@@ -146,11 +147,15 @@ enum AMDGIM_FEATURE_FLAG {
enum AMDGIM_REG_ACCESS_FLAG {
/* Use PSP to program IH_RB_CNTL */
- AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
+ AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
/* Use RLC to program MMHUB regs */
- AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
+ AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
/* Use RLC to program GC regs */
- AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ /* Use PSP to program L1_TLB_CNTL */
+ AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
+ /* Use RLCG to program SQ_CONFIG1 */
+ AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4),
};
struct amdgim_pf2vf_info_v1 {
@@ -260,7 +265,10 @@ struct amdgpu_virt {
uint32_t reg_val_offs;
struct amdgpu_irq_src ack_irq;
struct amdgpu_irq_src rcv_irq;
+
struct work_struct flr_work;
+ struct work_struct bad_pages_work;
+
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
@@ -295,6 +303,9 @@ struct amdgpu_virt {
union amd_sriov_ras_caps ras_telemetry_en_caps;
struct amdgpu_virt_ras ras;
struct amd_sriov_ras_telemetry_error_count count_cache;
+
+ /* hibernate and resume with different VF feature for xgmi enabled system */
+ bool is_xgmi_node_migrate_enabled;
};
struct amdgpu_video_codec_info;
@@ -330,9 +341,17 @@ struct amdgpu_video_codec_info;
(amdgpu_sriov_vf((adev)) && \
((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+#define amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN)))
+
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
+#define amdgpu_sriov_reg_access_sq_config(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
@@ -376,6 +395,10 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
#define amdgpu_sriov_is_mes_info_enable(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
+
+#define amdgpu_virt_xgmi_migrate_enabled(adev) \
+ ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
@@ -423,4 +446,5 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ce52b4d75e94..0b87798daebd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -622,7 +622,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_warn_ratelimited("Evicted user BO is not reserved\n");
if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->pid);
+ pr_warn_ratelimited("pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
@@ -654,11 +654,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if VM is not evicting.
+ * True if VM is not evicting and all VM entities are not stopped
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- bool empty;
bool ret;
amdgpu_vm_eviction_lock(vm);
@@ -666,10 +665,18 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
amdgpu_vm_eviction_unlock(vm);
spin_lock(&vm->status_lock);
- empty = list_empty(&vm->evicted);
+ ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
- return ret && empty;
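+ /* Stopped scheduler entities cannot process VM updates */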
+ spin_lock(&vm->immediate.lock);
+ ret &= !vm->immediate.stopped;
+ spin_unlock(&vm->immediate.lock);
+
+ spin_lock(&vm->delayed.lock);
+ ret &= !vm->delayed.stopped;
+ spin_unlock(&vm->delayed.lock);
+
+ return ret;
}
/**
@@ -765,6 +772,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
struct dma_fence *fence = NULL;
+ struct amdgpu_fence *af;
unsigned int patch;
int r;
@@ -787,7 +795,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
- cleaner_shader_needed = adev->gfx.enable_cleaner_shader &&
+ cleaner_shader_needed = job->run_cleaner_shader &&
+ adev->gfx.enable_cleaner_shader &&
ring->funcs->emit_cleaner_shader && job->base.s_fence &&
&job->base.s_fence->scheduled == isolation->spearhead;
@@ -817,7 +826,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
- if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+ if (ring->funcs->emit_gds_switch &&
gds_switch_needed) {
amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
job->gds_size, job->gws_base,
@@ -829,6 +838,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
return r;
+ /* Record the job's finished-fence context on the emitted fence */
+ af = container_of(fence, struct amdgpu_fence, base);
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
}
if (vm_flush_needed) {
@@ -1270,8 +1282,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- if (obj->import_attach && bo_va->is_xgmi) {
- struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
+ struct dma_buf *dma_buf = obj->dma_buf;
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -1630,7 +1642,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
* validation
*/
if (vm->is_compute_context &&
- bo_va->base.bo->tbo.base.import_attach &&
+ drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
(!bo_va->base.bo->tbo.resource ||
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
@@ -2394,10 +2406,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
else
adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
- DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
- vm_size, adev->vm_manager.num_level + 1,
- adev->vm_manager.block_size,
- adev->vm_manager.fragment_size);
+ dev_info(
+ adev->dev,
+ "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
+ vm_size, adev->vm_manager.num_level + 1,
+ adev->vm_manager.block_size, adev->vm_manager.fragment_size);
}
/**
@@ -2408,13 +2421,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
- true, timeout);
+ timeout = drm_sched_entity_flush(&vm->immediate, timeout);
if (timeout <= 0)
return timeout;
- return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
+ return drm_sched_entity_flush(&vm->delayed, timeout);
}
static void amdgpu_vm_destroy_task_info(struct kref *kref)
@@ -2446,7 +2457,8 @@ amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
*/
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
{
- kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
+ if (task_info)
+ kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
}
/**
@@ -2506,11 +2518,11 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
if (!vm->task_info)
return;
- if (vm->task_info->pid == current->pid)
+ if (vm->task_info->task.pid == current->pid)
return;
- vm->task_info->pid = current->pid;
- get_task_comm(vm->task_info->task_name, current);
+ vm->task_info->task.pid = current->pid;
+ get_task_comm(vm->task_info->task.comm, current);
if (current->group_leader->mm != current->mm)
return;
@@ -2563,8 +2575,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2606,7 +2618,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_vm_create_task_info(vm);
if (r)
- DRM_DEBUG("Failed to create task info for VM\n");
+ dev_dbg(adev->dev, "Failed to create task info for VM\n");
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2657,8 +2669,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2773,7 +2785,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dev_warn(adev->dev,
"VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
- ti->process_name, ti->pid, ti->task_name, ti->tgid);
+ ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
}
amdgpu_vm_put_task_info(vm->task_info);
@@ -2981,7 +2993,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
error_unlock:
amdgpu_bo_unreserve(root);
if (r < 0)
- DRM_ERROR("Can't handle page fault (%d)\n", r);
+ dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
error_unref:
amdgpu_bo_unref(&root);
@@ -3155,3 +3167,12 @@ bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
}
+
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info)
+{
+ dev_err(adev->dev,
+ " Process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task.comm, task_info->task.pid);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index f3ad687125ad..fd086efd8457 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -236,9 +236,8 @@ struct amdgpu_vm_pte_funcs {
};
struct amdgpu_task_info {
+ struct drm_wedge_task_info task;
char process_name[TASK_COMM_LEN];
- char task_name[TASK_COMM_LEN];
- pid_t pid;
pid_t tgid;
struct kref refcount;
};
@@ -668,4 +667,7 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
index 51cddfa3f1e8..5d26797356a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
}
static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_tlb_fence_get_driver_name,
.get_timeline_name = amdgpu_tlb_fence_get_timeline_name
};
@@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
INIT_WORK(&f->work, amdgpu_tlb_fence_work);
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
- vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+ dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
/* TODO: We probably need a separate wq here */
dma_fence_get(&f->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 2d7f82e98df9..78f9e86ccc09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -463,7 +463,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
int r;
lpfn = (u64)place->lpfn << PAGE_SHIFT;
- if (!lpfn)
+ if (!lpfn || lpfn > man->size)
lpfn = man->size;
fpfn = (u64)place->fpfn << PAGE_SHIFT;
@@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
- amdgpu_vram_mgr_do_reserve(man);
-
drm_buddy_free_list(mm, &vres->blocks, vres->flags);
+ amdgpu_vram_mgr_do_reserve(man);
mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
@@ -783,6 +782,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
}
/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset cleared blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct drm_buddy *mm = &mgr->mm;
+
+ mutex_lock(&mgr->lock);
+ drm_buddy_reset_clear(mm, false);
+ mutex_unlock(&mgr->lock);
+}
+
+/**
* amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
*
* @man: TTM memory type manager
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index b256cbc2bc27..2c88d5fd87da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -66,7 +66,10 @@ to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
{
- to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED;
+ struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res);
+
+ WARN_ON(ares->flags & DRM_BUDDY_CLEARED);
+ ares->flags |= DRM_BUDDY_CLEARED;
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 23b6f7a4aa4a..c417f8689220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -27,6 +27,9 @@
#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);
+
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
@@ -189,7 +192,7 @@ static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto out;
}
-
+ amdgpu_xcp_sysfs_entries_update(xcp_mgr);
out:
mutex_unlock(&xcp_mgr->xcp_lock);
@@ -215,15 +218,27 @@ int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}
-int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
{
- int mode;
+ if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ return true;
if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return xcp_mgr->mode;
+ return true;
- if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
+ xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
+ return true;
+
+ return false;
+}
+
+int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int mode;
+
+ if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
return xcp_mgr->mode;
if (!(flags & AMDGPU_XCP_FL_LOCKED))
@@ -263,9 +278,10 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
if (ret == -ENOSPC) {
dev_warn(adev->dev,
"Skip xcp node #%d when out of drm node resource.", i);
- return 0;
+ ret = 0;
+ goto out;
} else if (ret) {
- return ret;
+ goto out;
}
/* Redirect all IOCTLs to the primary device */
@@ -278,9 +294,14 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
p_ddev->vma_offset_manager = ddev->vma_offset_manager;
p_ddev->driver = &amdgpu_partition_driver;
adev->xcp_mgr->xcp[i].ddev = p_ddev;
+
+ dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
}
+ ret = 0;
+out:
+ amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);
- return 0;
+ return ret;
}
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
@@ -288,6 +309,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
struct amdgpu_xcp_mgr *xcp_mgr;
+ int i;
if (!xcp_funcs || !xcp_funcs->get_ip_details)
return -EINVAL;
@@ -306,6 +328,8 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);
adev->xcp_mgr = xcp_mgr;
+ for (i = 0; i < MAX_XCP; ++i)
+ xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;
return amdgpu_xcp_dev_alloc(adev);
}
@@ -433,6 +457,223 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
}
}
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds)
+{
+ u32 sel_xcp_id;
+ int i;
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+
+ if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
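+ /* No partition assigned yet: pick the XCP with the fewest references */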
+ u32 least_ref_cnt = ~0;
+
+ fpriv->xcp_id = 0;
+ for (i = 0; i < xcp_mgr->num_xcps; i++) {
+ u32 total_ref_cnt;
+
+ total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
+ if (total_ref_cnt < least_ref_cnt) {
+ fpriv->xcp_id = i;
+ least_ref_cnt = total_ref_cnt;
+ }
+ }
+ }
+ sel_xcp_id = fpriv->xcp_id;
+
+ if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
+ *num_scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
+ *scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
+ atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
+ dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
+ } else {
+ dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
+ uint32_t inst_idx,
+ struct amdgpu_ring *ring)
+{
+ int xcp_id;
+ enum AMDGPU_XCP_IP_BLOCK ip_blk;
+ uint32_t inst_mask;
+
+ ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
+ if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
+ (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
+ return;
+
+ inst_mask = 1 << inst_idx;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ ip_blk = AMDGPU_XCP_GFX;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ip_blk = AMDGPU_XCP_SDMA;
+ break;
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ ip_blk = AMDGPU_XCP_VCN;
+ break;
+ default:
+ dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type);
+ return;
+ }
+
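+ /* Find the partition whose IP block contains this ring's instance */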
+ for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+ if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+ ring->xcp_id = xcp_id;
+ dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
+ ring->xcp_id);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
+ break;
+ }
+ }
+}
+
+static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int sel_xcp_id)
+{
+ unsigned int *num_gpu_sched;
+
+ num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+ .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+ adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+ .sched[(*num_gpu_sched)++] = &ring->sched;
+ dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
+ ring->name, sel_xcp_id, ring->funcs->type,
+ ring->hw_prio, *num_gpu_sched);
+}
+
+static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+ memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+ }
+
+ if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ ring = adev->rings[i];
+ if (!ring || !ring->sched.ready || ring->no_scheduler)
+ continue;
+
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+ }
+
+ return 0;
+}
+
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
+ else
+ amdgpu_set_xcp_id(adev, ring->me, ring);
+ }
+
+ return amdgpu_xcp_sched_list_update(adev);
+}
+
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ xcp_mgr->supp_xcp_modes = 0;
+
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_QPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 6:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_TPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 4:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 2:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 1:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ /* TODO:
+ * Stop user queues and threads, and make sure GPU is empty of work.
+ */
+
+ if (flags & AMDGPU_XCP_OPS_KFD)
+ amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
+
+ return 0;
+}
+
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & AMDGPU_XCP_OPS_KFD) {
+ amdgpu_amdkfd_device_probe(xcp_mgr->adev);
+ amdgpu_amdkfd_device_init(xcp_mgr->adev);
+ /* If KFD init failed, return failure */
+ if (!xcp_mgr->adev->kfd.init_complete)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
struct amdgpu_xcp_res_details *xcp_res, char *buf) \
@@ -635,7 +876,7 @@ static const struct attribute *xcp_attrs[] = {
NULL,
};
-void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
+static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
{
struct amdgpu_xcp_res_details *xcp_res;
struct amdgpu_xcp_cfg *xcp_cfg;
@@ -703,16 +944,16 @@ err1:
kobject_put(&xcp_cfg->kobj);
}
-void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
+static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
{
struct amdgpu_xcp_res_details *xcp_res;
struct amdgpu_xcp_cfg *xcp_cfg;
int i;
- if (!adev->xcp_mgr)
+ if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
return;
- xcp_cfg = adev->xcp_mgr->xcp_cfg;
+ xcp_cfg = adev->xcp_mgr->xcp_cfg;
for (i = 0; i < xcp_cfg->num_res; i++) {
xcp_res = &xcp_cfg->xcp_res[i];
kobject_put(&xcp_res->kobj);
@@ -722,3 +963,124 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
kobject_put(&xcp_cfg->kobj);
}
+
+/*====================== xcp sysfs - data entries ======================*/
+
+#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)
+
+static ssize_t xcp_metrics_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp *xcp = to_xcp(kobj);
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ ssize_t size;
+
+ xcp_mgr = xcp->xcp_mgr;
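+ /* First call with a NULL buffer returns the metrics size; the second call fills buf */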
+ size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
+ if (size <= 0)
+ return size;
+
+ if (size > PAGE_SIZE)
+ return -ENOSPC;
+
+ return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
+}
+
+static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct amdgpu_xcp *xcp = to_xcp(kobj);
+
+ if (!xcp || !xcp->valid)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);
+
+static struct attribute *amdgpu_xcp_attrs[] = {
+ &xcp_sysfs_metrics.attr,
+ NULL,
+};
+
+static const struct attribute_group amdgpu_xcp_attrs_group = {
+ .attrs = amdgpu_xcp_attrs,
+ .is_visible = amdgpu_xcp_attrs_is_visible
+};
+
+static const struct kobj_type xcp_sysfs_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
+{
+ struct amdgpu_xcp *xcp;
+
+ for (n--; n >= 0; n--) {
+ xcp = &xcp_mgr->xcp[n];
+ if (!xcp->ddev || !xcp->valid)
+ continue;
+ sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ kobject_put(&xcp->kobj);
+ }
+}
+
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_xcp *xcp;
+ int i, r;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ /* Entries hang off each partition's drm device; stop at the first unused slot */
+ xcp = &xcp_mgr->xcp[i];
+ if (!xcp->ddev)
+ break;
+ r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
+ &xcp->ddev->dev->kobj, "xcp");
+ if (r)
+ goto out;
+
+ r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ if (r)
+ goto out;
+ }
+
+ return;
+out:
+ kobject_put(&xcp->kobj);
+}
+
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_xcp *xcp;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ /* Skip partitions without a drm device */
+ xcp = &xcp_mgr->xcp[i];
+ if (!xcp->ddev)
+ continue;
+ sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ }
+}
+
+void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
+{
+ if (!adev->xcp_mgr)
+ return;
+
+ amdgpu_xcp_cfg_sysfs_init(adev);
+}
+
+void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!adev->xcp_mgr)
+ return;
+ amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
+ amdgpu_xcp_cfg_sysfs_fini(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index b63f53242c57..70a0f8400b57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -39,6 +39,8 @@
#define AMDGPU_XCP_NO_PARTITION (~0)
+#define AMDGPU_XCP_OPS_KFD (1 << 0)
+
struct amdgpu_fpriv;
enum AMDGPU_XCP_IP_BLOCK {
@@ -108,6 +110,8 @@ struct amdgpu_xcp {
struct drm_driver *driver;
struct drm_vma_offset_manager *vma_offset_manager;
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ struct kobject kobj;
};
struct amdgpu_xcp_mgr {
@@ -142,10 +146,6 @@ struct amdgpu_xcp_mgr_funcs {
int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
- int (*select_scheds)(struct amdgpu_device *adev,
- u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds);
- int (*update_partition_sched_list)(struct amdgpu_device *adev);
};
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -174,18 +174,17 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
struct drm_file *file_priv);
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity);
-
-void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev);
-void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev);
-
-#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->select_scheds ? \
- (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT)
-#define amdgpu_xcp_update_partition_sched_list(adev) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->update_partition_sched_list ? \
- (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds);
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr);
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev);
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 477424472bbe..1ede308a7c67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -294,17 +294,46 @@ static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};
+int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num)
+{
+ static const int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 };
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ if (link_num < ARRAY_SIZE(link_map_6_4_x))
+ return link_map_6_4_x[link_num];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
{
- const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070;
- const int xgmi_inst = 2;
- u32 link_inst;
+ const u32 smn_xgmi_6_4_pcs_state_hist1[2] = { 0x11a00070, 0x11b00070 };
+ const u32 smn_xgmi_6_4_1_pcs_state_hist1[2] = { 0x12100070,
+ 0x11b00070 };
+ u32 i, n;
u64 addr;
- link_inst = global_link_num % xgmi_inst;
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ n = ARRAY_SIZE(smn_xgmi_6_4_pcs_state_hist1);
+ addr = smn_xgmi_6_4_pcs_state_hist1[global_link_num % n];
+ break;
+ case IP_VERSION(6, 4, 1):
+ n = ARRAY_SIZE(smn_xgmi_6_4_1_pcs_state_hist1);
+ addr = smn_xgmi_6_4_1_pcs_state_hist1[global_link_num % n];
+ break;
+ default:
+ return U32_MAX;
+ }
- addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) +
- adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst);
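+ /* Adjacent links alternate between the two apertures; the pair index selects the AID offset */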
+ i = global_link_num / n;
+ addr += adev->asic_funcs->encode_ext_smn_addressing(i);
return RREG32_PCIE_EXT(addr);
}
@@ -1742,16 +1771,25 @@ void amdgpu_xgmi_early_init(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
- adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT;
+ /* 25 GT/s */
+ adev->gmc.xgmi.max_speed = 25;
adev->gmc.xgmi.max_width = 16;
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
- adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT;
+ /* 32 GT/s */
+ adev->gmc.xgmi.max_speed = 32;
adev->gmc.xgmi.max_width = 16;
break;
default:
break;
}
}
+
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width)
+{
+ adev->gmc.xgmi.max_speed = max_speed;
+ adev->gmc.xgmi.max_width = max_width;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 32dabba4062f..bba0b26fee8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -25,12 +25,6 @@
#include <drm/task_barrier.h>
#include "amdgpu_ras.h"
-enum amdgpu_xgmi_link_speed {
- XGMI_SPEED_16GT = 16,
- XGMI_SPEED_25GT = 25,
- XGMI_SPEED_32GT = 32
-};
-
struct amdgpu_hive_info {
struct kobject kobj;
uint64_t hive_id;
@@ -97,7 +91,7 @@ struct amdgpu_xgmi {
struct ras_common_if *ras_if;
bool connected_to_cpu;
struct amdgpu_xgmi_ras *ras;
- enum amdgpu_xgmi_link_speed max_speed;
+ uint16_t max_speed;
uint8_t max_width;
};
@@ -125,8 +119,11 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
int req_nps_mode);
int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev,
int global_link_num);
+int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num);
void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index d6ac2652f0ac..33edad1f9dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -109,10 +109,12 @@ union amd_sriov_msg_feature_flags {
union amd_sriov_reg_access_flags {
struct {
- uint32_t vf_reg_access_ih : 1;
- uint32_t vf_reg_access_mmhub : 1;
- uint32_t vf_reg_access_gc : 1;
- uint32_t reserved : 29;
+ uint32_t vf_reg_access_ih : 1;
+ uint32_t vf_reg_access_mmhub : 1;
+ uint32_t vf_reg_access_gc : 1;
+ uint32_t vf_reg_access_l1_tlb_cntl : 1;
+ uint32_t vf_reg_access_sq_config : 1;
+ uint32_t reserved : 27;
} flags;
uint32_t all;
};
@@ -330,6 +332,7 @@ enum amd_sriov_mailbox_request_message {
MB_REQ_MSG_RAS_POISON = 202,
MB_REQ_RAS_ERROR_COUNT = 203,
MB_REQ_RAS_CPER_DUMP = 204,
+ MB_REQ_RAS_BAD_PAGES = 205,
};
/* mailbox message send from host to guest */
@@ -347,6 +350,9 @@ enum amd_sriov_mailbox_response_message {
MB_RES_MSG_GPU_RMA = 10,
MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
MB_REQ_RAS_CPER_DUMP_READY = 14,
+ MB_RES_MSG_RAS_BAD_PAGES_READY = 15,
+ MB_RES_MSG_RAS_BAD_PAGES_NOTIFICATION = 16,
+ MB_RES_MSG_UNRECOV_ERR_NOTIFICATION = 17,
MB_RES_MSG_TEXT_MESSAGE = 255
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index ae071985f26e..811124ff88a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -29,12 +29,11 @@
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"
+#include "amdgpu_ip.h"
#define XCP_INST_MASK(num_inst, xcp_id) \
(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
-#define AMDGPU_XCP_OPS_KFD (1 << 0)
-
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
int i;
@@ -62,234 +61,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
-static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
-{
- return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
-}
-
-static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
- uint32_t inst_idx, struct amdgpu_ring *ring)
-{
- int xcp_id;
- enum AMDGPU_XCP_IP_BLOCK ip_blk;
- uint32_t inst_mask;
-
- ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
- if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
- (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
- return;
-
- inst_mask = 1 << inst_idx;
-
- switch (ring->funcs->type) {
- case AMDGPU_HW_IP_GFX:
- case AMDGPU_RING_TYPE_COMPUTE:
- case AMDGPU_RING_TYPE_KIQ:
- ip_blk = AMDGPU_XCP_GFX;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ip_blk = AMDGPU_XCP_SDMA;
- break;
- case AMDGPU_RING_TYPE_VCN_ENC:
- case AMDGPU_RING_TYPE_VCN_JPEG:
- ip_blk = AMDGPU_XCP_VCN;
- break;
- default:
- DRM_ERROR("Not support ring type %d!", ring->funcs->type);
- return;
- }
-
- for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
- if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
- ring->xcp_id = xcp_id;
- dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
- ring->xcp_id);
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
- break;
- }
- }
-}
-
-static void aqua_vanjaram_xcp_gpu_sched_update(
- struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- unsigned int sel_xcp_id)
-{
- unsigned int *num_gpu_sched;
-
- num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
- .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
- adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
- .sched[(*num_gpu_sched)++] = &ring->sched;
- DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
- sel_xcp_id, ring->funcs->type,
- ring->hw_prio, *num_gpu_sched);
-}
-
-static int aqua_vanjaram_xcp_sched_list_update(
- struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring;
- int i;
-
- for (i = 0; i < MAX_XCP; i++) {
- atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
- memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
- }
-
- if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return 0;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- ring = adev->rings[i];
- if (!ring || !ring->sched.ready || ring->no_scheduler)
- continue;
-
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
-
- /* VCN may be shared by two partitions under CPX MODE in certain
- * configs.
- */
- if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
- aqua_vanjaram_xcp_vcn_shared(adev))
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
- }
-
- return 0;
-}
-
-static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->num_rings; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
- ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
- aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
- else
- aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
- }
-
- return aqua_vanjaram_xcp_sched_list_update(adev);
-}
-
-static int aqua_vanjaram_select_scheds(
- struct amdgpu_device *adev,
- u32 hw_ip,
- u32 hw_prio,
- struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds,
- struct drm_gpu_scheduler ***scheds)
-{
- u32 sel_xcp_id;
- int i;
-
- if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
- u32 least_ref_cnt = ~0;
-
- fpriv->xcp_id = 0;
- for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
- u32 total_ref_cnt;
-
- total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
- if (total_ref_cnt < least_ref_cnt) {
- fpriv->xcp_id = i;
- least_ref_cnt = total_ref_cnt;
- }
- }
- }
- sel_xcp_id = fpriv->xcp_id;
-
- if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
- *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
- *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
- atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
- DRM_DEBUG("Selected partition #%d", sel_xcp_id);
- } else {
- DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
- return -ENOENT;
- }
-
- return 0;
-}
-
-static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- int8_t inst)
-{
- int8_t dev_inst;
-
- switch (block) {
- case GC_HWIP:
- case SDMA0_HWIP:
- /* Both JPEG and VCN as JPEG is only alias of VCN */
- case VCN_HWIP:
- dev_inst = adev->ip_map.dev_inst[block][inst];
- break;
- default:
- /* For rest of the IPs, no look up required.
- * Assume 'logical instance == physical instance' for all configs. */
- dev_inst = inst;
- break;
- }
-
- return dev_inst;
-}
-
-static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- uint32_t mask)
-{
- uint32_t dev_mask = 0;
- int8_t log_inst, dev_inst;
-
- while (mask) {
- log_inst = ffs(mask) - 1;
- dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
- dev_mask |= (1 << dev_inst);
- mask &= ~(1 << log_inst);
- }
-
- return dev_mask;
-}
-
-static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type ip_block,
- uint32_t inst_mask)
-{
- int l = 0, i;
-
- while (inst_mask) {
- i = ffs(inst_mask) - 1;
- adev->ip_map.dev_inst[ip_block][l++] = i;
- inst_mask &= ~(1 << i);
- }
- for (; l < HWIP_MAX_INSTANCE; l++)
- adev->ip_map.dev_inst[ip_block][l] = -1;
-}
-
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
-{
- u32 ip_map[][2] = {
- { GC_HWIP, adev->gfx.xcc_mask },
- { SDMA0_HWIP, adev->sdma.sdma_mask },
- { VCN_HWIP, adev->vcn.inst_mask },
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
- aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
-
- adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
- adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
-}
-
/* Fixed pattern for smn addressing on different AIDs:
* bit[34]: indicate cross AID access
* bit[33:32]: indicate target AID id
@@ -353,11 +124,14 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->nbio.funcs->get_compute_partition_mode) {
mode = adev->nbio.funcs->get_compute_partition_mode(adev);
- if (mode != derv_mode)
+ if (mode != derv_mode) {
dev_warn(
adev->dev,
"Mismatch in compute partition mode - reported : %d derived : %d",
mode, derv_mode);
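+		/* a derived mode of "unknown" may mean register reads are
+		 * failing, so check the bus status
+		 */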
+ if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+ amdgpu_device_bus_status_check(adev);
+ }
}
return mode;
@@ -448,53 +222,74 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
return 0;
}
-static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
- int mode,
- struct amdgpu_xcp_cfg *xcp_cfg)
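+/* Translate a compute partition (PX) mode into the number of XCPs it
+ * creates and the NPS (memory partition) modes it is compatible with.
+ */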
+static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int px_mode, int *num_xcp,
+ uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int max_res[AMDGPU_XCP_RES_MAX] = {};
- bool res_lt_xcp;
- int num_xcp, i;
- u16 nps_modes;
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
- if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
- max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
- max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
- max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
- max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
-
- switch (mode) {
+ switch (px_mode) {
case AMDGPU_SPX_PARTITION_MODE:
- num_xcp = 1;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ *num_xcp = 1;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
break;
case AMDGPU_DPX_PARTITION_MODE:
- num_xcp = 2;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS2_PARTITION_MODE);
+ *num_xcp = 2;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_TPX_PARTITION_MODE:
- num_xcp = 3;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 3;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_QPX_PARTITION_MODE:
- num_xcp = 4;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 4;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
- num_xcp = NUM_XCC(adev->gfx.xcc_mask);
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = NUM_XCC(adev->gfx.xcc_mask);
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
return -EINVAL;
}
+ return 0;
+}
+
+static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int max_res[AMDGPU_XCP_RES_MAX] = {};
+ bool res_lt_xcp;
+ int num_xcp, i, r;
+ u16 nps_modes;
+
+ if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ return -EINVAL;
+
+ max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
+ max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
+ max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
+ max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
+
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
+ if (r)
+ return r;
+
xcp_cfg->compatible_nps_modes =
(adev->gmc.supported_nps_modes & nps_modes);
xcp_cfg->num_res = ARRAY_SIZE(max_res);
@@ -543,30 +338,31 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
enum amdgpu_gfx_partition mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int num_xcc, num_xccs_per_xcp;
+ int num_xcc, num_xccs_per_xcp, r;
+ int num_xcp, nps_mode;
+ u16 supp_nps_modes;
+ bool comp_mode;
+
+ nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
+ &supp_nps_modes);
+ if (r)
+ return false;
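+	/* the requested compute mode must support the current NPS mode */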
+ comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
+ return comp_mode && num_xcc > 0;
case AMDGPU_DPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
+ return comp_mode && (num_xcc % 4) == 0;
case AMDGPU_TPX_PARTITION_MODE:
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 3) &&
- ((num_xcc % 3) == 0);
+ return comp_mode && ((num_xcc % 3) == 0);
case AMDGPU_QPX_PARTITION_MODE:
num_xccs_per_xcp = num_xcc / 4;
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 4) &&
- (num_xccs_per_xcp >= 2);
+ return comp_mode && (num_xccs_per_xcp >= 2);
case AMDGPU_CPX_PARTITION_MODE:
- /* (num_xcc > 1) because 1 XCC is considered SPX, not CPX.
- * (num_xcc % adev->gmc.num_mem_partitions) == 0 because
- * num_compute_partitions can't be less than num_mem_partitions
- */
- return ((num_xcc > 1) &&
- (num_xcc % adev->gmc.num_mem_partitions) == 0);
+ return comp_mode && (num_xcc > 1);
default:
return false;
}
@@ -574,72 +370,6 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return false;
}
-static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- /* TODO:
- * Stop user queues and threads, and make sure GPU is empty of work.
- */
-
- if (flags & AMDGPU_XCP_OPS_KFD)
- amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
-
- return 0;
-}
-
-static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- int ret = 0;
-
- if (flags & AMDGPU_XCP_OPS_KFD) {
- amdgpu_amdkfd_device_probe(xcp_mgr->adev);
- amdgpu_amdkfd_device_init(xcp_mgr->adev);
- /* If KFD init failed, return failure */
- if (!xcp_mgr->adev->kfd.init_complete)
- ret = -EIO;
- }
-
- return ret;
-}
-
-static void
-__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
-{
- struct amdgpu_device *adev = xcp_mgr->adev;
-
- xcp_mgr->supp_xcp_modes = 0;
-
- switch (NUM_XCC(adev->gfx.xcc_mask)) {
- case 8:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_QPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 6:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_TPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 4:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- /* this seems only existing in emulation phase */
- case 2:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 1:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
-
- default:
- break;
- }
-}
-
static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
int mode;
@@ -686,7 +416,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto out;
}
- ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
if (ret)
goto unlock;
@@ -699,7 +429,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcps = num_xcc / num_xcc_per_xcp;
amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
- ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
if (!ret)
__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
@@ -782,9 +512,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
- .select_scheds = &aqua_vanjaram_select_scheds,
- .update_partition_sched_list =
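+	/* unmap has completed; remap the queue with a second KIQ
+	 * submission before finishing the reset via the helper
+	 */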
- &aqua_vanjaram_update_partition_sched_list
};
static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
@@ -799,7 +526,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
if (ret)
return ret;
- __aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
+ amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
/* TODO: Default memory node affinity init */
return ret;
@@ -839,7 +566,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
if (ret)
return ret;
- aqua_vanjaram_ip_map_init(adev);
+ amdgpu_ip_map_init(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 81d195d366ce..427b073de2fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1444,6 +1444,7 @@ static void atom_get_vbios_pn(struct atom_context *ctx)
if (vbios_str == NULL)
vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
}
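+	/* hide vbios_str's value from the optimizer so the NULL check
+	 * below is not elided
+	 */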
+ OPTIMIZER_HIDE_VAR(vbios_str);
if (vbios_str != NULL && *vbios_str == 0)
vbios_str++;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 521b9faab180..492813ab1b54 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect
u8 link_status[DP_LINK_STATUS_SIZE];
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 508cea965983..9e8715b4739d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -56,6 +56,8 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
@@ -67,9 +69,6 @@ MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
-u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
-
-
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
@@ -993,14 +992,9 @@ static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
{
- int r;
struct amdgpu_device *adev = ip_block->adev;
- r = cik_sdma_start(adev);
- if (r)
- return r;
-
- return r;
+ return cik_sdma_start(adev);
}
static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1040,14 +1034,10 @@ static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
- SRBM_STATUS2__SDMA1_BUSY_MASK);
-
- if (!tmp)
+ if (cik_sdma_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 279288365940..8aca4f2734f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -60,9 +60,6 @@
#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
-#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
-#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
-
#define PIPEID(x) ((x) << 0)
#define MEID(x) ((x) << 2)
#define VMID(x) ((x) << 4)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index df401aded662..bf7c22f81cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3075,7 +3075,7 @@ static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3227,7 +3227,7 @@ static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 80f01c3989cd..47e05783c4a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3206,7 +3206,7 @@ static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3358,7 +3358,7 @@ static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -3488,8 +3488,7 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.set_powergating_state = dce_v11_0_set_powergating_state,
};
-static void
-dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 255c70959343..276c025c4c03 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -287,7 +287,7 @@ static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -412,7 +412,7 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
{
if (!render)
WREG32(mmVGA_RENDER_CONTROL,
- RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
+ RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK);
}
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
@@ -1011,16 +1011,16 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* select wm A */
arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp = arb_control3;
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(1);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* select wm B */
tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(2);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
@@ -1089,7 +1089,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
}
WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
- DC_LB_MEMORY_CONFIG(tmp));
+ (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));
WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
(buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
@@ -1306,6 +1306,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
@@ -1327,6 +1328,11 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
@@ -1348,7 +1354,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
+ u32 value = 0;
u8 stereo_freqs = 0;
int max_channels = -1;
int j;
@@ -1358,12 +1364,12 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
max_channels = sad->channels;
}
@@ -1374,13 +1380,13 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
}
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
+ value |= (stereo_freqs <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
+
+ WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
}
kfree(sads);
-
}
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
@@ -1886,7 +1892,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels, pipe_config;
- u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
+ u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
@@ -1926,76 +1932,76 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
switch (target_fb->format->format) {
case DRM_FORMAT_C8:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
- GRPH_FORMAT(GRPH_FORMAT_INDEXED));
+ fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_RGB565:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB565));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
- fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
- GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
+ (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
default:
@@ -2013,18 +2019,18 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- fb_format |= GRPH_NUM_BANKS(num_banks);
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
- fb_format |= GRPH_TILE_SPLIT(tile_split);
- fb_format |= GRPH_BANK_WIDTH(bankw);
- fb_format |= GRPH_BANK_HEIGHT(bankh);
- fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
+ fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
+ fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
+ fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
+ fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
+ fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
+ fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
+ fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
}
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- fb_format |= GRPH_PIPE_CONFIG(pipe_config);
+ fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
dce_v6_0_vga_enable(crtc, false);
@@ -2040,7 +2046,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
@@ -2108,14 +2114,13 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
- INTERLEAVE_EN);
+ DATA_FORMAT__INTERLEAVE_EN_MASK);
else
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -2125,15 +2130,15 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+ ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+ (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
- (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
+ ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+ (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
@@ -2160,19 +2165,19 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
- (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
- ICON_DEGAMMA_MODE(0) |
- (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+ ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
- (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+ ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+ (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
- (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+ ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+ (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
+ ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+ (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
@@ -2267,8 +2272,6 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
-
}
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
@@ -2285,7 +2288,6 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
CUR_CONTROL__CURSOR_EN_MASK |
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
}
static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
@@ -2596,7 +2598,6 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
@@ -2669,7 +2670,7 @@ static void dce_v6_0_panic_flush(struct drm_plane *plane)
/* Disable DC tiling */
fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_ARRAY_MODE(0x7);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
}
@@ -2745,7 +2746,6 @@ static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- bool ret;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2789,8 +2789,7 @@ static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
- if (ret)
+ if (amdgpu_atombios_get_connector_info_from_object_table(adev))
amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2986,12 +2985,12 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask &= ~VBLANK_INT_MASK;
+ interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
case AMDGPU_IRQ_STATE_ENABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask |= VBLANK_INT_MASK;
+ interrupt_mask |= INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
default:
@@ -3006,28 +3005,28 @@ static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
-static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned hpd,
enum amdgpu_interrupt_state state)
{
u32 dc_hpd_int_cntl;
- if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl |= DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
default:
break;
@@ -3036,7 +3035,7 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3096,7 +3095,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data[0]) {
case 0: /* vblank */
if (disp_int & interrupt_status_offsets[crtc].vblank)
- WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3107,7 +3106,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
- WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3121,7 +3120,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3172,7 +3171,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
@@ -3249,12 +3248,10 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.set_powergating_state = dce_v6_0_set_powergating_state,
};
-static void
-dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3274,7 +3271,6 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
-
struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3314,7 +3310,6 @@ static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -3325,7 +3320,6 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3541,17 +3535,17 @@ static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
- .set = dce_v6_0_set_crtc_interrupt_state,
+ .set = dce_v6_0_set_crtc_irq_state,
.process = dce_v6_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
- .set = dce_v6_0_set_pageflip_interrupt_state,
+ .set = dce_v6_0_set_pageflip_irq_state,
.process = dce_v6_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
- .set = dce_v6_0_set_hpd_interrupt_state,
+ .set = dce_v6_0_set_hpd_irq_state,
.process = dce_v6_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 07358546581f..e62ccf9eb73d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -271,7 +271,7 @@ static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -3021,7 +3021,7 @@ static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
}
-static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3029,7 +3029,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
u32 dc_hpd_int_cntl;
if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ DRM_DEBUG("invalid hpd %d\n", type);
return 0;
}
@@ -3051,7 +3051,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3136,7 +3136,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3547,17 +3547,17 @@ static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
- .set = dce_v8_0_set_crtc_interrupt_state,
+ .set = dce_v8_0_set_crtc_irq_state,
.process = dce_v8_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
- .set = dce_v8_0_set_pageflip_interrupt_state,
+ .set = dce_v8_0_set_pageflip_irq_state,
.process = dce_v8_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
- .set = dce_v8_0_set_hpd_interrupt_state,
+ .set = dce_v8_0_set_hpd_irq_state,
.process = dce_v8_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 23e6a05359c2..7bd506f06eb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -368,11 +368,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_LX6_CORE_PDEBUG_INST),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
@@ -421,7 +416,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
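+	/* the same HEADER_DUMP offset is listed eight times; successive
+	 * reads are expected to step through the dump entries
+	 */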
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
@@ -448,7 +452,32 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI),
+ /* gfx header registers */
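+	/* each HEADER_DUMP offset repeats eight times so successive reads
+	 * capture all dump entries
+	 */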
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
@@ -4296,9 +4325,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -4306,39 +4333,15 @@ static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
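+	/* the clear-state preamble and SECT_CONTEXT register extents are
+	 * emitted by the common CSB helpers
+	 */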
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
@@ -4752,6 +4755,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, j, k, r, ring_id = 0;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
@@ -4763,7 +4767,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 1, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 8;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -4778,7 +4782,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 2;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -4800,7 +4804,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.cleaner_shader_size = sizeof(gfx_10_1_10_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 101 &&
adev->gfx.pfp_fw_version >= 158 &&
- adev->gfx.mec_fw_version >= 152) {
+ adev->gfx.mec_fw_version >= 151) {
adev->gfx.enable_cleaner_shader = true;
r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
if (r) {
@@ -4810,7 +4814,9 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
break;
case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
@@ -4826,6 +4832,34 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(10, 3, 6):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
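+		/* the cleaner shader requires minimum CP firmware versions */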
+ if (adev->gfx.me_fw_version >= 14 &&
+ adev->gfx.pfp_fw_version >= 17 &&
+ adev->gfx.mec_fw_version >= 24) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 4 &&
+ adev->gfx.pfp_fw_version >= 9 &&
+ adev->gfx.mec_fw_version >= 12) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -4886,7 +4920,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
/* set up the gfx ring */
for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
continue;
@@ -4918,11 +4952,15 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
+
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
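+	/* per-queue reset is not supported under SR-IOV */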
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
if (r) {
@@ -9012,21 +9050,6 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
-static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, mmSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -9488,7 +9511,9 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9498,15 +9523,14 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
u64 addr;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
- if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7)) {
spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM;
}
@@ -9526,12 +9550,9 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
0, 1, 0x20);
gfx_v10_0_ring_emit_reg_wait(kiq_ring,
SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
- kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
@@ -9541,11 +9562,25 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
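
The reworked kgq reset above, and the kcq variant that follows, share one shape: flag the timed-out fence, unmap the queue via KIQ and test the KIQ ring while the ring lock is still held (so the test cannot interleave with another submission), re-initialize the queue, then re-map under a fresh lock acquisition before resubmitting. A minimal sketch of that shape, using the helper names from this patch but otherwise illustrative, not the driver code:

static int reset_queue_sketch(struct amdgpu_ring *ring,
			      struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_kiq *kiq = &ring->adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	unsigned long flags;
	int r;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	/* flag the bad fence so the helpers know what to resubmit */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	/* phase 1: unmap the queue, testing the KIQ ring before the
	 * ring lock is dropped */
	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}
	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
	amdgpu_ring_commit(kiq_ring);
	r = amdgpu_ring_test_ring(kiq_ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
	if (r)
		return r;

	/* ... per-queue MQD re-init happens here in the real code ... */

	/* phase 2: re-map under a fresh lock acquisition */
	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}
	kiq->pmf->kiq_map_queues(kiq_ring, ring);
	amdgpu_ring_commit(kiq_ring);
	r = amdgpu_ring_test_ring(kiq_ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
	if (r)
		return r;

	/* resubmit unsignalled fences and re-test the victim ring */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}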
static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9553,12 +9588,11 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -9569,9 +9603,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
0, 0);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
@@ -9607,13 +9640,12 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -9645,9 +9677,14 @@ static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_10[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_10[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -9708,9 +9745,13 @@ static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block)
nv_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_10[reg]));
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_10[reg]));
}
index += reg_count;
}
@@ -9839,7 +9880,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kgq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
@@ -9880,7 +9920,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kcq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
index 5255378af53c..f67569ccf9f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
@@ -43,9 +43,9 @@ static const u32 gfx_10_1_10_cleaner_shader_hex[] = {
0xd70f6a01, 0x000202ff,
0x00000400, 0x80828102,
0xbf84fff7, 0xbefc03ff,
- 0x00000068, 0xbe803080,
- 0xbe813080, 0xbe823080,
- 0xbe833080, 0x80fc847c,
+ 0x00000068, 0xbe803000,
+ 0xbe813000, 0xbe823000,
+ 0xbe833000, 0x80fc847c,
0xbf84fffa, 0xbeea0480,
0xbeec0480, 0xbeee0480,
0xbef00480, 0xbef20480,
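
The three changed dwords above track the .asm change in the next file: only the low byte of each word (the SOP1 SSRC0 field) moves from 0x80, the inline constant 0, to 0x00, SGPR s0, turning s_movreld_b32 sN, 0 into s_movreld_b32 sN, s0. A field-extraction sketch, based on my reading of the RDNA SOP1 encoding and not part of this patch:

/* Illustrative SOP1 operand decode (assumed encoding, not driver code). */
static inline u32 sop1_ssrc0(u32 dw) { return dw & 0xff; }        /* 0x00..0x69 = SGPRs, 0x80 = inline 0 */
static inline u32 sop1_sdst(u32 dw) { return (dw >> 16) & 0x7f; } /* destination SGPR index */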
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
index 9ba3359253c9..54f7ed9e2801 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
@@ -40,7 +40,6 @@ shader main
type(CS)
wave_size(32)
// Note: original source code from SQ team
-
//
// Create 32 waves in a threadgroup (CS waves)
// Each allocates 64 VGPRs
@@ -71,8 +70,8 @@ label_0005:
s_sub_u32 s2, s2, 8
s_cbranch_scc0 label_0005
//
- s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
- s_and_b32 s2, s2, s0 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s1 // sgpr1 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set

s_cbranch_scc0 label_0023 // Clean LDS if its first wave of ThreadGroup/WorkGroup
// CLEAR LDS
//
@@ -99,10 +98,10 @@ label_001F:
label_0023:
s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
label_sgpr_loop:
- s_movreld_b32 s0, 0
- s_movreld_b32 s1, 0
- s_movreld_b32 s2, 0
- s_movreld_b32 s3, 0
+ s_movreld_b32 s0, s0
+ s_movreld_b32 s1, s0
+ s_movreld_b32 s2, s0
+ s_movreld_b32 s3, s0
s_sub_u32 m0, m0, 4
s_cbranch_scc0 label_sgpr_loop
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 2a5c2a1ae3c7..c01c241a1b06 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -48,6 +48,8 @@
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX11_NUM_GFX_RINGS 1
#define GFX11_MEC_HPD_SIZE 2048
@@ -83,6 +85,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
@@ -177,9 +180,13 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -230,7 +237,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
@@ -259,7 +275,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
@@ -580,33 +613,18 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -633,12 +651,10 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -744,6 +760,10 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/gc_11_0_0_rlc_1.bin");
+ else if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
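
amdgpu_is_kicker_fw() appears to route boards that ship with the alternate "kicker" firmware line to a differently named RLC image; here the request resolves to amdgpu/gc_11_0_0_rlc_kicker.bin, matching the MODULE_FIRMWARE entry added earlier in this file. The gfx12 microcode loader later in this patch gains the same branch.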
@@ -833,9 +853,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -843,39 +861,15 @@ static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
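
The open-coded clear-state preamble and section walk deleted above moved into shared helpers (amdgpu_gfx_csb_preamble_start(), amdgpu_gfx_csb_data_parser(), amdgpu_gfx_csb_preamble_end()). Their likely shape, reconstructed from the removed lines rather than copied from amdgpu_gfx.c:

/* Reconstruction of the shared CSB helpers; a sketch, not the real bodies. */
static u32 csb_preamble_start_sketch(volatile u32 *buffer)
{
	u32 count = 0;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	return count;
}

static u32 csb_data_parser_sketch(struct amdgpu_device *adev,
				  volatile u32 *buffer, u32 count)
{
	const struct cs_section_def *sect;
	const struct cs_extent_def *ext;
	u32 i;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id != SECT_CONTEXT)
				return count;
			buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG,
							      ext->reg_count));
			buffer[count++] = cpu_to_le32(ext->reg_index -
						      PACKET3_SET_CONTEXT_REG_START);
			for (i = 0; i < ext->reg_count; i++)
				buffer[count++] = cpu_to_le32(ext->extent[i]);
		}
	}

	return count;
}

static void csb_preamble_end_sketch(volatile u32 *buffer, u32 count)
{
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}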
@@ -1056,14 +1050,21 @@ static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
#define MQD_FWWORKAREA_SIZE 484
#define MQD_FWWORKAREA_ALIGNMENT 256
-static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
struct amdgpu_gfx_shadow_info *shadow_info)
{
- if (adev->gfx.cp_gfx_shadow) {
- shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
- shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
- shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
- shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
return 0;
} else {
memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
@@ -1136,6 +1137,10 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
+ if (adev->gfx.disable_kq) {
+ ring->no_scheduler = true;
+ ring->no_user_submission = true;
+ }
if (!ring_id)
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
@@ -1568,24 +1573,18 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- int i, j, k, r, ring_id = 0;
+ int i, j, k, r, ring_id;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
- adev->gfx.mec.num_mec = 1;
- adev->gfx.mec.num_pipe_per_mec = 4;
- adev->gfx.mec.num_queue_per_pipe = 4;
- break;
- case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
@@ -1593,7 +1592,7 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 5, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -1612,6 +1611,35 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2390 &&
+ adev->gfx.pfp_fw_version >= 2530 &&
+ adev->gfx.mec_fw_version >= 2600 &&
+ adev->mes.fw_version[0] >= 120) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ /* add firmware version checks here */
+ if (0 && !adev->gfx.disable_uq) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 2280 &&
@@ -1640,6 +1668,34 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 5, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 12 &&
+ adev->gfx.pfp_fw_version >= 15 &&
+ adev->gfx.mec_fw_version >= 15) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(11, 5, 3):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 7 &&
+ adev->gfx.pfp_fw_version >= 8 &&
+ adev->gfx.mec_fw_version >= 8) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -1701,37 +1757,42 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v11_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ ring_id = 0;
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v11_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
- k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
+ continue;
- r = gfx_v11_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v11_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
@@ -1745,12 +1806,17 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 2280) &&
- (adev->gfx.mec_fw_version >= 2410)) {
- adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ (adev->gfx.mec_fw_version >= 2410) &&
+ !amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
break;
default:
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
break;
}
@@ -4061,6 +4127,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4081,6 +4149,16 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr);
+ mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4205,6 +4283,8 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -4256,6 +4336,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+ /* set UQ fence address */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4509,11 +4593,23 @@ static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
return r;
}
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ if (adev->gfx.disable_kq) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ /* we don't want to set ring->ready */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+ }
+ if (amdgpu_async_gfx_ring)
+ amdgpu_gfx_disable_kgq(adev, 0);
+ } else {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
@@ -4722,6 +4818,49 @@ static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -4731,9 +4870,11 @@ static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v11_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
- if (amdgpu_async_gfx_ring) {
+ if (amdgpu_async_gfx_ring &&
+ !adev->gfx.disable_kq) {
if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
@@ -5059,11 +5200,36 @@ static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
+
adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ /* We need one GFX ring temporarily to set up
+ * the clear state.
+ */
+ adev->gfx.num_gfx_rings = 1;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
gfx_v11_0_set_kiq_pm4_funcs(adev);
gfx_v11_0_set_ring_funcs(adev);
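
Taken together with the gfx12 copy of this switch later in the patch, the new amdgpu_user_queue parameter decodes as: -1 and 0 (the default) keep kernel queues and disable user queues; 1 enables both; 2 disables kernel queues and runs user queues only. In mode 2 a single gfx ring is still created so the clear state can be programmed, and it is torn down again in cp_resume via amdgpu_gfx_disable_kgq(), as the disable_kq branches above show.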
@@ -5094,6 +5260,11 @@ static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
if (r)
return r;
+
+ r = gfx_v11_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -5691,10 +5862,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -5714,10 +5881,6 @@ static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
/* Currently, there is a high possibility to get wave ID mismatch
* between ME and GDS, leading to a hw deadlock, because ME generates
* different wave IDs than the GDS expects. This situation happens
@@ -5775,8 +5938,7 @@ static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -5804,10 +5966,7 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -6036,28 +6195,13 @@ static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v10_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -6144,21 +6288,6 @@ static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -6296,25 +6425,23 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
@@ -6481,27 +6608,29 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -6609,17 +6738,86 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
-static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* Disable the pipe reset until the CPFW fully supports it. */
+ dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
+ return false;
+}
+
+static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
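+ /* INSTR_PNTR is reported in dwords; shift to bytes and compare against
+  * the RS64 firmware load address: a zero delta means the ME restarted
+  * at its firmware entry point.
+  */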
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset to the ME firmware start PC: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /* FIXME: Sometimes driver can't cache the ME firmware start PC correctly,
+ * so the pipe reset status relies on the later gfx ring test result.
+ */
+ return 0;
+}
+
+static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
- if (r)
- return r;
+ if (r) {
+ dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
+ r = gfx_v11_reset_gfx_pipe(ring);
+ if (r)
+ return r;
+ }
r = gfx_v11_0_kgq_init_queue(ring, true);
if (r) {
@@ -6633,21 +6831,154 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
+static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ if (ring->me == 1) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ /* mec1 fw pc: CP_MEC1_INSTR_PNTR */
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ /* mec2 fw pc: CP:CP_MEC2_INSTR_PNTR */
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR));
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe resets to MEC FW start PC: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /*FIXME:Sometimes driver can't cache the MEC firmware start PC correctly, so the pipe
+ * reset status relies on the compute ring test result.
+ */
+ return 0;
}
-static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r = 0;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
+ r = gfx_v11_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v11_0_kcq_init_queue(ring, true);
@@ -6661,7 +6992,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -6693,9 +7024,14 @@ static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_11[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_11[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -6755,9 +7091,16 @@ static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
/* ME0 is for GFX so start from 1 for CP */
soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_11[reg]));
+ if (i &&
+ gc_cp_reg_list_11[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0,
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_11[reg]));
}
index += reg_count;
}
@@ -6885,7 +7228,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kgq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
@@ -6927,7 +7269,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kcq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 62a257a4a3e9..3e138527d534 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -36,7 +36,7 @@
#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "soc24_enum.h"
-#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"
#include "soc15.h"
#include "clearstate_gfx12.h"
@@ -44,6 +44,8 @@
#include "gfx_v12_0.h"
#include "nbif_v6_3_1.h"
#include "mes_v12_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX12_NUM_GFX_RINGS 1
#define GFX12_MEC_HPD_SIZE 2048
@@ -77,6 +79,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin");
static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
@@ -133,11 +136,14 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0),
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR),
-
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -186,7 +192,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
@@ -215,7 +230,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = {
@@ -475,33 +507,18 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -528,12 +545,10 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -572,7 +587,7 @@ out:
static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -599,9 +614,14 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
if (!amdgpu_sriov_vf(adev)) {
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
- AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_rlc.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -881,6 +901,34 @@ static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev,
soc24_grbm_select(adev, me, pipe, q, vm);
}
+/* all sizes are in bytes */
+#define MQD_SHADOW_BASE_SIZE 73728
+#define MQD_SHADOW_BASE_ALIGNMENT 256
+#define MQD_FWWORKAREA_SIZE 484
+#define MQD_FWWORKAREA_ALIGNMENT 256
+
+static void gfx_v12_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info)
+{
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v12_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
+ return 0;
+ }
+
+ memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
+ return -EINVAL;
+}
+
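+
gfx12 gains the same skip_check variant as gfx11 above and wires it into gfx_v12_0_gfx_funcs below. A hypothetical caller sizing user-queue buffers before cp_gfx_shadow is confirmed (the funcs hook is from this patch; the surrounding code is illustrative):

struct amdgpu_gfx_shadow_info info;
u32 size;

/* skip_check=true: report the MQD shadow/FW work-area sizes even if
 * the shadow feature bit hasn't been negotiated yet. */
if (!adev->gfx.funcs->get_gfx_shadow_info(adev, &info, true))
	size = ALIGN(info.shadow_size, info.shadow_alignment) +
	       ALIGN(info.csa_size, info.csa_alignment);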
static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v12_0_select_se_sh,
@@ -889,6 +937,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v12_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
+ .get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
};
static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1346,6 +1395,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
unsigned num_compute_rings;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
@@ -1354,7 +1404,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 1):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 8;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 2;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -1372,6 +1422,22 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2780 &&
+ adev->gfx.pfp_fw_version >= 2840 &&
+ adev->gfx.mec_fw_version >= 3050 &&
+ adev->mes.fw_version[0] >= 123) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
if (adev->gfx.me_fw_version >= 2480 &&
adev->gfx.pfp_fw_version >= 2530 &&
adev->gfx.mec_fw_version >= 2680 &&
@@ -1383,36 +1449,38 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
- /* recalculate compute rings to use based on hardware configuration */
- num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
- adev->gfx.mec.num_queue_per_pipe) / 2;
- adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
- num_compute_rings);
+ if (adev->gfx.num_compute_rings) {
+ /* recalculate compute rings to use based on hardware configuration */
+ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe) / 2;
+ adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
+ num_compute_rings);
+ }
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
+ GFX_12_0_0__SRCID__CP_EOP_INTERRUPT,
&adev->gfx.eop_irq);
if (r)
return r;
/* Bad opcode Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
+ GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR,
&adev->gfx.bad_op_irq);
if (r)
return r;
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
+ GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
+ GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -1433,37 +1501,41 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v12_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v12_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev,
- 0, i, k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev,
+ 0, i, k, j))
+ continue;
- r = gfx_v12_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v12_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
@@ -1476,10 +1548,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
if ((adev->gfx.me_fw_version >= 2660) &&
- (adev->gfx.mec_fw_version >= 2920)) {
- adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ (adev->gfx.mec_fw_version >= 2920) &&
+ !amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
+ break;
+ default:
+ break;
}
if (!adev->enable_mes_kiq) {
@@ -2948,6 +3024,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -2968,6 +3046,14 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
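+ /* each 64-bit GPU VA below is split across _lo/_hi MQD fields */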
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -3091,6 +3177,8 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -3142,6 +3230,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+ /* set UQ fenceaddress */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -3600,6 +3692,49 @@ static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v12_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
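+ /* EOP interrupt sources are laid out pipe-major per MEC:
+ * base + m * num_pipe_per_mec + p
+ */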
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3610,6 +3745,7 @@ static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v12_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -3698,11 +3834,33 @@ static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
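+ /* amdgpu_user_queue: -1/0 = kernel queues only (default),
+ * 1 = both kernel and user queues, 2 = user queues only
+ */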
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
+
adev->gfx.funcs = &gfx_v12_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ adev->gfx.num_gfx_rings = 0;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
gfx_v12_0_set_kiq_pm4_funcs(adev);
gfx_v12_0_set_ring_funcs(adev);
@@ -3733,6 +3891,10 @@ static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = gfx_v12_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -4172,45 +4334,17 @@ static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always being used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- if (ring->use_doorbell) {
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
- lower_32_bits(ring->wptr));
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
- upper_32_bits(ring->wptr));
- }
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
+ lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
+ upper_32_bits(ring->wptr));
}
}
@@ -4235,42 +4369,14 @@ static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ /* XXX check if swapping is necessary on BE */
+ if (ring->use_doorbell) {
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- /* XXX check if swapping is necessary on BE */
- if (ring->use_doorbell) {
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- BUG(); /* only DOORBELL method supported on gfx12 now */
- }
+ BUG(); /* only DOORBELL method supported on gfx12 now */
}
}
@@ -4317,10 +4423,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4340,10 +4442,6 @@ static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4383,8 +4481,7 @@ static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -4412,10 +4509,7 @@ static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -4606,21 +4700,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -4749,25 +4828,23 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
+ if (adev->enable_mes && doorbell_offset) {
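+ /* user-queue fence drivers are indexed by doorbell offset
+ * in adev->userq_xa
+ */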
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
@@ -4934,27 +5011,29 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -5160,18 +5239,84 @@ static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block)
amdgpu_gfx_off_ctrl(adev, true);
}
-static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static bool gfx_v12_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* Disable pipe reset until the CP firmware fully supports it. */
+ dev_warn_once(adev->dev, "The CP firmware does not support pipe reset yet.\n");
+ return false;
+}
+
+static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+
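+ /* pulse the reset: write the assert value, then the clear value */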
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /* Sometimes the ME start pc counter can't cache correctly, so the
+ * PC check only as a reference and pipe reset result rely on the
+ * later ring test.
+ */
+ return 0;
+}
+
+static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
- dev_err(adev->dev, "reset via MES failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
+ r = gfx_v12_reset_gfx_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v12_0_kgq_init_queue(ring, true);
@@ -5186,21 +5331,107 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
-static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r = 0;
+
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ /* No F32 MEC instruction pointer register was found; assume the
+ * driver will not run in F32 mode.
+ */
+ }
+
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe resets: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /* Need the ring test to verify the pipe reset result.*/
+ return 0;
+}
+
+static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
+ r = gfx_v12_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v12_0_kcq_init_queue(ring, true);
@@ -5214,7 +5445,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
@@ -5292,7 +5523,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kgq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
@@ -5331,7 +5561,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kcq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 13fbee46417a..70d7a1f434c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -53,6 +53,9 @@
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define GFX6_NUM_GFX_RINGS 1
+#define GFX6_NUM_COMPUTE_RINGS 2
+
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
@@ -1732,10 +1735,14 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
gfx_v6_0_get_cu_info(adev);
gfx_v6_0_config_init(adev);
- WREG32(mmCP_QUEUE_THRESHOLDS, ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
- (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
- WREG32(mmCP_MEQ_THRESHOLDS, (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
- (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
+ WREG32(mmCP_QUEUE_THRESHOLDS,
+ ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
+ (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
+
+ /* set HW defaults for 3D engine */
+ WREG32(mmCP_MEQ_THRESHOLDS,
+ (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+ (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
sx_debug_1 = RREG32(mmSX_DEBUG_1);
WREG32(mmSX_DEBUG_1, sx_debug_1);
@@ -2851,44 +2858,21 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
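+ /* the open-coded preamble above is now shared: preamble_start
+ * emits the CLEAR_STATE preamble and CONTEXT_CONTROL, data_parser
+ * copies the SECT_CONTEXT register extents, and preamble_end
+ * closes the preamble and emits CLEAR_STATE
+ */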
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8181bd0e4f18..2aa323dab34e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -55,6 +55,9 @@
#define GFX7_NUM_GFX_RINGS 1
#define GFX7_MEC_HPD_SIZE 2048
+#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
+
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
@@ -3882,67 +3885,22 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KAVERI:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_HAWAII:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
@@ -4926,76 +4884,6 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
-static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
-static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -5045,7 +4933,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync,
- .reset = gfx_v7_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bfedd487efc5..367449d8061b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1223,48 +1223,22 @@ out:
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
- PACKET3_SET_CONTEXT_REG_START);
+ buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
@@ -4666,6 +4640,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
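+ /* also clear the CPU-visible wptr shadow so a stale value is
+ * not read back after the reset
+ */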
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
return 0;
@@ -6365,34 +6340,6 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
amdgpu_ring_write(ring, val);
}
-static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6869,48 +6816,6 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
-static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6976,7 +6881,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
- .reset = gfx_v8_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d7db4cb907ae..20b30f4b3c7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -225,17 +225,36 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
- SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3)
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
+ /* packet headers */
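+ /* the HEADER_DUMP registers are listed repeatedly to capture
+ * successive dump entries on each read
+ */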
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
};
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
@@ -277,6 +296,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
};
enum ta_ras_gfx_subblock {
@@ -1624,42 +1651,16 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
@@ -2234,6 +2235,25 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 3, 0):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 167 &&
+ adev->gfx.pfp_fw_version >= 196 &&
+ adev->gfx.mec_fw_version >= 474) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(9, 4, 2):
adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
@@ -2390,6 +2410,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
@@ -5441,16 +5463,8 @@ static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_ce_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
@@ -5473,16 +5487,8 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_de_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
IB_COMPLETION_STATUS_PREEMPTED;
@@ -5672,19 +5678,9 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- }
+ offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
@@ -5770,28 +5766,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bo
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
if (usegds) {
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
@@ -7192,53 +7173,9 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v9_0_ring_emit_wreg(kiq_ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 7 + 5))
- return -ENOMEM;
- gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v9_0_ring_emit_reg_wait(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff);
- gfx_v9_0_ring_emit_wreg(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -7246,12 +7183,11 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -7301,13 +7237,13 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
DRM_ERROR("fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -7339,9 +7275,14 @@ static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -7378,9 +7319,13 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
soc15_grbm_select(adev, 1 + i, j, k, 0, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_9[reg]));
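+ /* the list names the ME1 HEADER_DUMP register once; redirect
+ * reads to the ME2 instance for the second MEC (i != 0)
+ */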
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_9[reg]));
}
index += reg_count;
}
@@ -7394,8 +7339,14 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
/* Emit the cleaner shader */
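+ /* gfx 9.4.2 uses a different RUN_CLEANER_SHADER opcode than
+ * the rest of the gfx9 family
+ */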
- amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ else
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
+
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
@@ -7502,7 +7453,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
- .reset = gfx_v9_0_reset_kgq,
.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index d81449f9d822..c48cd47b531f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -1547,7 +1547,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
{
uint32_t bank, way, mem;
static const char * const vml2_way_str[] = { "BIGK", "4K" };
- static const char * const utcl2_rounter_str[] = { "VMC", "APT" };
+ static const char * const utcl2_router_str[] = { "VMC", "APT" };
mem = instance % blk->num_mem_blocks;
way = (instance / blk->num_mem_blocks) % blk->num_ways;
@@ -1568,7 +1568,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
dev_info(
adev->dev,
"GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
- bank, utcl2_rounter_str[mem], sec_cnt, ded_cnt);
+ bank, utcl2_router_str[mem], sec_cnt, ded_cnt);
break;
case ATC_L2_CACHE_2M:
dev_info(
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 53fbf6ca7cdb..51babf5c78c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -105,9 +105,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
@@ -154,6 +151,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
@@ -1143,7 +1148,15 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
- if (adev->gfx.mec_fw_version >= 155) {
+ if ((adev->gfx.mec_fw_version >= 155) &&
+ !amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
+ }
+ break;
+ case IP_VERSION(9, 5, 0):
+ if ((adev->gfx.mec_fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev)) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
@@ -1262,6 +1275,22 @@ static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
}
}
+/* For ASICs that need the xnack chain and whose MEC firmware supports it,
+ * set the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform
+ * KFD to set the xnack_chain bit in SET_RESOURCES.
+ */
+static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
+{
+ uint32_t data;
+
+ if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ return;
+
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, xcc_id, regSQ_CONFIG1, data);
+}
+
static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
int xcc_id)
{
@@ -1306,6 +1335,7 @@ static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
+ gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}
static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
@@ -1318,6 +1348,22 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
adev->gfx.config.db_debug2 =
RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ /* TODO: GC 9.4.4 */
+ case IP_VERSION(9, 4, 3):
+ if (adev->gfx.mec_fw_version >= 184 &&
+ (amdgpu_sriov_reg_access_sq_config(adev) ||
+ !amdgpu_sriov_vf(adev)))
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ case IP_VERSION(9, 5, 0):
+ if (adev->gfx.mec_fw_version >= 23)
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_xcc_constants_init(adev, i);
}
@@ -3447,9 +3493,7 @@ static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
{
- /*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->gfx.mec_fw_version >= 0x0000009b)
+ if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
return true;
else
dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n");
@@ -3512,7 +3556,8 @@ static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
}
static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
@@ -3520,12 +3565,11 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -3551,7 +3595,9 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
pipe_reset:
- if(r) {
+ if (r) {
+ if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
+ return -EOPNOTSUPP;
r = gfx_v9_4_3_reset_hw_pipe(ring);
dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
r ? "failed" : "successfully");
@@ -3572,14 +3618,14 @@ pipe_reset:
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
dev_err(adev->dev, "fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
enum amdgpu_gfx_cp_ras_mem_id {
@@ -4558,12 +4604,21 @@ static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_pri
"\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
xcc_id, i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p,
- "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9_4_3[reg].reg_name,
- adev->gfx.ip_dump_compute_queues
- [xcc_offset + inst_offset +
- reg]);
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
+ else
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9_4_3[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
}
inst_offset += reg_count;
}
@@ -4612,12 +4667,20 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
GET_INST(GC, xcc_id));
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues
- [xcc_offset +
- inst_offset + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(
- gc_cp_reg_list_9_4_3[reg],
- GET_INST(GC, xcc_id)));
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(
+ gc_cp_reg_list_9_4_3[reg],
+ GET_INST(GC, xcc_id)));
}
inst_offset += reg_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index cb25f7f0dfc1..6c03bf9f1ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -74,6 +74,8 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
uint32_t xcc_mask)
{
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
int i;
@@ -91,10 +93,10 @@ static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
@@ -180,7 +182,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
/* In the case squeezing vram into GART aperture, we don't use
* FB aperture and AGP aperture. Disable them.
*/
- if (adev->gmc.pdb0_bo) {
+ if (adev->gmc.pdb0_bo && adev->gmc.xgmi.connected_to_cpu) {
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 809b3a882d0d..7923f491cf73 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -164,10 +164,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -428,10 +425,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index e74e26b6a4f2..f15d691e9a20 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -134,10 +134,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -393,10 +390,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -752,6 +745,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
+ /* The mall_size is already calculated as mall_size_per_umc * num_umc.
+ * However, for gfx1151, which features a 2-to-1 UMC mapping,
+ * the result must be multiplied by 2 to determine the actual mall size.
+ */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 5, 1):
+ adev->gmc.mall_size *= 2;
+ break;
+ default:
+ break;
+ }
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -832,7 +837,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
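To make the gfx1151 MALL adjustment earlier in this hunk concrete with assumed numbers (illustrative only, not real gfx1151 discovery values): if discovery reports mall_size_per_umc = 2 MiB and num_umc = 8, the generic computation yields a 16 MiB MALL; because each reported UMC on gfx1151 backs two memory channels, the physical MALL is actually 2 * 16 MiB = 32 MiB, which is what the *= 2 adjustment restores.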
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index c6f290704d47..de763105fdfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -127,10 +127,7 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -413,10 +410,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -820,7 +813,7 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index a992e79d9581..8030fcd64210 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -249,7 +249,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp &= ~VGA_VSTATUS_CNTL;
+ tmp &= ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK;
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
/* Update configuration */
@@ -627,17 +627,16 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
"write" : "read", block, mc_client, mc_id);
}
-/*
static const u32 mc_cg_registers[] = {
- MC_HUB_MISC_HUB_CG,
- MC_HUB_MISC_SIP_CG,
- MC_HUB_MISC_VM_CG,
- MC_XPB_CLK_GAT,
- ATC_MISC_CG,
- MC_CITF_MISC_WR_CG,
- MC_CITF_MISC_RD_CG,
- MC_CITF_MISC_VM_CG,
- VM_L2_CG,
+ mmMC_HUB_MISC_HUB_CG,
+ mmMC_HUB_MISC_SIP_CG,
+ mmMC_HUB_MISC_VM_CG,
+ mmMC_XPB_CLK_GAT,
+ mmATC_MISC_CG,
+ mmMC_CITF_MISC_WR_CG,
+ mmMC_CITF_MISC_RD_CG,
+ mmMC_CITF_MISC_VM_CG,
+ mmVM_L2_CG,
};
static const u32 mc_cg_ls_en[] = {
@@ -672,7 +671,7 @@ static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i];
else
data &= ~mc_cg_ls_en[i];
@@ -689,7 +688,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i];
else
data &= ~mc_cg_en[i];
@@ -705,7 +704,7 @@ static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -728,7 +727,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -744,7 +743,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_MEM_POWER_LS);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
@@ -752,7 +751,6 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
if (orig != data)
WREG32(mmHDP_MEM_POWER_LS, data);
}
-*/
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
@@ -1098,6 +1096,20 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool gate = false;
+
+ if (state == AMD_CG_STATE_GATE)
+ gate = true;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ gmc_v6_0_enable_mc_mgcg(adev, gate);
+ gmc_v6_0_enable_mc_ls(adev, gate);
+ }
+ gmc_v6_0_enable_bif_mgls(adev, gate);
+ gmc_v6_0_enable_hdp_mgcg(adev, gate);
+ gmc_v6_0_enable_hdp_ls(adev, gate);
+
return 0;
}
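The hunk above finally wires the clock-gating helpers (previously dead code inside the block comment) into the IP's set_clockgating_state callback, with the flag names corrected from the stale AMDGPU_CG_SUPPORT_* spellings to the AMD_CG_SUPPORT_* enums. For reference, a sketch of how the callback is reached through the common IP-block interface (the wrapper function is illustrative, not part of this patch):

static int example_gate_gmc(struct amdgpu_device *adev, bool gate)
{
	/* routed to gmc_v6_0_set_clockgating_state() via amd_ip_funcs */
	return amdgpu_device_ip_set_clockgating_state(adev,
			AMD_IP_BLOCK_TYPE_GMC,
			gate ? AMD_CG_STATE_GATE : AMD_CG_STATE_UNGATE);
}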
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 83e39f16044a..a8d5795084fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1157,17 +1157,10 @@ static bool gmc_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC_BUSY_MASK);
- if (!tmp)
+ if (gmc_v7_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 99ca08e9bdb5..b45fa0cea9d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1458,9 +1458,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 5effe8327d29..c4d69cf4e06c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -78,8 +78,6 @@
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
-#define MAX_MEM_RANGES 8
-
static const char * const gfxhub_client_ids[] = {
"CB",
"DB",
@@ -411,11 +409,6 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
(0x001d43e0 + 0x00001800),
};
-static inline bool gmc_v9_0_is_multi_chiplet(struct amdgpu_device *adev)
-{
- return !!adev->aid_mask;
-}
-
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
@@ -641,10 +634,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " for process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -652,7 +642,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -801,7 +791,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- gmc_v9_0_is_multi_chiplet(adev))
+ amdgpu_is_multi_aid(adev))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -1131,8 +1121,8 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
- struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -1142,7 +1132,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_EXT_COHERENT);
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
- struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
bool snoop = false;
@@ -1172,7 +1161,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
mtype = MTYPE_UC;
else
mtype = MTYPE_NC;
- if (mapping->bo_va->is_xgmi)
+ if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
} else {
@@ -1213,10 +1202,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
if (uncached) {
mtype = MTYPE_UC;
} else if (ext_coherent) {
- if (gc_ip_version == IP_VERSION(9, 5, 0) || adev->rev_id)
- mtype = is_local ? MTYPE_CC : MTYPE_UC;
- else
- mtype = MTYPE_UC;
+ mtype = is_local ? MTYPE_CC : MTYPE_UC;
} else if (adev->flags & AMD_IS_APU) {
mtype = is_local ? mtype_local : MTYPE_NC;
} else {
@@ -1267,7 +1253,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
}
if ((*flags & AMDGPU_PTE_VALID) && bo)
- gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
+ gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo,
+ flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1336,7 +1323,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
mtype_local = MTYPE_CC;
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
- } else if (adev->rev_id) {
+ } else {
/* MTYPE_UC case */
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
}
@@ -1388,46 +1375,6 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static enum amdgpu_memory_partition
-gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
-{
- enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
-
- if (adev->nbio.funcs->get_memory_partition_mode)
- mode = adev->nbio.funcs->get_memory_partition_mode(adev,
- supp_modes);
-
- return mode;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev)
-{
- switch (adev->gmc.num_mem_partitions) {
- case 0:
- return UNKNOWN_MEMORY_PARTITION_MODE;
- case 1:
- return AMDGPU_NPS1_PARTITION_MODE;
- case 2:
- return AMDGPU_NPS2_PARTITION_MODE;
- case 4:
- return AMDGPU_NPS4_PARTITION_MODE;
- default:
- return AMDGPU_NPS1_PARTITION_MODE;
- }
-
- return AMDGPU_NPS1_PARTITION_MODE;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
-{
- if (amdgpu_sriov_vf(adev))
- return gmc_v9_0_query_vf_memory_partition(adev);
-
- return gmc_v9_0_get_memory_partition(adev, NULL);
-}
-
static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
{
if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
@@ -1449,7 +1396,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.get_vm_pte = gmc_v9_0_get_vm_pte,
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
- .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
+ .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
};
@@ -1505,7 +1452,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
- adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
break;
@@ -1557,7 +1503,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1603,7 +1549,7 @@ static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
return;
- mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
/* Mode detected by hardware and supported modes available */
if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) {
@@ -1639,7 +1585,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
- gmc_v9_0_is_multi_chiplet(adev))
+ amdgpu_is_multi_aid(adev))
adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
@@ -1726,7 +1672,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
amdgpu_gmc_sysvm_location(adev, mc);
} else {
amdgpu_gmc_vram_location(adev, mc, base);
@@ -1841,7 +1787,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
return 0;
}
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
adev->gmc.vmid0_page_table_depth = 1;
adev->gmc.vmid0_page_table_block_size = 12;
} else {
@@ -1867,7 +1813,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
r = amdgpu_gmc_pdb0_alloc(adev);
}
@@ -1889,192 +1835,13 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
-static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
-{
- enum amdgpu_memory_partition mode;
- u32 supp_modes;
- bool valid;
-
- mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
-
- /* Mode detected by hardware not present in supported modes */
- if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
- !(BIT(mode - 1) & supp_modes))
- return false;
-
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- case AMDGPU_NPS1_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 1);
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 2);
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 3 ||
- adev->gmc.num_mem_partitions == 4);
- break;
- default:
- valid = false;
- }
-
- return valid;
-}
-
-static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
-{
- int i;
-
- /* Check if node with id 'nid' is present in 'node_ids' array */
- for (i = 0; i < num_ids; ++i)
- if (node_ids[i] == nid)
- return true;
-
- return false;
-}
-
-static void
-gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
-{
- struct amdgpu_numa_info numa_info;
- int node_ids[MAX_MEM_RANGES];
- int num_ranges = 0, ret;
- int num_xcc, xcc_id;
- uint32_t xcc_mask;
-
- num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- xcc_mask = (1U << num_xcc) - 1;
-
- for_each_inst(xcc_id, xcc_mask) {
- ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
- if (ret)
- continue;
-
- if (numa_info.nid == NUMA_NO_NODE) {
- mem_ranges[0].size = numa_info.size;
- mem_ranges[0].numa.node = numa_info.nid;
- num_ranges = 1;
- break;
- }
-
- if (gmc_v9_0_is_node_present(node_ids, num_ranges,
- numa_info.nid))
- continue;
-
- node_ids[num_ranges] = numa_info.nid;
- mem_ranges[num_ranges].numa.node = numa_info.nid;
- mem_ranges[num_ranges].size = numa_info.size;
- ++num_ranges;
- }
-
- adev->gmc.num_mem_partitions = num_ranges;
-}
-
-static void
-gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
-{
- enum amdgpu_memory_partition mode;
- u32 start_addr = 0, size;
- int i, r, l;
-
- mode = gmc_v9_0_query_memory_partition(adev);
-
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 0;
- break;
- case AMDGPU_NPS1_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 1;
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 2;
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- if (adev->flags & AMD_IS_APU)
- adev->gmc.num_mem_partitions = 3;
- else
- adev->gmc.num_mem_partitions = 4;
- break;
- default:
- adev->gmc.num_mem_partitions = 1;
- break;
- }
-
- /* Use NPS range info, if populated */
- r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
- &adev->gmc.num_mem_partitions);
- if (!r) {
- l = 0;
- for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
- if (mem_ranges[i].range.lpfn >
- mem_ranges[i - 1].range.lpfn)
- l = i;
- }
-
- } else {
- if (!adev->gmc.num_mem_partitions) {
- dev_err(adev->dev,
- "Not able to detect NPS mode, fall back to NPS1");
- adev->gmc.num_mem_partitions = 1;
- }
- /* Fallback to sw based calculation */
- size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
- size /= adev->gmc.num_mem_partitions;
-
- for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
- mem_ranges[i].range.fpfn = start_addr;
- mem_ranges[i].size =
- ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
- mem_ranges[i].range.lpfn = start_addr + size - 1;
- start_addr += size;
- }
-
- l = adev->gmc.num_mem_partitions - 1;
- }
-
- /* Adjust the last one */
- mem_ranges[l].range.lpfn =
- (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
- mem_ranges[l].size =
- adev->gmc.real_vram_size -
- ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
-}
-
-static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
-{
- bool valid;
-
- adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
- sizeof(struct amdgpu_mem_partition_info),
- GFP_KERNEL);
- if (!adev->gmc.mem_partitions)
- return -ENOMEM;
-
- /* TODO : Get the range from PSP/Discovery for dGPU */
- if (adev->gmc.is_app_apu)
- gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
- else
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
-
- if (amdgpu_sriov_vf(adev))
- valid = true;
- else
- valid = gmc_v9_0_validate_partition_info(adev);
- if (!valid) {
- /* TODO: handle invalid case */
- dev_WARN(adev->dev,
- "Mem ranges not matching with hardware config");
- }
-
- return 0;
-}
-
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
@@ -2089,7 +1856,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- if (gmc_v9_0_is_multi_chiplet(adev)) {
+ if (amdgpu_is_multi_aid(adev)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2239,8 +2006,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_gmc_get_vbios_allocations(adev);
- if (gmc_v9_0_is_multi_chiplet(adev)) {
- r = gmc_v9_0_init_mem_ranges(adev);
+ if (amdgpu_is_multi_aid(adev)) {
+ r = amdgpu_gmc_init_mem_ranges(adev);
if (r)
return r;
}
@@ -2268,7 +2035,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vm_manager.first_kfd_vmid =
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- gmc_v9_0_is_multi_chiplet(adev)) ?
+ amdgpu_is_multi_aid(adev)) ?
3 :
8;
@@ -2280,7 +2047,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_init(adev);
return 0;
@@ -2290,7 +2057,7 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2364,7 +2131,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
int r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
amdgpu_gmc_init_pdb0(adev);
if (adev->gart.bo == NULL) {
@@ -2411,13 +2178,6 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.flush_tlb_needs_extra_type_2 =
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
adev->gmc.xgmi.num_physical_nodes;
- /*
- * TODO: This workaround is badly documented and had a buggy
- * implementation. We should probably verify what we do here.
- */
- adev->gmc.flush_tlb_needs_extra_type_0 =
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->rev_id == 0;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
@@ -2529,7 +2289,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
* information again.
*/
if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
}
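A large block of memory-partition plumbing (mode query, ACPI and software range initialization, validation) leaves this file for common GMC code: gmc_v9_0_query_memory_partition, gmc_v9_0_init_mem_ranges and friends are replaced by amdgpu_gmc_* equivalents. Judging by the removed static helpers, the relocated logic presumably keeps the same NPS-mode to partition-count mapping; a sketch of that core table, reconstructed from the deleted code rather than copied from the new common helper:

static int nps_mode_to_num_partitions(struct amdgpu_device *adev,
				      enum amdgpu_memory_partition mode)
{
	switch (mode) {
	case UNKNOWN_MEMORY_PARTITION_MODE:
		return 0;
	case AMDGPU_NPS1_PARTITION_MODE:
		return 1;
	case AMDGPU_NPS2_PARTITION_MODE:
		return 2;
	case AMDGPU_NPS4_PARTITION_MODE:
		/* APUs expose three ranges in NPS4, dGPUs four */
		return (adev->flags & AMD_IS_APU) ? 3 : 4;
	default:
		return 1;
	}
}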
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index f1dc13b3ab38..e6c0d86d3486 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -36,17 +36,6 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
-static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -180,7 +169,7 @@ struct amdgpu_hdp_ras hdp_v4_0_ras = {
};
const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
- .flush_hdp = hdp_v4_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v4_0_invalidate_hdp,
.update_clock_gating = hdp_v4_0_update_clock_gating,
.get_clock_gating_state = hdp_v4_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index 43195c079748..8bc001dc9f63 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -27,17 +27,6 @@
#include "hdp/hdp_5_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -217,7 +206,7 @@ static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
- .flush_hdp = hdp_v5_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v5_0_invalidate_hdp,
.update_clock_gating = hdp_v5_0_update_clock_gating,
.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index fcb8dd2876bc..40940b4ab400 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -33,7 +33,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
- RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ if (amdgpu_sriov_vf(adev)) {
+ /* this is fine because SR-IOV doesn't remap the register */
+ RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+ * some platforms so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ }
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index a88d25a06c29..ec20daf4272c 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -30,17 +30,6 @@
#define regHDP_CLK_CNTL_V6_1 0xd5
#define regHDP_CLK_CNTL_V6_1_BASE_IDX 0
-static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -149,7 +138,7 @@ static void hdp_v6_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v6_0_funcs = {
- .flush_hdp = hdp_v6_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v6_0_update_clock_gating,
.get_clock_gating_state = hdp_v6_0_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 49f7eb4fbd11..ed1debc03507 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -27,17 +27,6 @@
#include "hdp/hdp_7_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -137,7 +126,7 @@ static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v7_0_funcs = {
- .flush_hdp = hdp_v7_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v7_0_update_clock_gating,
.get_clock_gating_state = hdp_v7_0_get_clockgating_state,
};
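The HDP v4/v5/v6/v7 hunks above all delete the same function body and point .flush_hdp at amdgpu_hdp_generic_flush instead. Since the four removed copies were identical, the shared helper is presumably that code hoisted into common HDP code; reconstructed from the deleted bodies:

void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		/* direct MMIO write, then a read back to post it */
		WREG32((adev->rmmio_remap.reg_offset +
			KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
		RREG32((adev->rmmio_remap.reg_offset +
			KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
	} else {
		/* emit the register write on the ring instead */
		amdgpu_ring_emit_wreg(ring,
				      (adev->rmmio_remap.reg_offset +
				       KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
				      0);
	}
}

Note that hdp_v5_2 keeps its own variant because of the SR-IOV read-back quirk handled a few hunks above.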
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index eb4185dcbd1d..5900b560b7de 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -349,6 +349,7 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
/* update doorbell range for ih ring 0 */
@@ -446,7 +447,10 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index cfa91d709d49..cc626036ed9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -32,6 +32,7 @@
#include "gc/gc_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
@@ -51,8 +52,12 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
index df898dbb746e..58cd87db8061 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
@@ -34,12 +34,13 @@
MODULE_FIRMWARE("amdgpu/gc_12_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu_kicker.bin");
#define TRANSFER_RAM_MASK 0x001c0000
static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct imu_firmware_header_v1_0 *imu_hdr;
struct amdgpu_firmware_info *info = NULL;
@@ -47,8 +48,12 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
@@ -362,7 +367,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev,
static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev)
{
u32 reg_data, size = 0;
- const u32 *data;
+ const u32 *data = NULL;
int r = -EINVAL;
WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);
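Both IMU microcode loaders above gain the same branch: when amdgpu_is_kicker_fw() identifies a kicker board, the _imu_kicker.bin image is requested instead of the regular _imu.bin. The duplicated if/else could be expressed once as below; request_imu_fw() is a hypothetical consolidation for illustration, not a helper added by this patch:

static int request_imu_fw(struct amdgpu_device *adev, const char *prefix)
{
	/* kicker boards ship a separately signed IMU firmware image */
	const char *suffix = amdgpu_is_kicker_fw(adev) ?
			     "_imu_kicker.bin" : "_imu.bin";

	return amdgpu_ucode_request(adev, &adev->gfx.imu_fw,
				    AMDGPU_UCODE_REQUIRED,
				    "amdgpu/%s%s", prefix, suffix);
}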
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 69dd92f6e86d..a887df520414 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -25,9 +25,16 @@
*
*/
+#include <linux/gpio/machine.h>
#include "amdgpu.h"
#include "isp_v4_1_1.h"
+#define ISP_PERFORMANCE_STATE_LOW 0
+#define ISP_PERFORMANCE_STATE_HIGH 1
+
+#define ISP_HIGH_PERFORMANCE_XCLK 788
+#define ISP_HIGH_PERFORMANCE_ICLK 788
+
static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9,
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10,
@@ -39,22 +46,182 @@ static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16
};
+static struct gpiod_lookup_table isp_gpio_table = {
+ .dev_id = "amd_isp_capture",
+ .table = {
+ GPIO_LOOKUP("AMDI0030:00", 85, "enable_isp", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table isp_sensor_gpio_table = {
+ .dev_id = "i2c-ov05c10",
+ .table = {
+ GPIO_LOOKUP("amdisp-pinctrl", 0, "enable", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static int isp_poweroff(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, true, 0);
+}
+
+static int isp_poweron(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, false, 0);
+}
+
+static int isp_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ u32 iclk, xclk;
+ int ret;
+
+ switch (state) {
+ case ISP_PERFORMANCE_STATE_HIGH:
+ xclk = ISP_HIGH_PERFORMANCE_XCLK;
+ iclk = ISP_HIGH_PERFORMANCE_ICLK;
+ break;
+ case ISP_PERFORMANCE_STATE_LOW:
+ /* isp runs at default lowest clock-rate on power-on, do nothing */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPXCLK, xclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set xclk %u to %u: %d\n",
+ xclk, state, ret);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPICLK, iclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set iclk %u to %u: %d\n",
+ iclk, state, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int isp_genpd_add_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to add\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to add\n", pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_add_device(gpd, dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to add dev %s to genpd %d\n",
+ pdev->mfd_cell->name, ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to add */
+ return 0;
+}
+
+static int isp_genpd_remove_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to remove\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to remove\n",
+ pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_remove_device(dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to remove */
+ return 0;
+}
+
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
+ const struct software_node *amd_camera_node, *isp4_node;
struct amdgpu_device *adev = isp->adev;
+ struct acpi_device *acpi_dev;
int idx, int_idx, num_res, r;
u64 isp_base;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
+ r = amdgpu_acpi_get_isp4_dev(&acpi_dev);
+ if (r) {
+ drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r);
+ /* allow GPU init to progress */
+ return 0;
+ }
+
+ /* add GPIO resources required for OMNI5C10 sensor */
+ if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev))) {
+ gpiod_add_lookup_table(&isp_gpio_table);
+ gpiod_add_lookup_table(&isp_sensor_gpio_table);
+ }
+
isp_base = adev->rmmio_base;
+ isp->ispgpd.name = "ISP_v_4_1_1";
+ isp->ispgpd.power_off = isp_poweroff;
+ isp->ispgpd.power_on = isp_poweron;
+ isp->ispgpd.set_performance_state = isp_set_performance_state;
+
+ r = pm_genpd_init(&isp->ispgpd, NULL, true);
+ if (r) {
+ drm_err(&adev->ddev, "failed to initialize genpd (%d)\n", r);
+ return -EINVAL;
+ }
+
isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd cell alloc failed (%d)\n", r);
goto failure;
}
@@ -64,19 +231,20 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd resource alloc failed (%d)\n", r);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp platform data alloc failed (%d)\n", r);
goto failure;
}
+ amd_camera_node = (const struct software_node *)acpi_dev->driver_data;
+ isp4_node = software_node_find_by_name(amd_camera_node, "isp4");
+
/* initialize isp platform data */
isp->isp_pdata->adev = (void *)adev;
isp->isp_pdata->asic_type = adev->asic_type;
@@ -105,14 +273,14 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
+ isp->isp_cell[0].swnode = isp4_node;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
/* initialize isp i2c platform data */
isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd res alloc failed (%d)\n", r);
goto failure;
}
@@ -131,8 +299,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_gpio_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp gpio res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp gpio resource alloc failed (%d)\n", r);
goto failure;
}
@@ -148,10 +315,23 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[2].platform_data = isp->isp_pdata;
isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);
+ /* add only amd_isp_capture and amd_isp_i2c_designware to genpd */
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
- drm_err(&adev->ddev,
- "%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev, "add mfd hotplug device failed (%d)\n", r);
+ goto failure;
+ }
+
+ r = device_for_each_child(isp->parent, &isp->ispgpd,
+ isp_genpd_add_device);
+ if (r) {
+ drm_err(&adev->ddev, "failed to add devices to genpd (%d)\n", r);
+ goto failure;
+ }
+
+ r = mfd_add_hotplug_devices(isp->parent, &isp->isp_cell[2], 1);
+ if (r) {
+ drm_err(&adev->ddev, "add pinctl hotplug device failed (%d)\n", r);
goto failure;
}
@@ -170,6 +350,9 @@ failure:
static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
{
+ device_for_each_child(isp->parent, NULL,
+ isp_genpd_remove_device);
+
mfd_remove_devices(isp->parent);
kfree(isp->isp_res);
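With the ISP power domain in place, devices attached to it reach isp_poweron()/isp_poweroff() through runtime PM and can select between the two performance states via the standard genpd API. A consumer-side sketch, assuming dev is one of the MFD cells attached to isp->ispgpd (the call site is illustrative, not part of this patch):

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_isp_go_fast(struct device *dev)
{
	int ret;

	/* powers the domain on, ending up in isp_poweron() */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	/* routed to isp_set_performance_state(ISP_PERFORMANCE_STATE_HIGH) */
	return dev_pm_genpd_set_performance_state(dev,
						  ISP_PERFORMANCE_STATE_HIGH);
}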
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 4cde8a8bcc83..58239c405fda 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -118,7 +118,10 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -764,11 +767,20 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- jpeg_v2_0_stop(ring->adev);
- jpeg_v2_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v2_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v2_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 8b39e114f3be..3e2c389242db 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -167,7 +167,10 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -643,11 +646,14 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v2_5_stop_inst(ring->adev, ring->me);
jpeg_v2_5_start_inst(ring->adev, ring->me);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 2f8510c2986b..a44eb2667664 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -132,7 +132,10 @@ static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -555,11 +558,20 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- jpeg_v3_0_stop(ring->adev);
- jpeg_v3_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v3_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v3_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index f17ec5414fd6..da3ee69f1a3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -143,7 +143,10 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -720,14 +723,20 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- if (amdgpu_sriov_vf(ring->adev))
- return -EINVAL;
+ int r;
- jpeg_v4_0_stop(ring->adev);
- jpeg_v4_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index a8ccae361ec7..b86288a69e7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -149,6 +149,18 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ /* JPEG DJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
+
+ /* JPEG EJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
+
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
@@ -204,12 +216,11 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- }
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- return 0;
+ return r;
}
@@ -230,8 +241,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev))
- amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
@@ -434,6 +444,9 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+
return ret;
}
@@ -1041,6 +1054,14 @@ static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1120,14 +1141,17 @@ static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring)
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}
-static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
if (amdgpu_sriov_vf(ring->adev))
return -EOPNOTSUPP;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v4_0_3_core_stall_reset(ring);
jpeg_v4_0_3_start_jrbc(ring);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
@@ -1200,6 +1224,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = {
.process = jpeg_v4_0_3_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_ras_irq_funcs = {
+ .set = jpeg_v4_0_3_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1208,6 +1237,9 @@ static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
}
adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs;
+
+ adev->jpeg.inst->ras_poison_irq.num_types = 1;
+ adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_3_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = {
@@ -1304,9 +1336,47 @@ static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
jpeg_v4_0_3_inst_reset_ras_error_count(adev, i);
}
+static uint32_t jpeg_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_JPEG_V4_0_3_JPEG0:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+ break;
+ case AMDGPU_JPEG_V4_0_3_JPEG1:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool jpeg_v4_0_3_query_ras_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+ for (sub = 0; sub < AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ jpeg_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
.query_ras_error_count = jpeg_v4_0_3_query_ras_error_count,
.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
+ .query_poison_status = jpeg_v4_0_3_query_ras_poison_status,
};
static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@@ -1383,6 +1453,13 @@ static int jpeg_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_comm
if (r)
return r;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->jpeg.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
&jpeg_v4_0_3_aca_info, NULL);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index a90bf370a002..2e110d04af84 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -46,6 +46,13 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+enum amdgpu_jpeg_v4_0_3_sub_block {
+ AMDGPU_JPEG_V4_0_3_JPEG0 = 0,
+ AMDGPU_JPEG_V4_0_3_JPEG1,
+
+ AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 974030a5c03c..481d1a2dbe5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -174,9 +174,10 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -767,6 +768,22 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_5_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_5_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.name = "jpeg_v4_0_5",
.early_init = jpeg_v4_0_5_early_init,
@@ -812,6 +829,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v4_0_5_ring_reset,
};
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index 31d213ccbe0a..e0a71909252b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -120,13 +120,13 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- return 0;
+
+ return r;
}
/**
@@ -644,6 +644,22 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v5_0_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v5_0_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.name = "jpeg_v5_0_0",
.early_init = jpeg_v5_0_0_early_init,
@@ -689,6 +705,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v5_0_0_ring_reset,
};
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 218e16b68f1d..54523dc1f702 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -28,15 +28,18 @@
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
+static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);
static int amdgpu_ih_srcid_jpeg[] = {
@@ -118,6 +121,7 @@ static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
jpeg_v5_0_1_set_dec_ring_funcs(adev);
jpeg_v5_0_1_set_irq_funcs(adev);
+ jpeg_v5_0_1_set_ras_funcs(adev);
return 0;
}
@@ -142,6 +146,17 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
}
+ /* JPEG DJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
+
+ /* JPEG EJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
@@ -156,21 +171,16 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- ring->use_doorbell = false;
+ ring->use_doorbell = true;
ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
if (!amdgpu_sriov_vf(adev)) {
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
1 + j + 11 * jpeg_inst;
} else {
- if (j < 4)
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 4 + j + 32 * jpeg_inst;
- else
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 8 + j + 32 * jpeg_inst;
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 2 + j + 32 * jpeg_inst;
}
sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
@@ -190,14 +200,13 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- }
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- return 0;
+ return r;
}
/**
@@ -216,8 +225,7 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev))
- amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
@@ -237,7 +245,10 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
int i, j, r, jpeg_inst;
if (amdgpu_sriov_vf(adev)) {
- /* jpeg_v5_0_1_start_sriov(adev); */
+ r = jpeg_v5_0_1_start_sriov(adev);
+ if (r)
+ return r;
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
@@ -264,7 +275,7 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->jpeg.inst[i].ring_dec[j];
if (ring->use_doorbell)
WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
- (ring->pipe ? (ring->pipe - 0x15) : 0),
+ ring->pipe,
ring->doorbell_index <<
VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);
@@ -291,8 +302,13 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ }
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
}
@@ -422,6 +438,119 @@ static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
reg_offset);
}
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw, item_offset;
+ uint32_t init_status;
+ int i, j, jpeg_inst;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ item_offset = header.total_size;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ table_size = 0;
+
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);
+
+ if (j < 5) {
+ header.mjpegdec0[j].table_offset = item_offset;
+ header.mjpegdec0[j].init_status = 0;
+ header.mjpegdec0[j].table_size = table_size;
+ } else {
+ header.mjpegdec1[j - 5].table_offset = item_offset;
+ header.mjpegdec1[j - 5].init_status = 0;
+ header.mjpegdec1[j - 5].table_size = table_size;
+ }
+ header.total_size += table_size;
+ item_offset += table_size;
+ }
+
+ MMSCH_V5_0_INSERT_END();
+
+ /* send init table to MMSCH */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ init_status =
+ ((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
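+ /* poll the response mailbox; MMSCH sets it to OK once the init table has been consumed */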
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);
+
+ if (resp != 0)
+ break;
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
+ init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
+ resp, init_status);
+ }
+ return 0;
+}
+
/**
* jpeg_v5_0_1_start - start JPEG block
*
@@ -581,6 +710,11 @@ static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
int ret;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->jpeg.cur_state)
return 0;
@@ -603,6 +737,16 @@ static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
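+ /* nothing to program here: poison interrupts have no per-ring enable state */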
+ return 0;
+}
+
+
static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -688,14 +832,14 @@ static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}
-static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- if (amdgpu_sriov_vf(ring->adev))
- return -EOPNOTSUPP;
-
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v5_0_1_core_stall_reset(ring);
jpeg_v5_0_1_init_jrbc(ring);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
@@ -772,6 +916,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
.process = jpeg_v5_0_1_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_ras_irq_funcs = {
+ .set = jpeg_v5_0_1_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -780,6 +929,10 @@ static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;
+
+ adev->jpeg.inst->ras_poison_irq.num_types = 1;
+ adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v5_0_1_ras_irq_funcs;
+
}
const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
@@ -789,3 +942,150 @@ const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
.rev = 1,
.funcs = &jpeg_v5_0_1_ip_funcs,
};
+
+static uint32_t jpeg_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_JPEG_V5_0_1_JPEG0:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+ break;
+ case AMDGPU_JPEG_V5_0_1_JPEG1:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in JPEG%d sub_block %d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool jpeg_v5_0_1_query_ras_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+ for (sub = 0; sub < AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ jpeg_v5_0_1_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+static const struct amdgpu_ras_block_hw_ops jpeg_v5_0_1_ras_hw_ops = {
+ .query_poison_status = jpeg_v5_0_1_query_ras_poison_status,
+};
+
+static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* these error codes are referenced from the smu driver interface header file */
+static int jpeg_v5_0_1_err_codes[] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31
+};
+
+static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ jpeg_v5_0_1_err_codes,
+ ARRAY_SIZE(jpeg_v5_0_1_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops jpeg_v5_0_1_aca_bank_ops = {
+ .aca_bank_parser = jpeg_v5_0_1_aca_bank_parser,
+ .aca_bank_is_valid = jpeg_v5_0_1_aca_bank_is_valid,
+};
+
+static const struct aca_info jpeg_v5_0_1_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &jpeg_v5_0_1_aca_bank_ops,
+};
+
+static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->jpeg.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
+static struct amdgpu_jpeg_ras jpeg_v5_0_1_ras = {
+ .ras_block = {
+ .hw_ops = &jpeg_v5_0_1_ras_hw_ops,
+ .ras_late_init = jpeg_v5_0_1_ras_late_init,
+ },
+};
+
+static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.ras = &jpeg_v5_0_1_ras;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
index efdab57324e4..a7e58d5fb246 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
@@ -26,6 +26,9 @@
extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
+#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET 0x4094
+#define regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET 0x1bffe
+
#define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
#define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
#define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
@@ -98,4 +101,11 @@ extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
#define regVCN_RRMT_CNTL 0x0940
#define regVCN_RRMT_CNTL_BASE_IDX 1
+enum amdgpu_jpeg_v5_0_1_sub_block {
+ AMDGPU_JPEG_V5_0_1_JPEG0 = 0,
+ AMDGPU_JPEG_V5_0_1_JPEG1,
+
+ AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK,
+};
+
#endif /* __JPEG_V5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
new file mode 100644
index 000000000000..d6f50b13e2ba
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
+
+#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
+#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
+
+static int
+mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+{
+ int ret;
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
+ goto err_reserve_bo_failed;
+ }
+
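+ /* bind the BO's pages into the GART so the FW can reach them via a GPU address */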
+ ret = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (ret) {
+ DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
+ goto err_map_bo_gart_failed;
+ }
+
+ amdgpu_bo_unreserve(bo);
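+ /* hold an extra reference on the BO while it stays GART-bound */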
+ bo = amdgpu_bo_ref(bo);
+
+ return 0;
+
+err_map_bo_gart_failed:
+ amdgpu_bo_unreserve(bo);
+err_reserve_bo_failed:
+ return ret;
+}
+
+static int
+mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ uint64_t wptr)
+{
+ struct amdgpu_bo_va_mapping *wptr_mapping;
+ struct amdgpu_vm *wptr_vm;
+ struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
+ int ret;
+
+ wptr_vm = queue->vm;
+ ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
+ if (ret)
+ return ret;
+
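+ /* mask off the high "memory hole" bits to get the canonical GPU VA */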
+ wptr &= AMDGPU_GMC_HOLE_MASK;
+ wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
+ amdgpu_bo_unreserve(wptr_vm->root.bo);
+ if (!wptr_mapping) {
+ DRM_ERROR("Failed to lookup wptr bo\n");
+ return -EINVAL;
+ }
+
+ wptr_obj->obj = wptr_mapping->bo_va->base.bo;
+ if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
+ DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
+ return -EINVAL;
+ }
+
+ ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
+ if (ret) {
+ DRM_ERROR("Failed to map wptr bo to GART\n");
+ return ret;
+ }
+
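+ /* record the GART address the firmware will read for wptr updates */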
+ queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
+ return 0;
+}
+
+static int convert_to_mes_priority(int priority)
+{
+ switch (priority) {
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
+ default:
+ return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
+ return AMDGPU_MES_PRIORITY_LEVEL_LOW;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
+ }
+}
+
+static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
+ struct mes_add_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
+
+ queue_input.process_va_start = 0;
+ queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
+
+ /* set process quantum to 10 ms and gang quantum to 1 ms as default (values in 100 ns units) */
+ queue_input.process_quantum = 100000;
+ queue_input.gang_quantum = 10000;
+ queue_input.paging = false;
+
+ queue_input.process_context_addr = ctx->gpu_addr;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);
+
+ queue_input.process_id = queue->vm->pasid;
+ queue_input.queue_type = queue->queue_type;
+ queue_input.mqd_addr = queue->mqd.gpu_addr;
+ queue_input.wptr_addr = userq_props->wptr_gpu_addr;
+ queue_input.queue_size = userq_props->queue_size >> 2;
+ queue_input.doorbell_offset = userq_props->doorbell_index;
+ queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
+ queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
+ return r;
+ }
+
+ DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
+ return 0;
+}
+
+static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_remove_queue_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
+ queue_input.doorbell_offset = queue->doorbell_index;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
+ return r;
+}
+
+static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ struct drm_amdgpu_userq_in *mqd_user)
+{
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r, size;
+
+ /*
+ * The FW expects at least one page of space for the process
+ * ctx and one for the gang ctx. Allocate a single object
+ * covering both.
+ */
+ size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
+ r = amdgpu_userq_create_object(uq_mgr, ctx, size);
+ if (r) {
+ DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args_in,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
+ struct drm_amdgpu_userq_in *mqd_user = args_in;
+ struct amdgpu_mqd_prop *userq_props;
+ int r;
+
+ /* Structure to initialize MQD for userqueue using generic MQD init function */
+ userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
+ if (!userq_props) {
+ DRM_ERROR("Failed to allocate memory for userq_props\n");
+ return -ENOMEM;
+ }
+
+ if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
+ !mqd_user->queue_va || mqd_user->queue_size == 0) {
+ DRM_ERROR("Invalid MQD parameters for userqueue\n");
+ r = -EINVAL;
+ goto free_props;
+ }
+
+ r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
+ if (r) {
+ DRM_ERROR("Failed to create MQD object for userqueue\n");
+ goto free_props;
+ }
+
+ /* Initialize the MQD BO with user given values */
+ userq_props->wptr_gpu_addr = mqd_user->wptr_va;
+ userq_props->rptr_gpu_addr = mqd_user->rptr_va;
+ userq_props->queue_size = mqd_user->queue_size;
+ userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
+ userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
+ userq_props->use_doorbell = true;
+ userq_props->doorbell_index = queue->doorbell_index;
+ userq_props->fence_address = queue->fence_drv->gpu_addr;
+
+ if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
+ struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
+
+ if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
+ DRM_ERROR("Invalid compute IP MQD size\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(compute_mqd)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = PTR_ERR(compute_mqd);
+ goto free_mqd;
+ }
+
+ userq_props->eop_gpu_addr = compute_mqd->eop_va;
+ userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
+ userq_props->hqd_active = false;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+ kfree(compute_mqd);
+ } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
+ struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+
+ if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid GFX MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_gfx_v11)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = PTR_ERR(mqd_gfx_v11);
+ goto free_mqd;
+ }
+
+ userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
+ userq_props->csa_addr = mqd_gfx_v11->csa_va;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+ kfree(mqd_gfx_v11);
+ } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
+ struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
+
+ if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid SDMA MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_sdma_v11)) {
+ DRM_ERROR("Failed to read sdma user MQD\n");
+ r = PTR_ERR(mqd_sdma_v11);
+ goto free_mqd;
+ }
+
+ userq_props->csa_addr = mqd_sdma_v11->csa_va;
+ kfree(mqd_sdma_v11);
+ }
+
+ queue->userq_prop = userq_props;
+
+ r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
+ if (r) {
+ DRM_ERROR("Failed to initialize MQD for userqueue\n");
+ goto free_mqd;
+ }
+
+ /* Create BO for FW operations */
+ r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
+ if (r) {
+ DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+ goto free_mqd;
+ }
+
+ /* FW expects WPTR BOs to be mapped into GART */
+ r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
+ if (r) {
+ DRM_ERROR("Failed to create WPTR mapping\n");
+ goto free_ctx;
+ }
+
+ return 0;
+
+free_ctx:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+
+free_mqd:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+
+free_props:
+ kfree(userq_props);
+
+ return r;
+}
+
+static void
+mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+ kfree(queue->userq_prop);
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+}
+
+const struct amdgpu_userq_funcs userq_mes_funcs = {
+ .mqd_create = mes_userq_mqd_create,
+ .mqd_destroy = mes_userq_mqd_destroy,
+ .unmap = mes_userq_unmap,
+ .map = mes_userq_map,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
index 1b4619ff9e8e..090ae8897770 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +19,12 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ad102_nvenc = {
- .sclass = {
- { -1, -1, NVC9B7_VIDEO_ENCODER },
- {}
- }
-};
-int
-ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvenc **pnvenc)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc);
+#ifndef MES_USERQ_H
+#define MES_USERQ_H
+#include "amdgpu_userq.h"
- return -ENODEV;
-}
+extern const struct amdgpu_userq_funcs userq_mes_funcs;
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index ef9538fbbf53..28eb846280dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -287,6 +287,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -310,9 +327,9 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -458,31 +475,6 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- if (input->use_mmio)
- return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
- input->me_id, input->pipe_id,
- input->queue_id, input->vmid);
-
- union MESAPI__RESET mes_reset_queue_pkt;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- return mes_v11_0_submit_pkt_and_poll_completion(mes,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -649,7 +641,7 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
- dev_err(mes->adev->dev, "MES FW versoin must be larger than 0x63 to support limit single process feature.\n");
+ dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
return -EINVAL;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
@@ -694,7 +686,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->compute_hqd_mask[i];
for (i = 0; i < MAX_GFX_PIPES; i++)
- mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
+ mes_set_hw_res_pkt.gfx_hqd_mask[i] =
+ mes->gfx_hqd_mask[i];
for (i = 0; i < MAX_SDMA_PIPES; i++)
mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
@@ -723,7 +716,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->event_log_gpu_addr;
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
@@ -753,8 +746,8 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
-static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
@@ -772,7 +765,7 @@ static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -798,7 +791,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.suspend_gang = mes_v11_0_suspend_gang,
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
- .reset_legacy_queue = mes_v11_0_reset_legacy_queue,
.reset_hw_queue = mes_v11_0_reset_hw_queue,
};
@@ -1638,10 +1630,12 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- r = mes_v11_0_set_hw_resources_1(&adev->mes);
- if (r) {
- DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
- goto failure;
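+ /* the SET_HW_RESOURCES_1 API is only available on newer MES firmware */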
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) {
+ r = mes_v11_0_set_hw_resources_1(&adev->mes);
+ if (r) {
+ DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
+ goto failure;
+ }
}
r = mes_v11_0_query_sched_status(&adev->mes);
@@ -1701,22 +1695,10 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
- (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)))
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
.early_init = mes_v11_0_early_init,
- .late_init = mes_v11_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,
.hw_init = mes_v11_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index e6ab617b9a40..6b222630f3fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -274,6 +274,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -297,9 +314,9 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -477,32 +494,6 @@ static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- union MESAPI__RESET mes_reset_queue_pkt;
- int pipe;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- if (mes->adev->enable_uni_mes)
- pipe = AMDGPU_MES_KIQ_PIPE;
- else
- pipe = AMDGPU_MES_SCHED_PIPE;
-
- return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -762,7 +753,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
@@ -845,8 +836,8 @@ static void mes_v12_0_enable_unmapped_doorbell_handling(
WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
}
-static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
int pipe;
@@ -865,7 +856,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -878,7 +869,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
}
- if (mes->adev->enable_uni_mes)
+ if (input->is_kq)
pipe = AMDGPU_MES_KIQ_PIPE;
else
pipe = AMDGPU_MES_SCHED_PIPE;
@@ -896,7 +887,6 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.suspend_gang = mes_v12_0_suspend_gang,
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
- .reset_legacy_queue = mes_v12_0_reset_legacy_queue,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
};
@@ -1752,7 +1742,8 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
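+ /* SET_HW_RESOURCES_1 is only supported by newer MES firmware */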
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x4b)
+ mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
mes_v12_0_init_aggregated_doorbell(&adev->mes);
@@ -1811,21 +1802,10 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend)
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v12_0_ip_funcs = {
.name = "mes_v12_0",
.early_init = mes_v12_0_early_init,
- .late_init = mes_v12_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v12_0_sw_init,
.sw_fini = mes_v12_0_sw_fini,
.hw_init = mes_v12_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 84cde1239ee4..cc688ae79e84 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -30,6 +30,7 @@
#include "soc15_common.h"
#include "soc15.h"
#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
@@ -75,6 +76,8 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi
static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
u32 inst_mask;
int i;
@@ -94,10 +97,10 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
@@ -192,10 +195,8 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
uint32_t tmp, inst_mask;
int i;
- /* Setup TLB control */
- inst_mask = adev->aid_mask;
- for_each_inst(i, inst_mask) {
- tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
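+ /* with indirect L1 TLB control under SR-IOV, the PSP programs the register on the driver's behalf */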
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
1);
@@ -209,7 +210,26 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
- WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
}
}
@@ -221,6 +241,9 @@ static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
inst_mask = adev->aid_mask;
for_each_inst(i, inst_mask) {
for (j = 0; j < 5; j++) { /* DAGB instances */
@@ -454,6 +477,30 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
return 0;
}
+static void mmhub_v1_8_disable_l1_tlb(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ u32 i, inst_mask;
+
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
+ }
+}
+
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub;
@@ -467,15 +514,6 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
for (i = 0; i < 16; i++)
WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
-
- /* Setup TLB control */
- tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
- 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL, 0);
- WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
-
if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
@@ -485,6 +523,8 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
}
}
+
+ mmhub_v1_8_disable_l1_tlb(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 134c4ec10887..910337dc28d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -36,40 +36,47 @@
static const char *mmhub_client_ids_v3_0_1[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
- [8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [8][0] = "MPM",
+ [12][0] = "ISPTNR",
+ [14][0] = "ISPCRD0",
+ [15][0] = "ISPCRD1",
+ [16][0] = "ISPCRD2",
+ [22][0] = "HDP",
+ [23][0] = "LSDMA",
+ [24][0] = "JPEG",
+ [27][0] = "VSCH",
+ [28][0] = "VCNU",
+ [29][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
[5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
- [8][1] = "MPIO",
- [10][1] = "DBGU0",
- [11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
- [14][1] = "XDP",
- [15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [8][1] = "MPM",
+ [10][1] = "ISPMWR0",
+ [11][1] = "ISPMWR1",
+ [12][1] = "ISPTNR",
+ [13][1] = "ISPSWR",
+ [14][1] = "ISPCWR0",
+ [15][1] = "ISPCWR1",
+ [16][1] = "ISPCWR2",
+ [17][1] = "ISPCWR3",
+ [18][1] = "XDP",
+ [21][1] = "OSSSYS",
+ [22][1] = "HDP",
+ [23][1] = "LSDMA",
+ [24][1] = "JPEG",
+ [27][1] = "VSCH",
+ [28][1] = "VCNU",
+ [29][1] = "VCN",
};
static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index bc3d6c2fc87a..f6fc9778bc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -40,30 +40,129 @@
static const char *mmhub_client_ids_v3_3[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPM",
+ [9][0] = "ISPPDPRD",
+ [10][0] = "ISPCSTATRD",
+ [11][0] = "ISPBYRPRD",
+ [12][0] = "ISPRGBPRD",
+ [13][0] = "ISPMCFPRD",
+ [14][0] = "ISPMCFPRD1",
+ [15][0] = "ISPYUVPRD",
+ [16][0] = "ISPMCSCRD",
+ [17][0] = "ISPGDCRD",
+ [18][0] = "ISPLMERD",
+ [22][0] = "ISPXT1",
+ [23][0] = "ISPIXT1",
[24][0] = "HDP",
[25][0] = "LSDMA",
[26][0] = "JPEG",
[27][0] = "VPE",
+ [28][0] = "VSCH",
[29][0] = "VCNU",
[30][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
+ [5][1] = "ISPCSISWR",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPM",
+ [9][1] = "ISPPDPWR",
+ [10][1] = "ISPCSTATWR",
+ [11][1] = "ISPBYRPWR",
+ [12][1] = "ISPRGBPWR",
+ [13][1] = "ISPMCFPWR",
+ [14][1] = "ISPMWR0",
+ [15][1] = "ISPYUVPWR",
+ [16][1] = "ISPMCSCWR",
+ [17][1] = "ISPGDCWR",
+ [18][1] = "ISPLMEWR",
+ [20][1] = "ISPMWR2",
[21][1] = "OSSSYS",
+ [22][1] = "ISPXT1",
+ [23][1] = "ISPIXT1",
[24][1] = "HDP",
[25][1] = "LSDMA",
[26][1] = "JPEG",
[27][1] = "VPE",
+ [28][1] = "VSCH",
[29][1] = "VCNU",
[30][1] = "VCN",
};
+static const char *mmhub_client_ids_v3_3_1[][2] = {
+ [0][0] = "VMC",
+ [4][0] = "DCEDMC",
+ [6][0] = "MP0",
+ [7][0] = "MP1",
+ [8][0] = "MPM",
+ [24][0] = "HDP",
+ [25][0] = "LSDMA",
+ [26][0] = "JPEG0",
+ [27][0] = "VPE0",
+ [28][0] = "VSCH",
+ [29][0] = "VCNU0",
+ [30][0] = "VCN0",
+ [32+1][0] = "ISPXT",
+ [32+2][0] = "ISPIXT",
+ [32+9][0] = "ISPPDPRD",
+ [32+10][0] = "ISPCSTATRD",
+ [32+11][0] = "ISPBYRPRD",
+ [32+12][0] = "ISPRGBPRD",
+ [32+13][0] = "ISPMCFPRD",
+ [32+14][0] = "ISPMCFPRD1",
+ [32+15][0] = "ISPYUVPRD",
+ [32+16][0] = "ISPMCSCRD",
+ [32+17][0] = "ISPGDCRD",
+ [32+18][0] = "ISPLMERD",
+ [32+22][0] = "ISPXT1",
+ [32+23][0] = "ISPIXT1",
+ [32+26][0] = "JPEG1",
+ [32+27][0] = "VPE1",
+ [32+29][0] = "VCNU1",
+ [32+30][0] = "VCN1",
+ [3][1] = "DCEDWB",
+ [4][1] = "DCEDMC",
+ [6][1] = "MP0",
+ [7][1] = "MP1",
+ [8][1] = "MPM",
+ [21][1] = "OSSSYS",
+ [24][1] = "HDP",
+ [25][1] = "LSDMA",
+ [26][1] = "JPEG0",
+ [27][1] = "VPE0",
+ [28][1] = "VSCH",
+ [29][1] = "VCNU0",
+ [30][1] = "VCN0",
+ [32+1][1] = "ISPXT",
+ [32+2][1] = "ISPIXT",
+ [32+5][1] = "ISPCSISWR",
+ [32+9][1] = "ISPPDPWR",
+ [32+10][1] = "ISPCSTATWR",
+ [32+11][1] = "ISPBYRPWR",
+ [32+12][1] = "ISPRGBPWR",
+ [32+13][1] = "ISPMCFPWR",
+ [32+14][1] = "ISPMWR0",
+ [32+15][1] = "ISPYUVPWR",
+ [32+16][1] = "ISPMCSCWR",
+ [32+17][1] = "ISPGDCWR",
+ [32+18][1] = "ISPLMEWR",
+ [32+19][1] = "ISPMWR1",
+ [32+20][1] = "ISPMWR2",
+ [32+22][1] = "ISPXT1",
+ [32+23][1] = "ISPIXT1",
+ [32+26][1] = "JPEG1",
+ [32+27][1] = "VPE1",
+ [32+29][1] = "VCNU1",
+ [32+30][1] = "VCN1",
+};
+
static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
@@ -102,12 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
- case IP_VERSION(3, 3, 1):
case IP_VERSION(3, 3, 2):
mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
mmhub_client_ids_v3_3[cid][rw] :
cid == 0x140 ? "UMSCH" : NULL;
break;
+ case IP_VERSION(3, 3, 1):
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ?
+ mmhub_client_ids_v3_3_1[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
+ break;
default:
mmhub_cid = NULL;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
index f2ab5001b492..951998454b25 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
@@ -37,39 +37,31 @@
static const char *mmhub_client_ids_v4_1_0[][2] = {
[0][0] = "VMC",
[4][0] = "DCEDMC",
- [5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [16][0] = "LSDMA",
+ [17][0] = "JPEG",
+ [19][0] = "VCNU",
+ [22][0] = "VSCH",
+ [23][0] = "HDP",
+ [32+23][0] = "VCNRD",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
- [5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPIO",
[10][1] = "DBGU0",
[11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
+ [12][1] = "DBGUNBIO",
[14][1] = "XDP",
[15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [16][1] = "LSDMA",
+ [17][1] = "JPEG",
+ [18][1] = "VCNWR",
+ [19][1] = "VCNU",
+ [22][1] = "VSCH",
+ [23][1] = "HDP",
};
static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
new file mode 100644
index 000000000000..6f749814929f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V5_0_H__
+#define __MMSCH_V5_0_H__
+
+#include "amdgpu_vcn.h"
+
+#define MMSCH_VERSION_MAJOR 5
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+#define RB_ENABLED (1 << 0)
+#define RB4_ENABLED (1 << 1)
+
+#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
+
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3
+#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4
+#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5
+
+enum mmsch_v5_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v5_0_table_info {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v5_0_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_v5_0_table_info vcn0;
+ struct mmsch_v5_0_table_info mjpegdec0[5];
+ struct mmsch_v5_0_table_info mjpegdec1[5];
+};
+
+struct mmsch_v5_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_direct_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v5_0_cmd_direct_read_modify_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v5_0_cmd_direct_polling {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v5_0_cmd_end {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v5_0_cmd_indirect_write {
+ struct mmsch_v5_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
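+/*
+ * NOTE: the INSERT macros below expect the caller to declare size,
+ * size_dw, table_loc and table_size, plus the matching command
+ * struct (direct_wt, direct_rd_mod_wt, direct_poll or end), as locals.
+ */
+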
+#define MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_read_modify_write); \
+ size_dw = size / 4; \
+ direct_rd_mod_wt.cmd_header.reg_offset = reg; \
+ direct_rd_mod_wt.mask_value = mask; \
+ direct_rd_mod_wt.write_data = data; \
+ memcpy((void *)table_loc, &direct_rd_mod_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_write); \
+ size_dw = size / 4; \
+ direct_wt.cmd_header.reg_offset = reg; \
+ direct_wt.reg_value = value; \
+ memcpy((void *)table_loc, &direct_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_polling); \
+ size_dw = size / 4; \
+ direct_poll.cmd_header.reg_offset = reg; \
+ direct_poll.mask_value = mask; \
+ direct_poll.wait_value = wait; \
+ memcpy((void *)table_loc, &direct_poll, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_END() { \
+ size = sizeof(struct mmsch_v5_0_cmd_end); \
+ size_dw = size / 4; \
+ memcpy((void *)table_loc, &end, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#endif
+
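Note on the INSERT macros above: they deliberately lean on variables in the caller's scope (table_loc, table_size, size, size_dw, and one struct per command type), matching the convention of the earlier mmsch_v*_0.h headers. A minimal sketch of a caller that builds a table this way; the register offsets and values are placeholders, not real VCN registers:

	/* Sketch only: demonstrates the calling convention the macros assume.
	 * Assumes linux/string.h for memcpy().
	 */
	static void mmsch_v5_0_build_table_sketch(uint32_t *table_loc)
	{
		struct mmsch_v5_0_cmd_direct_write direct_wt = { 0 };
		struct mmsch_v5_0_cmd_direct_read_modify_write direct_rd_mod_wt = { 0 };
		struct mmsch_v5_0_cmd_end end = { 0 };
		uint32_t table_size = 0, size, size_dw;

		direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
		direct_rd_mod_wt.cmd_header.command_type =
			MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
		end.cmd_header.command_type = MMSCH_COMMAND__END;

		MMSCH_V5_0_INSERT_DIRECT_WT(0x1000, 0x1);		/* placeholder reg */
		MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(0x1004, 0xff, 0x2);	/* placeholder reg */
		MMSCH_V5_0_INSERT_END();

		/* a real caller would now store table_size (in dwords) into the
		 * matching mmsch_v5_0_table_info.table_size header field
		 */
		(void)table_size;
	}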
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index f5411b798e11..48101a34e049 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -274,6 +274,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -281,8 +282,6 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
if (amdgpu_device_should_recover_gpu(adev)
&& (!amdgpu_device_has_job_running(adev) ||
adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -293,6 +292,19 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
+static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -312,26 +324,42 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
- case IDH_FLR_NOTIFICATION:
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
- &adev->virt.flr_work),
- "Failed to queue work! at %s",
- __func__);
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
break;
- case IDH_QUERY_ALIVE:
- xgpu_ai_mailbox_send_ack(adev);
- break;
- /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
- * it byfar since that polling thread will handle it,
- * other msg like flr complete is not handled here.
- */
- case IDH_CLR_MSG_BUF:
- case IDH_FLR_NOTIFICATION_CMPL:
- case IDH_READY_TO_ACCESS_GPU:
- default:
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
+ case IDH_QUERY_ALIVE:
+ xgpu_ai_mailbox_send_ack(adev);
+ break;
+	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
+	 * handler can ignore it since the polling thread will handle it;
+	 * other messages like FLR complete are not handled here.
+	 */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
break;
}
@@ -387,6 +415,7 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+ INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
return 0;
}
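Pattern note: the mailbox IRQ handler cannot sleep, so on IDH_RAS_BAD_PAGES_NOTIFICATION it only acks and schedules bad_pages_work; the work item then takes a read-trylock on the reset-domain semaphore so it backs off when a reset already owns the domain. A condensed restatement of the handler above with the locking rationale spelled out in comments (the back-off reasoning is inferred, not stated by the patch):

	static void example_bad_pages_work(struct work_struct *work)
	{
		struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt,
							bad_pages_work);
		struct amdgpu_device *adev = container_of(virt, struct amdgpu_device,
							  virt);

		/* trylock: if a GPU reset holds the domain, skip this round
		 * rather than block the shared workqueue behind the reset
		 */
		if (!down_read_trylock(&adev->reset_domain->sem))
			return;

		amdgpu_virt_fini_data_exchange(adev);	/* quiesce PF<->VF exchange */
		amdgpu_virt_request_bad_pages(adev);	/* pull bad-page list from host */
		amdgpu_virt_init_data_exchange(adev);	/* restart the exchange */
		up_read(&adev->reset_domain->sem);
	}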
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index ed57cbc150af..874b9f8f9804 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -40,6 +40,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
+ IDH_REQ_RAS_BAD_PAGES = 205,
};
enum idh_event {
@@ -54,6 +55,9 @@ enum idh_event {
IDH_RAS_POISON_READY,
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 5aadf24cb202..f6d8597452ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -67,6 +67,8 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg == IDH_FAIL)
r = -EINVAL;
+	else if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
+		r = -ENODEV;
else if (reg != event)
return -ENOENT;
@@ -103,6 +105,7 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r;
uint64_t timeout, now;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
now = (uint64_t)ktime_to_ms(ktime_get());
timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
@@ -110,8 +113,16 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
do {
r = xgpu_nv_mailbox_rcv_msg(adev, event);
if (!r) {
- dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
+ dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
+ event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
return 0;
+ } else if (r == -ENODEV) {
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+				dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
+ }
+ return r;
}
msleep(10);
@@ -166,6 +177,10 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
enum idh_event event = -1;
send_request:
+
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
switch (req) {
@@ -187,6 +202,9 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
+ case IDH_REQ_RAS_BAD_PAGES:
+ event = IDH_RAS_BAD_PAGES_READY;
+ break;
default:
break;
}
@@ -320,6 +338,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -330,8 +349,6 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -342,6 +359,19 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
+static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -364,8 +394,27 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
+ }
+
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
case IDH_FLR_NOTIFICATION:
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
@@ -436,6 +485,7 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+ INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
return 0;
}
@@ -480,6 +530,11 @@ static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}
+static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -492,4 +547,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
.req_ras_err_count = xgpu_nv_req_ras_err_count,
.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
+ .req_bad_pages = xgpu_nv_req_ras_bad_pages,
};
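With .req_bad_pages in the ops table, generic virt code can stay backend-agnostic. A sketch of how amdgpu_virt_request_bad_pages() plausibly dispatches; the guard mirrors other virt-ops call sites and is an assumption, not the verified implementation:

	void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
	{
		struct amdgpu_virt *virt = &adev->virt;

		/* assumed guard: only backends implementing the op are called */
		if (virt->ops && virt->ops->req_bad_pages)
			virt->ops->req_bad_pages(adev);
	}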
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 72c9fceb9d79..5808689562cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -42,6 +42,7 @@ enum idh_request {
IDH_RAS_POISON = 202,
IDH_REQ_RAS_ERROR_COUNT = 203,
IDH_REQ_RAS_CPER_DUMP = 204,
+ IDH_REQ_RAS_BAD_PAGES = 205,
};
enum idh_event {
@@ -58,6 +59,9 @@ enum idh_event {
IDH_RAS_ERROR_DETECTED,
IDH_RAS_ERROR_COUNT_READY = 11,
IDH_RAS_CPER_DUMP_READY = 14,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index f23cb79110d6..1c22bc11c1f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -31,9 +31,6 @@
#define NPS_MODE_MASK 0x000000FFL
-/* Core 0 Port 0 counter */
-#define smnPCIEP_NAK_COUNTER 0x1A340218
-
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
@@ -177,8 +174,12 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
{
u32 doorbell_range = 0, doorbell_ctrl = 0;
u32 aid_id = instance;
+ u32 range_size;
if (use_doorbell) {
+ range_size = (amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 5, 0)) ?
+ 0xb : 0x9;
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
@@ -186,7 +187,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY,
- 0x9);
+ range_size);
if (aid_id)
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
@@ -204,7 +205,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
- S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9);
+ S2A_DOORBELL_PORT1_RANGE_SIZE, range_size);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);
@@ -463,22 +464,6 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
}
}
-static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
-{
- u32 val, nak_r, nak_g;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- /* Get the number of NAKs received and generated */
- val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
- nak_r = val & 0xFFFF;
- nak_g = val >> 16;
-
- /* Add the total number of NAKs, i.e the number of replays */
- return (nak_r + nak_g);
-}
-
#define MMIO_REG_HOLE_OFFSET 0x1A000
static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev)
@@ -520,7 +505,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
.is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested,
.init_registers = nbio_v7_9_init_registers,
- .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
.set_reg_remap = nbio_v7_9_set_reg_remap,
};
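The doorbell hunk above sizes the VCN doorbell window at 0xb entries on GC 9.5.0 and keeps 0x9 elsewhere, threading one range_size value through both the BIF range entry and the S2A control entry. REG_SET_FIELD is the stock read-modify-write helper over the generated SHIFT/MASK register constants; its shape is roughly:

	/* Illustrative only -- the real macro derives mask and shift from the
	 * generated REG##__##FIELD##__SHIFT and __MASK definitions.
	 */
	#define EXAMPLE_SET_FIELD(orig, mask, shift, val) \
		(((orig) & ~(mask)) | (((val) << (shift)) & (mask)))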
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index f4a91b126c73..73f87131a7e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -106,7 +106,9 @@ enum psp_gfx_cmd_id
/*IDs of performance monitoring/profiling*/
GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */
	/* Dynamic memory partitioning (NPS mode change) */
- GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_FW_RESERV_ADDR = 0x00000050, /* Query FW reservation addr */
+ GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR = 0x00000051, /* Query FW reservation extended addr */
};
/* PSP boot config sub-commands */
@@ -404,11 +406,19 @@ struct psp_gfx_uresp_bootcfg {
uint32_t boot_cfg; /* boot config data */
};
+/* Command-specific response for fw reserve info */
+struct psp_gfx_uresp_fw_reserve_info {
+ uint32_t reserve_base_address_hi;
+ uint32_t reserve_base_address_lo;
+ uint32_t reserve_size;
+};
+
/* Union of command-specific responses for GPCOM ring. */
union psp_gfx_uresp {
struct psp_gfx_uresp_reserved reserved;
struct psp_gfx_uresp_bootcfg boot_cfg;
struct psp_gfx_uresp_fwar_db_info fwar_db_info;
+ struct psp_gfx_uresp_fw_reserve_info fw_reserve_info;
};
/* Structure of GFX Response buffer.
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 145186a1e48f..3584b8c18fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -94,7 +94,7 @@ static int psp_v10_0_ring_create(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -115,7 +115,7 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
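This is the first of several psp_v*_0.c conversions below from psp_wait_for()'s old boolean check_changed argument to a flags word. As used across these hunks: 0 keeps the plain wait-for-(reg & mask) == value behavior, PSP_WAITREG_CHANGED waits for the register to move away from the passed value, and PSP_WAITREG_NOVERBOSE suppresses timeout logging inside retry loops; MBOX_TOS_RESP_FLAG/_MASK and MBOX_TOS_READY_FLAG/_MASK name the previously bare 0x80000000/0x8000FFFF-style constants. The three resulting call shapes, collected from the surrounding hunks:

	/* wait for the TOS response: bit 31 set, status bits clear */
	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
			   MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);

	/* wait for the register to change from its current value */
	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
			   RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
			   PSP_WAITREG_CHANGED);

	/* poll quietly inside a retry loop; the caller logs failure itself */
	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
			   0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);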
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 215543575f47..6cc05d36e359 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -152,11 +152,9 @@ static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
		/* Wait for bootloader to signify that it is
		ready by having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -252,8 +250,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -277,11 +275,13 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -317,13 +317,15 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
return ret;
@@ -347,8 +349,9 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -381,7 +384,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -395,7 +399,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
+ 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
@@ -421,8 +426,9 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -601,7 +607,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -638,7 +644,7 @@ static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
index 5697760a819b..93787a90d598 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
@@ -41,8 +41,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
		/* Write the ring destroy command */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
@@ -50,8 +51,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -87,13 +89,15 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -117,8 +121,9 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index fcd708eae75c..4c6450d62299 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -34,9 +34,6 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_7_4_offset.h"
-#include "oss/osssys_4_0_offset.h"
-#include "oss/osssys_4_0_sh_mask.h"
-
MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
@@ -85,7 +82,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
	/* Wait for bootloader to signify that it is ready by having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -99,11 +96,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -124,7 +118,7 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
	/* Wait for bootloader to signify that it is ready by having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -138,46 +132,13 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
-static void psp_v12_0_reroute_ih(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- uint32_t tmp;
-
- /* Change IH ring for VMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-
- /* Change IH ring for UMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-}
-
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -186,49 +147,23 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- psp_v12_0_reroute_ih(psp);
-
- if (amdgpu_sriov_vf(psp->adev)) {
- /* Write low address of the ring to C2PMSG_102 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_103 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
-
- /* Write the ring initialization command to C2PMSG_101 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
-
- } else {
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
- }
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -247,16 +182,15 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
GFX_CTRL_CMD_ID_DESTROY_RINGS);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -287,7 +221,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -301,7 +236,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
+ 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index afdf8ce3b4c5..af4a7d7c4abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
@@ -71,20 +73,13 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin");
/* Retry times for vmbx ready wait */
#define PSP_VMBX_POLLING_LIMIT 3000
-/* VBIOS gfl defines */
-#define MBOX_READY_MASK 0x80000000
-#define MBOX_STATUS_MASK 0x0000FFFF
-#define MBOX_COMMAND_MASK 0x00FF0000
-#define MBOX_READY_FLAG 0x80000000
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
-#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
-
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
#define regMP1_PUB_SCRATCH0 0x3b10090
+#define PSP13_BL_STATUS_SIZE 100
+
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -151,6 +146,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
+static void psp_v13_0_bootloader_print_status(struct psp_context *psp,
+ const char *msg)
+{
+ struct amdgpu_device *adev = psp->adev;
+ u32 bl_status_reg;
+ char bl_status_msg[PSP13_BL_STATUS_SIZE];
+ int i, at;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
+ at = 0;
+ for_each_inst(i, adev->aid_mask) {
+ bl_status_reg =
+ (SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_92)
+ << 2) +
+ adev->asic_funcs->encode_ext_smn_addressing(i);
+ at += snprintf(bl_status_msg + at,
+ PSP13_BL_STATUS_SIZE - at,
+ " status(%02i): 0x%08x", i,
+ RREG32_PCIE_EXT(bl_status_reg));
+ }
+ dev_info(adev->dev, "%s - %s", msg, bl_status_msg);
+ }
+}
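One caveat with the accumulation idiom above: snprintf() returns the length that would have been written, so after a truncated write `at` can exceed PSP13_BL_STATUS_SIZE and the next `PSP13_BL_STATUS_SIZE - at` goes negative. The buffer is sized for the expected instance count here, but scnprintf(), which returns the bytes actually stored, is the self-limiting variant (sketch only; bl_status_reg computed per instance exactly as in the function above):

	at = 0;
	for_each_inst(i, adev->aid_mask)
		at += scnprintf(bl_status_msg + at, PSP13_BL_STATUS_SIZE - at,
				" status(%02i): 0x%08x", i,
				RREG32_PCIE_EXT(bl_status_reg));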
+
static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -161,7 +182,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
			ready by having bit 31 of C2PMSG_33 set to 1 */
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
@@ -192,10 +213,13 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) {
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
+ if (retry_loop && !(retry_loop % 10))
+ psp_v13_0_bootloader_print_status(
+ psp, "Waiting for bootloader completion");
}
return ret;
@@ -338,8 +362,8 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
if (!ret)
psp_v13_0_init_sos_version(psp);
@@ -360,8 +384,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
		/* Write the ring destroy command */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -369,8 +394,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -406,13 +432,15 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -436,8 +464,9 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -500,8 +529,9 @@ static int psp_v13_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -653,7 +683,7 @@ static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -690,7 +720,7 @@ static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36);
@@ -710,12 +740,14 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
/* Ring the doorbell */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1);
- if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
+ if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE ||
+ cmd == C2PMSG_CMD_SPI_GET_FLASH_IMAGE)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -739,7 +771,7 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
@@ -766,6 +798,37 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
return 0;
}
+static int psp_v13_0_dump_spirom(struct psp_context *psp,
+ uint64_t fw_pri_mc_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ /* Confirm PSP is ready to start */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
+ if (ret) {
+ dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
+ return ret;
+ }
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO);
+ if (ret)
+ return ret;
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI);
+ if (ret)
+ return ret;
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_FLASH_IMAGE);
+
+ return ret;
+}
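/* Read-side counterpart of psp_v13_0_update_spirom() above: C2PMSG_116
 * carries first the low and then the high half of the destination
 * address, each latched by its own C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO/
 * _HI sub-command, before C2PMSG_CMD_SPI_GET_FLASH_IMAGE starts the
 * actual dump.  Per the exec_spi_cmd() change earlier in this file,
 * GET_FLASH_IMAGE shares the long PSP_SPIROM_UPDATE_TIMEOUT, presumably
 * because reading the full ROM takes as long as flashing it (an
 * inference, not stated by the patch).
 */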
+
static int psp_v13_0_vbflash_status(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -858,6 +921,26 @@ static bool psp_v13_0_is_reload_needed(struct psp_context *psp)
return false;
}
+static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret = -EOPNOTSUPP;
+
+ /* PSP will broadcast the value to all instances */
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_GBR_IH_SET);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val);
+
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
+ }
+
+ return ret;
+}
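reg_program_no_ring lets an SR-IOV guest program a register through the TOS mailbox before any GPCOM ring exists, reusing the GFX_CTRL_CMD_ID_GBR_IH_SET command id. A sketch of a plausible generic wrapper, assuming it guards the func pointer like the other psp_funcs entries (an assumption, not the verified core change):

	int psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
				    enum psp_reg_prog_id id)
	{
		/* hypothetical wrapper: dispatch only when the IP provides it */
		if (psp->funcs && psp->funcs->reg_program_no_ring)
			return psp->funcs->reg_program_no_ring(psp, val, id);
		return -EOPNOTSUPP;
	}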
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -879,11 +962,13 @@ static const struct psp_funcs psp_v13_0_funcs = {
.load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw,
.update_spirom = psp_v13_0_update_spirom,
+ .dump_spirom = psp_v13_0_dump_spirom,
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,
.is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required,
.is_reload_needed = psp_v13_0_is_reload_needed,
+ .reg_program_no_ring = psp_v13_0_reg_program_no_ring,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index eaa5512a21da..5f39a2edcc95 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -76,11 +76,9 @@ static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
		/* Wait for bootloader to signify that it is
		ready by having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -185,8 +183,8 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -204,8 +202,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
		/* Write the ring destroy command */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -213,8 +212,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -250,13 +250,15 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -280,8 +282,9 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 256288c6cd78..38dfc5c19f2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -34,7 +34,9 @@
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin");
@@ -109,11 +111,9 @@ static int psp_v14_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
		/* Wait for bootloader to signify that it is
		ready by having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -228,9 +228,10 @@ static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81),
- 0, true);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -248,8 +249,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
		/* Write the ring destroy command */
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64,
@@ -257,8 +259,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -294,13 +297,15 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -324,8 +329,9 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -388,8 +394,9 @@ static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -540,8 +547,9 @@ static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -577,8 +585,9 @@ static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36);
@@ -602,11 +611,13 @@ static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -629,8 +640,9 @@ static int psp_v14_0_update_spirom(struct psp_context *psp,
int ret;
/* Confirm PSP is ready to start */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index f6b75e3e47ff..833830bc3e2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -91,7 +91,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
	/* Wait for bootloader to signify that it is ready by having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -109,7 +109,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -130,7 +130,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
	/* Wait for bootloader to signify that it is ready by having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -147,8 +147,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -168,7 +168,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
/* Change IH ring for UMC */
tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
@@ -180,7 +180,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
}
static int psp_v3_1_ring_create(struct psp_context *psp,
@@ -217,9 +217,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_101), 0x80000000,
- 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, 0);
} else {
/* Write low address of the ring to C2PMSG_69 */
@@ -240,10 +240,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_64), 0x80000000,
- 0x8000FFFF, false);
-
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, 0);
}
return ret;
}
@@ -267,11 +266,13 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -311,7 +312,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -325,7 +326,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 33ed2b158fcd..f38004e6064e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2187,7 +2187,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
dev_dbg_ratelimited(adev->dev,
" for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 688a720bbbbd..36b1ca73c2ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -45,6 +45,7 @@
#include "amdgpu_ras.h"
MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
+MODULE_FIRMWARE("amdgpu/sdma_4_4_4.bin");
MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin");
static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
@@ -106,8 +107,11 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
-static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -489,7 +493,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
{
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, sdma_cntl;
int i;
for_each_inst(i, inst_mask) {
@@ -501,6 +505,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+ sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl);
if (sdma[i]->use_doorbell) {
doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
@@ -994,6 +1001,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
/* enable context empty interrupt during initialization */
@@ -1333,6 +1341,12 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
}
}
+static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v4_4_2_stop_queue,
+ .start_kernel_queue = &sdma_v4_4_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine,
+};
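The old global on-reset callback registration gives way to this per-instance funcs table, letting the common SDMA reset path drive stop/reset/start uniformly per engine. A sketch of how amdgpu_sdma_reset_engine() presumably consumes it; the exact core sequence (queue save/restore details, error unwinding) is an assumption:

	/* hypothetical shape of the common per-engine reset path */
	static int example_sdma_reset_engine(struct amdgpu_device *adev, u32 id)
	{
		const struct amdgpu_sdma_funcs *funcs = adev->sdma.instance[id].funcs;
		struct amdgpu_ring *ring = &adev->sdma.instance[id].ring;
		int r;

		r = funcs->stop_kernel_queue(ring);	/* quiesce, save rptr/wptr */
		if (r)
			return r;
		r = funcs->soft_reset_kernel_queue(adev, id);	/* DPM soft reset */
		if (r)
			return r;
		return funcs->start_kernel_queue(ring);	/* re-init and restart */
	}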
+
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1351,8 +1365,6 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
sdma_v4_4_2_set_vm_pte_funcs(adev);
sdma_v4_4_2_set_irq_funcs(adev);
sdma_v4_4_2_set_ras_funcs(adev);
- sdma_v4_4_2_set_engine_reset_funcs(adev);
-
return 0;
}
@@ -1447,6 +1459,7 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
/* Initialize guilty flags for GFX and PAGE queues */
adev->sdma.instance[i].gfx_guilty = false;
adev->sdma.instance[i].page_guilty = false;
+ adev->sdma.instance[i].funcs = &sdma_v4_4_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1643,46 +1656,26 @@ static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t i
return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
}
-static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t instance_id = ring->me;
-
- return sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
-}
-
-static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t instance_id = ring->me;
-
- if (!adev->sdma.has_page_queue)
- return false;
-
- return sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
-}
-
-static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- u32 id = GET_INST(SDMA0, ring->me);
+ u32 id = ring->me;
int r;
- if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- amdgpu_amdkfd_suspend(adev, false);
- r = amdgpu_sdma_reset_engine(adev, id);
- amdgpu_amdkfd_resume(adev, false);
-
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, id, false);
+ amdgpu_amdkfd_resume(adev, true);
return r;
}
-static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_id)
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+ u32 instance_id = ring->me;
u32 inst_mask;
uint64_t rptr;
- struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -1715,11 +1708,11 @@ static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_
return 0;
}
-static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instance_id)
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
{
- int i;
+ struct amdgpu_device *adev = ring->adev;
u32 inst_mask;
- struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
+ int i, r;
inst_mask = 1 << ring->me;
udelay(50);
@@ -1736,17 +1729,18 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instan
return -ETIMEDOUT;
}
- return sdma_v4_4_2_inst_start(adev, inst_mask, true);
-}
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, true);
-static struct sdma_on_reset_funcs sdma_v4_4_2_engine_reset_funcs = {
- .pre_reset = sdma_v4_4_2_stop_queue,
- .post_reset = sdma_v4_4_2_restore_queue,
-};
+ return r;
+}
-static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev)
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id)
{
- amdgpu_sdma_register_on_reset_callbacks(adev, &sdma_v4_4_2_engine_reset_funcs);
+ /* For SDMA 4.x, use the existing DPM interface for backward compatibility;
+ * we need to convert the logical instance ID to a physical instance ID before reset.
+ */
+ return amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
}
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
@@ -1888,7 +1882,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
if (task_info) {
dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
@@ -2143,7 +2137,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
- .is_guilty = sdma_v4_4_2_ring_is_guilty,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
@@ -2176,7 +2169,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
- .is_guilty = sdma_v4_4_2_page_ring_is_guilty,
};
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -2373,7 +2365,9 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(9, 5, 0):
- /*TODO: enable the queue reset flag until fw supported */
+ if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
default:
break;
}
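This file's larger change replaces the registered pre_reset/post_reset callbacks with a per-instance amdgpu_sdma_funcs table wired up in sw_init. A self-contained sketch of the ops-table pattern under assumed types; the reset_engine() driver below only illustrates the stop -> soft-reset -> restart order, not the kernel's amdgpu_sdma_reset_engine():

/* Illustrative stand-ins for the kernel types. */
#include <stdio.h>

struct ring { int me; };

struct sdma_funcs {
	int (*stop_kernel_queue)(struct ring *ring);
	int (*start_kernel_queue)(struct ring *ring);
	int (*soft_reset_kernel_queue)(void *adev, unsigned int instance_id);
};

static int demo_stop(struct ring *r)  { printf("stop  inst %d\n", r->me); return 0; }
static int demo_start(struct ring *r) { printf("start inst %d\n", r->me); return 0; }
static int demo_soft_reset(void *adev, unsigned int id)
{
	(void)adev;
	printf("soft-reset inst %u\n", id);
	return 0;
}

static const struct sdma_funcs demo_funcs = {
	.stop_kernel_queue = demo_stop,
	.start_kernel_queue = demo_start,
	.soft_reset_kernel_queue = demo_soft_reset,
};

/* assumed core flow: stop the queue, soft reset the engine, restart */
static int reset_engine(const struct sdma_funcs *f, struct ring *r)
{
	int err;

	err = f->stop_kernel_queue(r);
	if (err)
		return err;
	err = f->soft_reset_kernel_queue(NULL, (unsigned int)r->me);
	if (err)
		return err;
	return f->start_kernel_queue(r);
}

int main(void)
{
	struct ring r = { .me = 1 };

	return reset_engine(&demo_funcs, &r);
}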
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 0dce59f4f6e2..7dc67a22a7a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -112,6 +112,8 @@ static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
@@ -369,67 +371,36 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
-
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index,
- ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
- "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
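With the is_mes_queue branch gone, set_wptr keeps just two paths: update the CPU shadow and ring the 64-bit doorbell, or program the RB_WPTR register pair. A standalone sketch with register and doorbell I/O stubbed out for illustration:

#include <stdint.h>
#include <stdio.h>

static void wdoorbell64(uint32_t index, uint64_t v)
{
	printf("WDOORBELL64(0x%08x, 0x%016llx)\n", index, (unsigned long long)v);
}

static void wreg32(const char *reg, uint32_t v)
{
	printf("WREG32(%s, 0x%08x)\n", reg, v);
}

static void set_wptr(int use_doorbell, uint64_t wptr, uint32_t doorbell_index,
		     volatile uint64_t *wptr_cpu_addr)
{
	uint64_t v = wptr << 2;	/* wptr counts dwords; hardware wants bytes */

	if (use_doorbell) {
		*wptr_cpu_addr = v;	/* atomic64_set() in the driver */
		wdoorbell64(doorbell_index, v);
	} else {
		wreg32("SDMA0_GFX_RB_WPTR",    (uint32_t)v);
		wreg32("SDMA0_GFX_RB_WPTR_HI", (uint32_t)(v >> 32));
	}
}

int main(void)
{
	volatile uint64_t shadow = 0;

	set_wptr(1, 0x10, 0x80, &shadow);	/* doorbell path */
	set_wptr(0, 0x10, 0x80, &shadow);	/* register path */
	return 0;
}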
@@ -575,11 +546,9 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -588,15 +557,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_0_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
* Stop the gfx async dma ring buffers (NAVI10).
*/
-static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -688,9 +657,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_0_gfx_stop(adev);
+ sdma_v5_0_gfx_stop(adev, inst_mask);
sdma_v5_0_rlc_stop(adev);
}
@@ -1046,33 +1017,22 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1085,10 +1045,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1100,8 +1057,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1124,38 +1080,24 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256,
- AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1183,10 +1125,7 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1197,8 +1136,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1389,6 +1327,36 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+static int sdma_v5_0_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
+{
+ u32 grbm_soft_reset;
+ u32 tmp;
+
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
+
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
+
+static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_0_stop_queue,
+ .start_kernel_queue = &sdma_v5_0_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
+};
+
static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1431,6 +1399,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
+ adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1458,7 +1428,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
- if (adev->sdma.instance[0].fw_version >= 35)
+ if ((adev->sdma.instance[0].fw_version >= 35) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1569,35 +1540,43 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
+ amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_0_gfx_stop(adev, 1 << i);
/* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1628,31 +1607,27 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze */
- freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
-
- r = sdma_v5_0_gfx_resume_instance(adev, i, true);
+ WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
-err0:
+ r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
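Both v5 files now build an all-instances mask with GENMASK and walk it with for_each_inst, which lets gfx_stop also take a single-instance mask (1 << ring->me) from the stop-queue path. A userspace approximation of that bit iteration, with a simplified GENMASK:

#include <stdio.h>

/* simplified 32-bit GENMASK(high, low) for illustration */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int num_instances = 2;
	unsigned int inst_mask = GENMASK(num_instances - 1, 0);	/* 0b11 */
	unsigned int i, m;

	/* equivalent of for_each_inst(i, inst_mask): visit each set bit */
	for (m = inst_mask; m; m &= m - 1) {
		i = (unsigned int)__builtin_ctz(m);	/* lowest set bit */
		printf("stopping SDMA instance %u\n", i);
	}

	/* a single-instance mask, as the new stop_queue path passes */
	printf("single-instance mask for me=1: 0x%x\n", 1u << 1);
	return 0;
}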
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 2b39a03ff0c1..3bd44c24f692 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -113,6 +113,8 @@ static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring);
static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
@@ -394,11 +396,9 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if ((flags & AMDGPU_FENCE_FLAG_INT)) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -407,15 +407,15 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_2_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
* Stop the gfx async dma ring buffers.
*/
-static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -506,9 +506,11 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_2_gfx_stop(adev);
+ sdma_v5_2_gfx_stop(adev, inst_mask);
sdma_v5_2_rlc_stop(adev);
}
@@ -761,37 +763,49 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+static int sdma_v5_2_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
{
- struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset;
u32 tmp;
- int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- grbm_soft_reset = REG_SET_FIELD(0,
- GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
- 1);
- grbm_soft_reset <<= i;
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
+static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma_v5_2_soft_reset_engine(adev, i);
udelay(50);
}
return 0;
}
+static const struct amdgpu_sdma_funcs sdma_v5_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_2_stop_queue,
+ .start_kernel_queue = &sdma_v5_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_2_soft_reset_engine,
+};
+
/**
* sdma_v5_2_start - setup and start the async dma engines
*
@@ -903,33 +917,22 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -942,10 +945,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -957,8 +957,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -981,37 +980,23 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1039,10 +1024,7 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1053,8 +1035,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1337,6 +1318,8 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
+ adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1364,11 +1347,13 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 2, 2):
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 4):
- if (adev->sdma.instance[0].fw_version >= 76)
+ if ((adev->sdma.instance[0].fw_version >= 76) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(5, 2, 5):
- if (adev->sdma.instance[0].fw_version >= 34)
+ if ((adev->sdma.instance[0].fw_version >= 34) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1469,35 +1454,43 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
return -ETIMEDOUT;
}
-static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
+ amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_2_gfx_stop(adev, 1 << i);
/* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1530,32 +1523,28 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze and unhalt */
- freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+ WREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
- r = sdma_v5_2_gfx_resume_instance(adev, i, true);
+ r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
-err0:
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
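The soft-reset body factored out above follows the usual assert/settle/deassert sequence on GRBM_SOFT_RESET. A standalone sketch with register I/O stubbed as a plain variable and an assumed bit position for SOFT_RESET_SDMA0:

#include <stdio.h>

#define SOFT_RESET_SDMA0_BIT 0x1u	/* assumed instance-0 bit, for illustration */

static unsigned int grbm_soft_reset_reg;	/* stand-in for mmGRBM_SOFT_RESET */

static unsigned int rreg(void) { return grbm_soft_reset_reg; }
static void wreg(unsigned int v) { grbm_soft_reset_reg = v; }
static void udelay(unsigned int us) { (void)us; /* busy-wait in real code */ }

static int soft_reset_engine(unsigned int instance_id)
{
	unsigned int bit = SOFT_RESET_SDMA0_BIT << instance_id;
	unsigned int tmp;

	tmp = rreg();
	tmp |= bit;		/* assert reset for this instance */
	wreg(tmp);
	tmp = rreg();		/* read back to post the write */

	udelay(50);		/* let the engine settle in reset */

	tmp &= ~bit;		/* deassert reset */
	wreg(tmp);
	tmp = rreg();
	printf("GRBM_SOFT_RESET now 0x%08x\n", tmp);
	return 0;
}

int main(void)
{
	return soft_reset_engine(1);
}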
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index c214c3d2149b..e6d8eddda2bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -43,6 +43,8 @@
#include "sdma_common.h"
#include "sdma_v6_0.h"
#include "v11_structs.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
@@ -376,11 +378,9 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -891,6 +891,12 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
+ m->sdmax_rlcx_f32_dbg0 = lower_32_bits(prop->fence_address);
+ m->sdmax_rlcx_f32_dbg1 = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -917,33 +923,22 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -956,10 +951,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -971,8 +963,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -995,37 +986,23 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1053,10 +1030,7 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1067,8 +1041,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1300,6 +1273,23 @@ static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r)
return r;
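The new amdgpu_user_queue switch maps one module-parameter value onto two flags: -1/0 keep kernel queues only, 1 allows both kernel and user queues, and 2 goes user-queue only while blocking user submission on the kernel rings. A small sketch of that mapping; the parameter semantics are inferred from this hunk:

#include <stdbool.h>
#include <stdio.h>

struct uq_mode { bool no_user_submission; bool disable_uq; };

static struct uq_mode pick_mode(int amdgpu_user_queue)
{
	struct uq_mode m;

	switch (amdgpu_user_queue) {
	case 1:		/* kernel and user queues */
		m.no_user_submission = false; m.disable_uq = false; break;
	case 2:		/* user queues only: no user submission on kernel rings */
		m.no_user_submission = true;  m.disable_uq = false; break;
	case -1:
	case 0:		/* default: kernel queues only */
	default:
		m.no_user_submission = false; m.disable_uq = true;  break;
	}
	return m;
}

int main(void)
{
	for (int v = -1; v <= 2; v++) {
		struct uq_mode m = pick_mode(v);

		printf("user_queue=%2d -> no_user_submission=%d disable_uq=%d\n",
		       v, m.no_user_submission, m.disable_uq);
	}
	return 0;
}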
@@ -1329,11 +1319,19 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ /* SDMA user fence event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+ GFX_11_0_0__SRCID__SDMA_FENCE,
+ &adev->sdma.fence_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1357,7 +1355,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
- if (adev->sdma.instance[0].fw_version >= 21)
+ if ((adev->sdma.instance[0].fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1376,6 +1375,43 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(6, 0, 0):
+ if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 1):
+ if ((adev->sdma.instance[0].fw_version >= 18) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 2):
+ if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 3):
+ if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 0):
+ if ((adev->sdma.instance[0].fw_version >= 14) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 1):
+ if ((adev->sdma.instance[0].fw_version >= 17) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 2):
+ if ((adev->sdma.instance[0].fw_version >= 15) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 3):
+ if ((adev->sdma.instance[0].fw_version >= 10) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ default:
+ break;
+ }
+
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -1399,11 +1435,39 @@ static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v6_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v6_0_start(adev);
+ r = sdma_v6_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v6_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1415,6 +1479,7 @@ static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);
+ sdma_v6_0_set_userq_trap_interrupts(adev, false);
return 0;
}
@@ -1506,29 +1571,29 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v6_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
@@ -1555,25 +1620,9 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instances, queue;
- uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
- return 0;
- }
-
queue = entry->ring_id & 0xf;
instances = (entry->ring_id & 0xf0) >> 4;
if (instances > 1) {
@@ -1595,6 +1644,29 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v6_0_process_fence_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 doorbell_offset = entry->src_data[0];
+
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
+
+ doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
+ }
+
+ return 0;
+}
+
static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
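The new fence-IRQ handler derives an index from the doorbell offset carried in src_data[0], then looks up the user-queue fence driver to process. A standalone sketch in which a flat table stands in for the kernel xarray and the shift value is assumed:

#include <stdio.h>

#define DOORBELL_OFFSET_SHIFT	2	/* assumed; mirrors the OFFSET field shift */
#define MAX_DOORBELLS		64

struct fence_drv { const char *owner; };

/* stand-in for adev->userq_xa; xa_load() becomes an array index here */
static struct fence_drv *doorbell_table[MAX_DOORBELLS];

static int process_fence_irq(unsigned int src_data0)
{
	unsigned int doorbell_offset = src_data0;
	struct fence_drv *drv;

	if (!doorbell_offset)
		return 0;

	doorbell_offset >>= DOORBELL_OFFSET_SHIFT;	/* index, not byte offset */
	if (doorbell_offset >= MAX_DOORBELLS)
		return 0;

	drv = doorbell_table[doorbell_offset];
	if (drv)
		printf("processing fences for %s\n", drv->owner);
	return 0;
}

int main(void)
{
	static struct fence_drv d = { .owner = "userq0" };

	doorbell_table[5] = &d;
	return process_fence_irq(5 << DOORBELL_OFFSET_SHIFT);
}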
@@ -1731,6 +1803,10 @@ static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = {
.process = sdma_v6_0_process_trap_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v6_0_fence_irq_funcs = {
+ .process = sdma_v6_0_process_fence_irq,
+};
+
static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = {
.process = sdma_v6_0_process_illegal_inst_irq,
};
@@ -1740,6 +1816,7 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs;
+ adev->sdma.fence_irq.funcs = &sdma_v6_0_fence_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index b2706221df99..326ecc8d37d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -33,7 +33,7 @@
#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "hdp/hdp_6_0_0_offset.h"
-#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"
#include "soc15_common.h"
#include "soc15.h"
@@ -42,6 +42,8 @@
#include "sdma_common.h"
#include "sdma_v7_0.h"
#include "v12_structs.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
MODULE_FIRMWARE("amdgpu/sdma_7_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
@@ -204,66 +206,39 @@ static uint64_t sdma_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "regSDMA%i_GFX_RB_WPTR == 0x%08x "
- "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "regSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
@@ -407,11 +382,9 @@ static void sdma_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -829,29 +802,29 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
return false;
}
-static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v7_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v7_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
/**
@@ -935,6 +908,12 @@ static int sdma_v7_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = 0x4000; //regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = 0xf; //regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
+ m->sdmax_rlcx_mcu_dbg0 = lower_32_bits(prop->fence_address);
+ m->sdmax_rlcx_mcu_dbg1 = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -961,33 +940,22 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1000,10 +968,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1015,8 +980,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1039,37 +1003,23 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1097,10 +1047,7 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1111,8 +1058,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1312,6 +1258,23 @@ static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to init sdma firmware!\n");
@@ -1337,16 +1300,24 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
- GFX_11_0_0__SRCID__SDMA_TRAP,
+ GFX_12_0_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
+ /* SDMA user fence event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+ GFX_12_0_0__SRCID__SDMA_FENCE,
+ &adev->sdma.fence_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1366,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
- adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
@@ -1378,6 +1350,16 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ default:
+ break;
+ }
+
return r;
}
@@ -1400,11 +1382,39 @@ static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v7_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
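
amdgpu_irq_get()/amdgpu_irq_put() are refcounted per interrupt type, which is what lets the helper above be called unconditionally from hw_init/hw_fini without double-enabling the line. The usual shape of such refcounting, reduced to a standalone sketch:

#include <stdbool.h>

struct irq_line {
        int refcount;
        bool hw_enabled;     /* stands in for programming the real IH */
};

static void irq_line_get(struct irq_line *l)
{
        if (l->refcount++ == 0)
                l->hw_enabled = true;    /* first user enables the source */
}

static void irq_line_put(struct irq_line *l)
{
        if (l->refcount > 0 && --l->refcount == 0)
                l->hw_enabled = false;   /* last user disables it again */
}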
+
static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v7_0_start(adev);
+ r = sdma_v7_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v7_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1416,6 +1426,7 @@ static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v7_0_ctx_switch_enable(adev, false);
sdma_v7_0_enable(adev, false);
+ sdma_v7_0_set_userq_trap_interrupts(adev, false);
return 0;
}
@@ -1533,25 +1544,9 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instances, queue;
- uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
- return 0;
- }
-
queue = entry->ring_id & 0xf;
instances = (entry->ring_id & 0xf0) >> 4;
if (instances > 1) {
@@ -1573,6 +1568,29 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v7_0_process_fence_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 doorbell_offset = entry->src_data[0];
+
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
+
+ doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
+ }
+
+ return 0;
+}
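
The fence handler above keys adev->userq_xa by doorbell offset, so whoever creates a user queue must have stored its fence driver at that index first. A sketch of the producer side using the stock xarray helpers (the wrapper name is hypothetical):

#include <linux/xarray.h>

static int publish_userq_fence_drv(struct xarray *xa,
                                   unsigned long doorbell_offset,
                                   void *fence_drv)
{
        /* xa_store_irq() takes the xa lock with IRQs disabled, matching
         * the xa_lock_irqsave() reader in the fence handler above. */
        return xa_err(xa_store_irq(xa, doorbell_offset, fence_drv, GFP_KERNEL));
}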
+
static int sdma_v7_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1710,6 +1728,10 @@ static const struct amdgpu_irq_src_funcs sdma_v7_0_trap_irq_funcs = {
.process = sdma_v7_0_process_trap_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v7_0_fence_irq_funcs = {
+ .process = sdma_v7_0_process_fence_irq,
+};
+
static const struct amdgpu_irq_src_funcs sdma_v7_0_illegal_inst_irq_funcs = {
.process = sdma_v7_0_process_illegal_inst_irq,
};
@@ -1719,6 +1741,7 @@ static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v7_0_trap_irq_funcs;
+ adev->sdma.fence_irq.funcs = &sdma_v7_0_fence_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v7_0_illegal_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 2247f6a94858..e0f139de7991 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -35,6 +35,7 @@
#include "amdgpu_vce.h"
#include "atom.h"
#include "amd_pcie.h"
+
#include "si_dpm.h"
#include "sid.h"
#include "si_ih.h"
@@ -44,17 +45,30 @@
#include "dce_v6_0.h"
#include "si.h"
#include "uvd_v3_1.h"
-#include "amdgpu_vkms.h"
+
+#include "uvd/uvd_4_0_d.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gmc/gmc_6_0_d.h"
+#include"gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
-#include "uvd/uvd_4_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+#include "si_enums.h"
#include "amdgpu_dm.h"
+#include "amdgpu_vkms.h"
static const u32 tahiti_golden_registers[] =
{
@@ -1071,8 +1085,8 @@ static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- r = RREG32(SMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ r = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -1082,8 +1096,8 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- WREG32(SMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ WREG32(mmSMC_IND_DATA_0, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
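
mmSMC_IND_INDEX_0/mmSMC_IND_DATA_0 are a classic index/data pair (0x80/0x81 per the constants dropped from sid.h below), which is why both accessors bracket the two MMIO operations with smc_idx_lock. The pattern as a standalone sketch, with hypothetical MMIO helpers:

#include <stdint.h>

extern uint32_t mmio_read32(uint32_t reg);              /* hypothetical */
extern void mmio_write32(uint32_t reg, uint32_t val);   /* hypothetical */

#define IND_INDEX 0x80   /* selects the internal SMC address */
#define IND_DATA  0x81   /* data window for the selected address */

/* Caller must hold a lock across both steps, as si_smc_rreg() does,
 * or a concurrent access could retarget IND_INDEX mid-read. */
static uint32_t smc_ind_read(uint32_t internal_reg)
{
        mmio_write32(IND_INDEX, internal_reg);
        return mmio_read32(IND_DATA);
}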
@@ -1110,20 +1124,20 @@ static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
}
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
- {GRBM_STATUS},
+ {mmGRBM_STATUS},
{mmGRBM_STATUS2},
{mmGRBM_STATUS_SE0},
{mmGRBM_STATUS_SE1},
{mmSRBM_STATUS},
{mmSRBM_STATUS2},
- {DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
- {DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA0_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA1_REGISTER_OFFSET},
{mmCP_STAT},
{mmCP_STALLED_STAT1},
{mmCP_STALLED_STAT2},
{mmCP_STALLED_STAT3},
- {GB_ADDR_CONFIG},
- {MC_ARB_RAMCFG},
+ {mmGB_ADDR_CONFIG},
+ {mmMC_ARB_RAMCFG},
{mmGB_TILE_MODE0},
{mmGB_TILE_MODE1},
{mmGB_TILE_MODE2},
@@ -1156,7 +1170,7 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{mmGB_TILE_MODE29},
{mmGB_TILE_MODE30},
{mmGB_TILE_MODE31},
- {CC_RB_BACKEND_DISABLE, true},
+ {mmCC_RB_BACKEND_DISABLE, true},
{mmGC_USER_RB_BACKEND_DISABLE, true},
{mmPA_SC_RASTER_CONFIG, true},
};
@@ -1264,37 +1278,37 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev)
u32 rom_cntl;
bool r;
- bus_cntl = RREG32(R600_BUS_CNTL);
+ bus_cntl = RREG32(mmBUS_CNTL);
if (adev->mode_info.num_crtc) {
- d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
- d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
- vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ d1vga_control = RREG32(mmD1VGA_CONTROL);
+ d2vga_control = RREG32(mmD2VGA_CONTROL);
+ vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
}
rom_cntl = RREG32(R600_ROM_CNTL);
/* enable the rom */
- WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
if (adev->mode_info.num_crtc) {
/* Disable VGA mode */
- WREG32(AVIVO_D1VGA_CONTROL,
- (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(AVIVO_D2VGA_CONTROL,
- (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(VGA_RENDER_CONTROL,
- (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+ WREG32(mmD1VGA_CONTROL,
+ (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmD2VGA_CONTROL,
+ (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmVGA_RENDER_CONTROL,
+ (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
}
WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
r = amdgpu_read_bios(adev);
/* restore regs */
- WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(mmBUS_CNTL, bus_cntl);
if (adev->mode_info.num_crtc) {
- WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
- WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
- WREG32(VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(mmD1VGA_CONTROL, d1vga_control);
+ WREG32(mmD2VGA_CONTROL, d2vga_control);
+ WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
}
WREG32(R600_ROM_CNTL, rom_cntl);
return r;
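
si_read_disabled_bios() is a save/override/restore bracket: stash BUS_CNTL and the VGA controls, un-gate the ROM and disable VGA decode, do the read, then put every register back verbatim. Reduced to a sketch (helpers hypothetical):

#include <stdbool.h>
#include <stdint.h>

extern uint32_t mmio_read32(uint32_t reg);              /* hypothetical */
extern void mmio_write32(uint32_t reg, uint32_t val);   /* hypothetical */
extern bool read_bios_image(void);                      /* hypothetical: cf. amdgpu_read_bios() */

static bool read_bios_with_override(uint32_t reg, uint32_t disable_mask)
{
        uint32_t saved = mmio_read32(reg);
        bool ok;

        mmio_write32(reg, saved & ~disable_mask);  /* e.g. clear BIOS_ROM_DIS */
        ok = read_bios_image();
        mmio_write32(reg, saved);                  /* restore the exact prior value */
        return ok;
}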
@@ -1331,23 +1345,24 @@ static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
{
u32 tmp, i;
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_BYPASS_EN;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp |= SPLL_CTLREQ_CHG;
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp |= CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+ if (RREG32(mmCG_SPLL_STATUS) & CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK)
break;
udelay(1);
}
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp &= ~(CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK |
+ CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK);
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
tmp = RREG32(MPLL_CNTL_MODE);
tmp &= ~MPLL_MCLK_SEL;
@@ -1358,21 +1373,21 @@ static void si_spll_powerdown(struct amdgpu_device *adev)
{
u32 tmp;
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp |= SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp |= SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_RESET;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_SLEEP;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp &= ~SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp &= ~SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
}
static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
@@ -1454,14 +1469,14 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
{
uint32_t temp;
- temp = RREG32(CONFIG_CNTL);
+ temp = RREG32(mmCONFIG_CNTL);
if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
temp &= ~(1<<1);
}
- WREG32(CONFIG_CNTL, temp);
+ WREG32(mmCONFIG_CNTL, temp);
}
static u32 si_get_xclk(struct amdgpu_device *adev)
@@ -1469,12 +1484,12 @@ static u32 si_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
- tmp = RREG32(CG_CLKPIN_CNTL_2);
- if (tmp & MUX_TCLK_TO_XCLK)
+ tmp = RREG32(mmCG_CLKPIN_CNTL_2);
+ if (tmp & CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK)
return TCLK;
- tmp = RREG32(CG_CLKPIN_CNTL);
- if (tmp & XTALIN_DIVIDE)
+ tmp = RREG32(mmCG_CLKPIN_CNTL);
+ if (tmp & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
return reference_clock / 4;
return reference_clock;
@@ -1519,9 +1534,9 @@ static int si_get_pcie_lanes(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
- switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
+ switch ((link_width_cntl & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT) {
case LC_LINK_WIDTH_X1:
return 1;
case LC_LINK_WIDTH_X2:
@@ -1568,13 +1583,13 @@ static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
return;
}
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- link_width_cntl &= ~LC_LINK_WIDTH_MASK;
- link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
- link_width_cntl |= (LC_RECONFIG_NOW |
- LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT;
+ link_width_cntl |= (PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK |
+ PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK);
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
@@ -2018,7 +2033,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
{
- return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+ return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
@@ -2239,9 +2254,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
- LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
@@ -2268,17 +2283,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
- tmp = RREG32_PCIE(PCIE_LC_STATUS1);
- max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
- current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+ tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
+ current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;
if (current_lw < max_lw) {
- tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- if (tmp & LC_RENEGOTIATION_SUPPORT) {
- tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
- tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
- tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
+ tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
+ tmp |= (max_lw << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
+ tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
}
}
@@ -2301,13 +2316,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
PCI_EXP_LNKCTL2,
&gpu_cfg2);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_REDO_EQ;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
mdelay(100);
@@ -2333,16 +2348,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
(PCI_EXP_LNKCTL2_ENTER_COMP |
PCI_EXP_LNKCTL2_TX_MARGIN));
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp &= ~LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
}
}
}
- speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
- speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK | PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
+ speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
tmp16 = 0;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -2354,13 +2369,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
PCI_EXP_LNKCTL2_TLS, tmp16);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
for (i = 0; i < adev->usec_timeout; i++) {
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
break;
udelay(1);
}
@@ -2418,121 +2433,121 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (!amdgpu_device_should_use_aspm(adev))
return;
- orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- data &= ~LC_XMIT_N_FTS_MASK;
- data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) | PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
- data |= LC_GO_TO_RECOVERY;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL3, data);
- orig = data = RREG32_PCIE(PCIE_P_CNTL);
- data |= P_IGNORE_EDB_ERR;
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_P_CNTL, data);
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
- data |= LC_PMI_TO_L1_DIS;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (!disable_l0s)
- data |= LC_L0S_INACTIVITY(7);
+ data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
if (!disable_l1) {
- data |= LC_L1_INACTIVITY(7);
- data &= ~LC_PMI_TO_L1_DIS;
+ data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
if (!disable_plloff_in_l1) {
bool clk_req_support;
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_0);
+ data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_1);
+ data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_0);
+ data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_1);
+ data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_1, data);
if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_0);
+ data &= ~PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_1);
+ data &= ~PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_2);
+ data &= ~PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_2, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_3);
+ data &= ~PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_3, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_3, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_0);
+ data &= ~PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_1);
+ data &= ~PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_2);
+ data &= ~PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_2, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_3);
+ data &= ~PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_3, data);
}
- orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- data &= ~LC_DYN_LANES_PWR_STATE_MASK;
- data |= LC_DYN_LANES_PWR_STATE(3);
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ data |= (3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_CNTL);
+ data &= ~PB0_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB0_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_CNTL, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_CNTL);
+ data &= ~PB1_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB1_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_CNTL, data);
if (!disable_clkreq &&
!pci_is_root_bus(adev->pdev->bus)) {
@@ -2548,64 +2563,64 @@ static void si_program_aspm(struct amdgpu_device *adev)
}
if (clk_req_support) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
- data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL2, data);
- orig = data = RREG32(THM_CLK_CNTL);
- data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
- data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ orig = data = RREG32(mmTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) | (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
if (orig != data)
- WREG32(THM_CLK_CNTL, data);
+ WREG32(mmTHM_CLK_CNTL, data);
- orig = data = RREG32(MISC_CLK_CNTL);
- data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
- data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ orig = data = RREG32(mmMISC_CLK_CNTL);
+ data &= ~(MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK | MISC_CLK_CNTL__ZCLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT) | (1 << MISC_CLK_CNTL__ZCLK_SEL__SHIFT);
if (orig != data)
- WREG32(MISC_CLK_CNTL, data);
+ WREG32(mmMISC_CLK_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL);
- data &= ~BCLK_AS_XCLK;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL);
+ data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL, data);
+ WREG32(mmCG_CLKPIN_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL_2);
- data &= ~FORCE_BIF_REFCLK_EN;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL_2);
+ data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL_2, data);
+ WREG32(mmCG_CLKPIN_CNTL_2, data);
- orig = data = RREG32(MPLL_BYPASSCLK_SEL);
- data &= ~MPLL_CLKOUT_SEL_MASK;
- data |= MPLL_CLKOUT_SEL(4);
+ orig = data = RREG32(mmMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= 4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT;
if (orig != data)
- WREG32(MPLL_BYPASSCLK_SEL, data);
+ WREG32(mmMPLL_BYPASSCLK_SEL, data);
- orig = data = RREG32(SPLL_CNTL_MODE);
- data &= ~SPLL_REFCLK_SEL_MASK;
+ orig = data = RREG32(mmSPLL_CNTL_MODE);
+ data &= ~SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK;
if (orig != data)
- WREG32(SPLL_CNTL_MODE, data);
+ WREG32(mmSPLL_CNTL_MODE, data);
}
}
} else {
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
- orig = data = RREG32_PCIE(PCIE_CNTL2);
- data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+ data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | PCIE_CNTL2__MST_MEM_LS_EN_MASK | PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_CNTL2, data);
+ WREG32_PCIE(ixPCIE_CNTL2, data);
if (!disable_l0s) {
- data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
- data = RREG32_PCIE(PCIE_LC_STATUS1);
- if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~LC_L0S_INACTIVITY_MASK;
+ data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ if((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
+ data = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) && (data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
}
}
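
Nearly every hunk in si_program_aspm() follows one read-modify-write idiom: snapshot the register into orig and data, edit data with the generated _MASK/__SHIFT names, and write back only when something actually changed, skipping redundant MMIO. The idiom as a standalone helper (MMIO accessors hypothetical):

#include <stdint.h>

extern uint32_t mmio_read32(uint32_t reg);              /* hypothetical */
extern void mmio_write32(uint32_t reg, uint32_t val);   /* hypothetical */

static void rmw_field(uint32_t reg, uint32_t mask, uint32_t shift,
                      uint32_t field_val)
{
        uint32_t orig, data;

        orig = data = mmio_read32(reg);
        data &= ~mask;
        data |= (field_val << shift) & mask;
        if (orig != data)                /* skip the write when nothing changed */
                mmio_write32(reg, data);
}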
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index e2089c8da71b..7f18e4875287 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -27,6 +27,8 @@
#include "si.h"
#include "sid.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
DMA0_REGISTER_OFFSET,
@@ -38,17 +40,31 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+/**
+ * si_dma_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr;
}
+/**
+ * si_dma_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
@@ -56,7 +72,7 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
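
The rptr/wptr accessors above convert between the driver's dword-based ring pointer and the byte-based register view; 0x3fffc keeps a 4-byte-aligned offset within the ring window. The two conversions in isolation:

#include <stdint.h>

/* dword pointer -> byte offset for mmDMA_GFX_RB_WPTR */
static uint32_t wptr_to_reg(uint64_t wptr_dw)
{
        return ((uint32_t)wptr_dw << 2) & 0x3fffc;
}

/* byte offset read back from the register -> dword pointer */
static uint64_t reg_to_wptr(uint32_t reg_val)
{
        return (reg_val & 0x3fffc) >> 2;
}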
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
@@ -117,9 +133,9 @@ static void si_dma_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
/* dma0 */
- rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ rb_cntl = RREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i]);
+ rb_cntl &= ~DMA_GFX_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
}
}
@@ -133,44 +149,44 @@ static int si_dma_start(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK | DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
- WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], 0);
rptr_addr = ring->rptr_gpu_addr;
- WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
- WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
- WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+ WREG32(mmDMA_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
/* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+ ib_cntl = DMA_GFX_IB_CNTL__IB_ENABLE_MASK | DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK;
#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
+ ib_cntl |= DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+ WREG32(mmDMA_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+ dma_cntl = RREG32(mmDMA_CNTL + sdma_offsets[i]);
+ dma_cntl &= ~DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + sdma_offsets[i], dma_cntl);
ring->wptr = 0;
- WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_GFX_RB_CNTL__RB_ENABLE_MASK);
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -461,7 +477,7 @@ static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->sdma.num_instances = 2;
+ adev->sdma.num_instances = SDMA_MAX_INSTANCE;
si_dma_set_ring_funcs(adev);
si_dma_set_buffer_funcs(adev);
@@ -545,9 +561,9 @@ static bool si_dma_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS2);
+ u32 tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+ if (tmp & (SRBM_STATUS2__DMA_BUSY_MASK | SRBM_STATUS2__DMA1_BUSY_MASK))
return false;
return true;
@@ -583,14 +599,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -599,14 +615,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -645,11 +661,11 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data &= ~MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data &= ~DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
- WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, 0x00000100);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -657,15 +673,15 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data |= MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data |= DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
- orig = data = RREG32(DMA_CLK_CTRL + offset);
+ orig = data = RREG32(mmDMA_CLK_CTRL + offset);
data = 0xff000000;
if (data != orig)
- WREG32(DMA_CLK_CTRL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, data);
}
}
@@ -679,11 +695,11 @@ static int si_dma_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
- WREG32(DMA_PGFSM_WRITE, 0x00002000);
- WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+ WREG32(mmDMA_PGFSM_WRITE, 0x00002000);
+ WREG32(mmDMA_PGFSM_CONFIG, 0x100010ff);
for (tmp = 0; tmp < 5; tmp++)
- WREG32(DMA_PGFSM_WRITE, 0);
+ WREG32(mmDMA_PGFSM_WRITE, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index d656ef1fa6e1..6da65778292b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -23,115 +23,15 @@
#ifndef SI_ENUMS_H
#define SI_ENUMS_H
-#define VBLANK_INT_MASK (1 << 0)
-#define DC_HPDx_INT_EN (1 << 16)
-#define VBLANK_ACK (1 << 4)
-#define VLINE_ACK (1 << 4)
-
-#define CURSOR_WIDTH 64
-#define CURSOR_HEIGHT 64
-
-#define VGA_VSTATUS_CNTL 0xFFFCFFFF
#define PRIORITY_MARK_MASK 0x7fff
#define PRIORITY_OFF (1 << 16)
#define PRIORITY_ALWAYS_ON (1 << 20)
-#define INTERLEAVE_EN (1 << 0)
-
-#define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-#define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-
-#define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-#define GRPH_ENDIAN_NONE 0
-#define GRPH_ENDIAN_8IN16 1
-#define GRPH_ENDIAN_8IN32 2
-#define GRPH_ENDIAN_8IN64 3
-#define GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-#define GRPH_RED_SEL_R 0
-#define GRPH_RED_SEL_G 1
-#define GRPH_RED_SEL_B 2
-#define GRPH_RED_SEL_A 3
-#define GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-#define GRPH_GREEN_SEL_G 0
-#define GRPH_GREEN_SEL_B 1
-#define GRPH_GREEN_SEL_A 2
-#define GRPH_GREEN_SEL_R 3
-#define GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-#define GRPH_BLUE_SEL_B 0
-#define GRPH_BLUE_SEL_A 1
-#define GRPH_BLUE_SEL_R 2
-#define GRPH_BLUE_SEL_G 3
-#define GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-#define GRPH_ALPHA_SEL_A 0
-#define GRPH_ALPHA_SEL_R 1
-#define GRPH_ALPHA_SEL_G 2
-#define GRPH_ALPHA_SEL_B 3
-
-#define GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define GRPH_DEPTH_8BPP 0
-#define GRPH_DEPTH_16BPP 1
-#define GRPH_DEPTH_32BPP 2
-
-#define GRPH_FORMAT(x) (((x) & 0x7) << 8)
-#define GRPH_FORMAT_INDEXED 0
-#define GRPH_FORMAT_ARGB1555 0
-#define GRPH_FORMAT_ARGB565 1
-#define GRPH_FORMAT_ARGB4444 2
-#define GRPH_FORMAT_AI88 3
-#define GRPH_FORMAT_MONO16 4
-#define GRPH_FORMAT_BGRA5551 5
-#define GRPH_FORMAT_ARGB8888 0
-#define GRPH_FORMAT_ARGB2101010 1
-#define GRPH_FORMAT_32BPP_DIG 2
-#define GRPH_FORMAT_8B_ARGB2101010 3
-#define GRPH_FORMAT_BGRA1010102 4
-#define GRPH_FORMAT_8B_BGRA1010102 5
-#define GRPH_FORMAT_RGB111110 6
-#define GRPH_FORMAT_BGR101111 7
-
-#define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_ARRAY_LINEAR_GENERAL 0
-#define GRPH_ARRAY_LINEAR_ALIGNED 1
-#define GRPH_ARRAY_1D_TILED_THIN1 2
-#define GRPH_ARRAY_2D_TILED_THIN1 4
-#define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
-
-#define CURSOR_EN (1 << 0)
-#define CURSOR_MODE(x) (((x) & 0x3) << 8)
-#define CURSOR_MONO 0
-#define CURSOR_24_1 1
-#define CURSOR_24_8_PRE_MULT 2
-#define CURSOR_24_8_UNPRE_MULT 3
-#define CURSOR_2X_MAGNIFY (1 << 16)
-#define CURSOR_FORCE_MC_ON (1 << 20)
-#define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-#define CURSOR_URGENT_ALWAYS 0
-#define CURSOR_URGENT_1_8 1
-#define CURSOR_URGENT_1_4 2
-#define CURSOR_URGENT_3_8 3
-#define CURSOR_URGENT_1_2 4
-#define CURSOR_UPDATE_PENDING (1 << 0)
-#define CURSOR_UPDATE_TAKEN (1 << 1)
-#define CURSOR_UPDATE_LOCK (1 << 16)
-#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define ES_AND_GS_AUTO 3
-#define RADEON_PACKET_TYPE3 3
-#define CE_PARTITION_BASE 3
-#define BUF_SWAP_32BIT (2 << 16)
#define GFX_POWER_STATUS (1 << 1)
#define GFX_CLOCK_STATUS (1 << 2)
#define GFX_LS_STATUS (1 << 3)
-#define RLC_BUSY_STATUS (1 << 0)
+#define RLC_BUSY_STATUS (1 << 0)
#define RLC_PUD(x) ((x) << 0)
#define RLC_PUD_MASK (0xff << 0)
#define RLC_PDD(x) ((x) << 8)
@@ -140,140 +40,8 @@
#define RLC_TTPD_MASK (0xff << 16)
#define RLC_MSD(x) ((x) << 24)
#define RLC_MSD_MASK (0xff << 24)
-#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
-#define WRITE_DATA_DST_SEL(x) ((x) << 8)
-#define EVENT_TYPE(x) ((x) << 0)
-#define EVENT_INDEX(x) ((x) << 8)
-#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
-#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
-#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
-#define GFX6_NUM_GFX_RINGS 1
-#define GFX6_NUM_COMPUTE_RINGS 2
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
- (((op) & 0xFF) << 8) | \
- ((n) & 0x3FFF) << 16)
-#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
-#define PACKET3_NOP 0x10
-#define PACKET3_SET_BASE 0x11
-#define PACKET3_BASE_INDEX(x) ((x) << 0)
-#define PACKET3_CLEAR_STATE 0x12
-#define PACKET3_INDEX_BUFFER_SIZE 0x13
-#define PACKET3_DISPATCH_DIRECT 0x15
-#define PACKET3_DISPATCH_INDIRECT 0x16
-#define PACKET3_ALLOC_GDS 0x1B
-#define PACKET3_WRITE_GDS_RAM 0x1C
-#define PACKET3_ATOMIC_GDS 0x1D
-#define PACKET3_ATOMIC 0x1E
-#define PACKET3_OCCLUSION_QUERY 0x1F
-#define PACKET3_SET_PREDICATION 0x20
-#define PACKET3_REG_RMW 0x21
-#define PACKET3_COND_EXEC 0x22
-#define PACKET3_PRED_EXEC 0x23
-#define PACKET3_DRAW_INDIRECT 0x24
-#define PACKET3_DRAW_INDEX_INDIRECT 0x25
-#define PACKET3_INDEX_BASE 0x26
-#define PACKET3_DRAW_INDEX_2 0x27
-#define PACKET3_CONTEXT_CONTROL 0x28
-#define PACKET3_INDEX_TYPE 0x2A
-#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
-#define PACKET3_DRAW_INDEX_AUTO 0x2D
-#define PACKET3_DRAW_INDEX_IMMD 0x2E
-#define PACKET3_NUM_INSTANCES 0x2F
-#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
-#define PACKET3_INDIRECT_BUFFER_CONST 0x31
-#define PACKET3_INDIRECT_BUFFER 0x3F
-#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
-#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
-#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
-#define PACKET3_WRITE_DATA 0x37
-#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
-#define PACKET3_MEM_SEMAPHORE 0x39
-#define PACKET3_MPEG_INDEX 0x3A
-#define PACKET3_COPY_DW 0x3B
-#define PACKET3_WAIT_REG_MEM 0x3C
-#define PACKET3_MEM_WRITE 0x3D
-#define PACKET3_COPY_DATA 0x40
-#define PACKET3_CP_DMA 0x41
-# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
-# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
-# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
-# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
-# define PACKET3_CP_DMA_DIS_WC (1 << 21)
-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
-# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
-# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
-# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
-# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
-# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
-# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_PFP_SYNC_ME 0x42
-#define PACKET3_SURFACE_SYNC 0x43
-# define PACKET3_DEST_BASE_0_ENA (1 << 0)
-# define PACKET3_DEST_BASE_1_ENA (1 << 1)
-# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
-# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
-# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
-# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
-# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
-# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
-# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
-# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
-# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
-# define PACKET3_DEST_BASE_2_ENA (1 << 19)
-# define PACKET3_DEST_BASE_3_ENA (1 << 21)
-# define PACKET3_TCL1_ACTION_ENA (1 << 22)
-# define PACKET3_TC_ACTION_ENA (1 << 23)
-# define PACKET3_CB_ACTION_ENA (1 << 25)
-# define PACKET3_DB_ACTION_ENA (1 << 26)
-# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
-# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
-#define PACKET3_ME_INITIALIZE 0x44
-#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
-#define PACKET3_COND_WRITE 0x45
-#define PACKET3_EVENT_WRITE 0x46
-#define PACKET3_EVENT_WRITE_EOP 0x47
-#define PACKET3_EVENT_WRITE_EOS 0x48
-#define PACKET3_PREAMBLE_CNTL 0x4A
-# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
-# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
-#define PACKET3_ONE_REG_WRITE 0x57
-#define PACKET3_LOAD_CONFIG_REG 0x5F
-#define PACKET3_LOAD_CONTEXT_REG 0x60
-#define PACKET3_LOAD_SH_REG 0x61
-#define PACKET3_SET_CONFIG_REG 0x68
-#define PACKET3_SET_CONFIG_REG_START 0x00002000
-#define PACKET3_SET_CONFIG_REG_END 0x00002c00
-#define PACKET3_SET_CONTEXT_REG 0x69
-#define PACKET3_SET_CONTEXT_REG_START 0x000a000
-#define PACKET3_SET_CONTEXT_REG_END 0x000a400
-#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
-#define PACKET3_SET_RESOURCE_INDIRECT 0x74
-#define PACKET3_SET_SH_REG 0x76
-#define PACKET3_SET_SH_REG_START 0x00002c00
-#define PACKET3_SET_SH_REG_END 0x00003000
-#define PACKET3_SET_SH_REG_OFFSET 0x77
-#define PACKET3_ME_WRITE 0x7A
-#define PACKET3_SCRATCH_RAM_WRITE 0x7D
-#define PACKET3_SCRATCH_RAM_READ 0x7E
-#define PACKET3_CE_WRITE 0x7F
-#define PACKET3_LOAD_CONST_RAM 0x80
-#define PACKET3_WRITE_CONST_RAM 0x81
-#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
-#define PACKET3_DUMP_CONST_RAM 0x83
-#define PACKET3_INCREMENT_CE_COUNTER 0x84
-#define PACKET3_INCREMENT_DE_COUNTER 0x85
-#define PACKET3_WAIT_ON_CE_COUNTER 0x86
-#define PACKET3_WAIT_ON_DE_COUNTER 0x87
-#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
-#define PACKET3_SET_CE_DE_COUNTERS 0x89
-#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
-#define PACKET3_SWITCH_BUFFER 0x8B
-#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
-#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
-#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-
#endif
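
The PACKET3 macro deleted above encodes a type-3 command header as [31:30]=3, [29:16]=dword count, [15:8]=opcode. A self-contained check of the encoding, with the opcode value taken from the PACKET3_WRITE_DATA define above (the type constant is made unsigned here only to keep the 30-bit shift well-defined in a standalone build):

#include <assert.h>
#include <stdint.h>

#define RADEON_PACKET_TYPE3 3u
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) |   \
                        (((op) & 0xFF) << 8) |          \
                        ((n) & 0x3FFF) << 16)

int main(void)
{
        /* WRITE_DATA (0x37) carrying 3 payload dwords */
        uint32_t hdr = PACKET3(0x37, 3);

        assert(hdr == 0xC0033700u);
        return 0;
}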
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 5c38e1fb1dca..1df00f8a2406 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -27,6 +27,7 @@
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
@@ -213,7 +214,7 @@ static int si_ih_resume(struct amdgpu_ip_block *ip_block)
static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
return false;
@@ -239,23 +240,23 @@ static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ dev_info(adev->dev, "mmSRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
}
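
The block above is the standard soft-reset handshake: set the reset bits, read the register back to post the write, hold for 50 microseconds, then clear the bits and post again. As a standalone sketch (helpers hypothetical):

#include <stdint.h>

extern uint32_t mmio_read32(uint32_t reg);              /* hypothetical */
extern void mmio_write32(uint32_t reg, uint32_t val);   /* hypothetical */
extern void delay_us(unsigned int us);                  /* hypothetical: cf. udelay() */

static void soft_reset_pulse(uint32_t reg, uint32_t bits)
{
        uint32_t tmp = mmio_read32(reg);

        tmp |= bits;
        mmio_write32(reg, tmp);
        (void)mmio_read32(reg);   /* read back to flush the posted write */
        delay_us(50);

        tmp &= ~bits;
        mmio_write32(reg, tmp);
        (void)mmio_read32(reg);
        delay_us(50);
}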
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index cbf232f5235b..cbd4f8951cfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -24,43 +24,12 @@
#ifndef SI_H
#define SI_H
-#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
-
-#define SI_MAX_SH_GPRS 256
-#define SI_MAX_TEMP_GPRS 16
-#define SI_MAX_SH_THREADS 256
-#define SI_MAX_SH_STACK_ENTRIES 4096
-#define SI_MAX_FRC_EOV_CNT 16384
-#define SI_MAX_BACKENDS 8
-#define SI_MAX_BACKENDS_MASK 0xFF
-#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
-#define SI_MAX_SIMDS 12
-#define SI_MAX_SIMDS_MASK 0x0FFF
-#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
-#define SI_MAX_PIPES 8
-#define SI_MAX_PIPES_MASK 0xFF
-#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
-#define SI_MAX_LDS_NUM 0xFFFF
-#define SI_MAX_TCC 16
-#define SI_MAX_TCC_MASK 0xFFFF
#define SI_MAX_CTLACKS_ASSERTION_WAIT 100
-/* SMC IND accessor regs */
-#define SMC_IND_INDEX_0 0x80
-#define SMC_IND_DATA_0 0x81
-
-#define SMC_IND_ACCESS_CNTL 0x8A
-# define AUTO_INCREMENT_IND_0 (1 << 0)
-#define SMC_MESSAGE_0 0x8B
-#define SMC_RESP_0 0x8C
-
/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000
-#define CG_CGTT_LOCAL_0 0x400
-#define CG_CGTT_LOCAL_1 0x401
-
/* SMC IND registers */
#define SMC_SYSCON_RESET_CNTL 0x80000000
# define RST_REG (1 << 0)
@@ -68,9 +37,6 @@
# define CK_DISABLE (1 << 0)
# define CKEN (1 << 24)
-#define VGA_HDP_CONTROL 0xCA
-#define VGA_MEMORY_DISABLE (1 << 4)
-
#define DCCG_DISP_SLOW_SELECT_REG 0x13F
#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
@@ -79,47 +45,6 @@
#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
-#define CG_SPLL_FUNC_CNTL 0x180
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_PDIV_A(x) ((x) << 20)
-#define SPLL_PDIV_A_MASK (0x7f << 20)
-#define SPLL_PDIV_A_SHIFT 20
-#define CG_SPLL_FUNC_CNTL_2 0x181
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define SPLL_CTLREQ_CHG (1 << 23)
-#define SCLK_MUX_UPDATE (1 << 26)
-#define CG_SPLL_FUNC_CNTL_3 0x182
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_FB_DIV_SHIFT 0
-#define SPLL_DITHEN (1 << 28)
-#define CG_SPLL_FUNC_CNTL_4 0x183
-
-#define SPLL_STATUS 0x185
-#define SPLL_CHG_STATUS (1 << 1)
-#define SPLL_CNTL_MODE 0x186
-#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 26)
-# define SPLL_REFCLK_SEL_MASK (3 << 26)
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x188
-#define SSEN (1 << 0)
-#define CLK_S(x) ((x) << 4)
-#define CLK_S_MASK (0xfff << 4)
-#define CLK_S_SHIFT 4
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x189
-#define CLK_V(x) ((x) << 0)
-#define CLK_V_MASK (0x3ffffff << 0)
-#define CLK_V_SHIFT 0
-
-#define CG_SPLL_AUTOSCALE_CNTL 0x18b
-# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
-
/* discrete uvd clocks */
#define CG_UPLL_FUNC_CNTL 0x18d
# define UPLL_RESET_MASK 0x00000001
@@ -149,317 +74,13 @@
#define CG_UPLL_SPREAD_SPECTRUM 0x194
# define SSEN_MASK 0x00000001
-#define MPLL_BYPASSCLK_SEL 0x197
-# define MPLL_CLKOUT_SEL(x) ((x) << 8)
-# define MPLL_CLKOUT_SEL_MASK 0xFF00
-
-#define CG_CLKPIN_CNTL 0x198
-# define XTALIN_DIVIDE (1 << 1)
-# define BCLK_AS_XCLK (1 << 2)
-#define CG_CLKPIN_CNTL_2 0x199
-# define FORCE_BIF_REFCLK_EN (1 << 3)
-# define MUX_TCLK_TO_XCLK (1 << 8)
-
-#define THM_CLK_CNTL 0x19b
-# define CMON_CLK_SEL(x) ((x) << 0)
-# define CMON_CLK_SEL_MASK 0xFF
-# define TMON_CLK_SEL(x) ((x) << 8)
-# define TMON_CLK_SEL_MASK 0xFF00
-#define MISC_CLK_CNTL 0x19c
-# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
-# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
-# define ZCLK_SEL(x) ((x) << 8)
-# define ZCLK_SEL_MASK 0xFF00
-
-#define CG_THERMAL_CTRL 0x1c0
-#define DPM_EVENT_SRC(x) ((x) << 0)
-#define DPM_EVENT_SRC_MASK (7 << 0)
-#define DIG_THERM_DPM(x) ((x) << 14)
-#define DIG_THERM_DPM_MASK 0x003FC000
-#define DIG_THERM_DPM_SHIFT 14
-#define CG_THERMAL_STATUS 0x1c1
-#define FDO_PWM_DUTY(x) ((x) << 9)
-#define FDO_PWM_DUTY_MASK (0xff << 9)
-#define FDO_PWM_DUTY_SHIFT 9
-#define CG_THERMAL_INT 0x1c2
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
-#define CG_MULT_THERMAL_CTRL 0x1c4
-#define TEMP_SEL(x) ((x) << 20)
-#define TEMP_SEL_MASK (0xff << 20)
-#define TEMP_SEL_SHIFT 20
-#define CG_MULT_THERMAL_STATUS 0x1c5
-#define ASIC_MAX_TEMP(x) ((x) << 0)
-#define ASIC_MAX_TEMP_MASK 0x000001ff
-#define ASIC_MAX_TEMP_SHIFT 0
-#define CTF_TEMP(x) ((x) << 9)
-#define CTF_TEMP_MASK 0x0003fe00
-#define CTF_TEMP_SHIFT 9
-
-#define CG_FDO_CTRL0 0x1d5
-#define FDO_STATIC_DUTY(x) ((x) << 0)
-#define FDO_STATIC_DUTY_MASK 0x000000FF
-#define FDO_STATIC_DUTY_SHIFT 0
-#define CG_FDO_CTRL1 0x1d6
-#define FMAX_DUTY100(x) ((x) << 0)
-#define FMAX_DUTY100_MASK 0x000000FF
-#define FMAX_DUTY100_SHIFT 0
-#define CG_FDO_CTRL2 0x1d7
-#define TMIN(x) ((x) << 0)
-#define TMIN_MASK 0x000000FF
-#define TMIN_SHIFT 0
-#define FDO_PWM_MODE(x) ((x) << 11)
-#define FDO_PWM_MODE_MASK (7 << 11)
-#define FDO_PWM_MODE_SHIFT 11
-#define TACH_PWM_RESP_RATE(x) ((x) << 25)
-#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
-#define TACH_PWM_RESP_RATE_SHIFT 25
-
-#define CG_TACH_CTRL 0x1dc
-# define EDGE_PER_REV(x) ((x) << 0)
-# define EDGE_PER_REV_MASK (0x7 << 0)
-# define EDGE_PER_REV_SHIFT 0
-# define TARGET_PERIOD(x) ((x) << 3)
-# define TARGET_PERIOD_MASK 0xfffffff8
-# define TARGET_PERIOD_SHIFT 3
-#define CG_TACH_STATUS 0x1dd
-# define TACH_PERIOD(x) ((x) << 0)
-# define TACH_PERIOD_MASK 0xffffffff
-# define TACH_PERIOD_SHIFT 0
-
-#define GENERAL_PWRMGT 0x1e0
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (1 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define VOLT_PWRMGT_EN (1 << 10)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-#define CG_TPC 0x1e1
-#define SCLK_PWRMGT_CNTL 0x1e2
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-# define DYN_LIGHT_SLEEP_EN (1 << 14)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x1e6
-# define CURRENT_STATE_INDEX_MASK (0xf << 4)
-# define CURRENT_STATE_INDEX_SHIFT 4
-
-#define CG_FTV 0x1ef
-
-#define CG_FFCT_0 0x1f0
-# define UTC_0(x) ((x) << 0)
-# define UTC_0_MASK (0x3ff << 0)
-# define DTC_0(x) ((x) << 10)
-# define DTC_0_MASK (0x3ff << 10)
-
-#define CG_BSP 0x1ff
-# define BSP(x) ((x) << 0)
-# define BSP_MASK (0xffff << 0)
-# define BSU(x) ((x) << 16)
-# define BSU_MASK (0xf << 16)
-#define CG_AT 0x200
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-
-#define CG_GIT 0x201
-# define CG_GICST(x) ((x) << 0)
-# define CG_GICST_MASK (0xffff << 0)
-# define CG_GIPOT(x) ((x) << 16)
-# define CG_GIPOT_MASK (0xffff << 16)
-
-#define CG_SSP 0x203
-# define SST(x) ((x) << 0)
-# define SST_MASK (0xffff << 0)
-# define SSTU(x) ((x) << 16)
-# define SSTU_MASK (0xf << 16)
-
-#define CG_DISPLAY_GAP_CNTL 0x20a
-# define DISP1_GAP(x) ((x) << 0)
-# define DISP1_GAP_MASK (3 << 0)
-# define DISP2_GAP(x) ((x) << 2)
-# define DISP2_GAP_MASK (3 << 2)
-# define VBI_TIMER_COUNT(x) ((x) << 4)
-# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
-# define VBI_TIMER_UNIT(x) ((x) << 20)
-# define VBI_TIMER_UNIT_MASK (7 << 20)
-# define DISP1_GAP_MCHG(x) ((x) << 24)
-# define DISP1_GAP_MCHG_MASK (3 << 24)
-# define DISP2_GAP_MCHG(x) ((x) << 26)
-# define DISP2_GAP_MCHG_MASK (3 << 26)
-
-#define CG_ULV_CONTROL 0x21e
-#define CG_ULV_PARAMETER 0x21f
-
-#define SMC_SCRATCH0 0x221
-
-#define CG_CAC_CTRL 0x22e
-# define CAC_WINDOW(x) ((x) << 0)
-# define CAC_WINDOW_MASK 0x00ffffff
-
-#define DMIF_ADDR_CONFIG 0x2F5
-
-#define DMIF_ADDR_CALC 0x300
-
-#define PIPE0_DMIF_BUFFER_CONTROL 0x0328
-# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
-# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
-
-#define SRBM_STATUS 0x394
-#define GRBM_RQ_PENDING (1 << 5)
-#define VMC_BUSY (1 << 8)
-#define MCB_BUSY (1 << 9)
-#define MCB_NON_DISPLAY_BUSY (1 << 10)
-#define MCC_BUSY (1 << 11)
-#define MCD_BUSY (1 << 12)
-#define SEM_BUSY (1 << 14)
-#define IH_BUSY (1 << 17)
-
-#define SRBM_SOFT_RESET 0x398
-#define SOFT_RESET_BIF (1 << 1)
-#define SOFT_RESET_DC (1 << 5)
-#define SOFT_RESET_DMA1 (1 << 6)
-#define SOFT_RESET_GRBM (1 << 8)
-#define SOFT_RESET_HDP (1 << 9)
-#define SOFT_RESET_IH (1 << 10)
-#define SOFT_RESET_MC (1 << 11)
-#define SOFT_RESET_ROM (1 << 14)
-#define SOFT_RESET_SEM (1 << 15)
-#define SOFT_RESET_VMC (1 << 17)
-#define SOFT_RESET_DMA (1 << 20)
-#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
-#define SOFT_RESET_ORB (1 << 23)
-
-#define CC_SYS_RB_BACKEND_DISABLE 0x3A0
-#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3A1
-
-#define SRBM_READ_ERROR 0x3A6
-#define SRBM_INT_CNTL 0x3A8
-#define SRBM_INT_ACK 0x3AA
-
-#define SRBM_STATUS2 0x3B1
-#define DMA_BUSY (1 << 5)
-#define DMA1_BUSY (1 << 6)
-
-#define VM_L2_CNTL 0x500
-#define ENABLE_L2_CACHE (1 << 0)
-#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
-#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
-#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
-#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
-#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
-#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
-#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
-#define VM_L2_CNTL2 0x501
-#define INVALIDATE_ALL_L1_TLBS (1 << 0)
-#define INVALIDATE_L2_CACHE (1 << 1)
-#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
-#define INVALIDATE_PTE_AND_PDE_CACHES 0
-#define INVALIDATE_ONLY_PTE_CACHES 1
-#define INVALIDATE_ONLY_PDE_CACHES 2
-#define VM_L2_CNTL3 0x502
-#define BANK_SELECT(x) ((x) << 0)
-#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
-#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
-#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
-#define VM_L2_STATUS 0x503
-#define L2_BUSY (1 << 0)
-#define VM_CONTEXT0_CNTL 0x504
-#define ENABLE_CONTEXT (1 << 0)
-#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
-#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
-#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
-#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
-#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
-#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
-#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
-#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
-#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
-#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
-#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
-#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
-#define VM_CONTEXT1_CNTL 0x505
-#define VM_CONTEXT0_CNTL2 0x50C
-#define VM_CONTEXT1_CNTL2 0x50D
-#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x50E
-#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x50F
-#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x510
-#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x511
-#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x512
-#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x513
-#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x514
-#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x515
-
-#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x53f
-#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x537
-#define PROTECTIONS_MASK (0xf << 0)
-#define PROTECTIONS_SHIFT 0
- /* bit 0: range
- * bit 1: pde0
- * bit 2: valid
- * bit 3: read
- * bit 4: write
- */
-#define MEMORY_CLIENT_ID_MASK (0xff << 12)
-#define MEMORY_CLIENT_ID_SHIFT 12
-#define MEMORY_CLIENT_RW_MASK (1 << 24)
-#define MEMORY_CLIENT_RW_SHIFT 24
-#define FAULT_VMID_MASK (0xf << 25)
-#define FAULT_VMID_SHIFT 25
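For reference, a fault handler decodes the VM_CONTEXT1_PROTECTION_FAULT_STATUS fields above roughly as in this sketch (illustrative only, not part of the patch; RREG32 is amdgpu's usual register-read helper):

	u32 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;		/* faulting VMID */
	u32 client = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
	bool write_fault = status & MEMORY_CLIENT_RW_MASK;			/* set on write faults */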
-
#define VM_INVALIDATE_REQUEST 0x51E
#define VM_INVALIDATE_RESPONSE 0x51F
-#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x546
-#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x547
-
-#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54F
-#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x550
-#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x551
-#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x552
-#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x553
-#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x554
-#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x555
-#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x556
-#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x557
-#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x558
-
-#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x55F
-#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x560
-
#define VM_L2_CG 0x570
#define MC_CG_ENABLE (1 << 18)
#define MC_LS_ENABLE (1 << 19)
-#define MC_SHARED_CHMAP 0x801
-#define NOOFCHAN_SHIFT 12
-#define NOOFCHAN_MASK 0x0000f000
-#define MC_SHARED_CHREMAP 0x802
-
#define MC_VM_FB_LOCATION 0x809
#define MC_VM_AGP_TOP 0x80A
#define MC_VM_AGP_BOT 0x80B
@@ -491,21 +112,6 @@
#define MC_CITF_MISC_WR_CG 0x993
#define MC_CITF_MISC_VM_CG 0x994
-#define MC_ARB_RAMCFG 0x9D8
-#define NOOFBANK_SHIFT 0
-#define NOOFBANK_MASK 0x00000003
-#define NOOFRANK_SHIFT 2
-#define NOOFRANK_MASK 0x00000004
-#define NOOFROWS_SHIFT 3
-#define NOOFROWS_MASK 0x00000038
-#define NOOFCOLS_SHIFT 6
-#define NOOFCOLS_MASK 0x000000C0
-#define CHANSIZE_SHIFT 8
-#define CHANSIZE_MASK 0x00000100
-#define CHANSIZE_OVERRIDE (1 << 11)
-#define NOOFGROUPS_SHIFT 12
-#define NOOFGROUPS_MASK 0x00001000
-
#define MC_ARB_DRAM_TIMING 0x9DD
#define MC_ARB_DRAM_TIMING2 0x9DE
@@ -631,20 +237,6 @@
#define CLKS(x) ((x) << 0)
#define CLKS_MASK (0xfff << 0)
-#define HDP_HOST_PATH_CNTL 0xB00
-#define CLOCK_GATING_DIS (1 << 23)
-#define HDP_NONSURFACE_BASE 0xB01
-#define HDP_NONSURFACE_INFO 0xB02
-#define HDP_NONSURFACE_SIZE 0xB03
-
-#define HDP_DEBUG0 0xBCC
-
-#define HDP_ADDR_CONFIG 0xBD2
-#define HDP_MISC_CNTL 0xBD3
-#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
-#define HDP_MEM_POWER_LS 0xBD4
-#define HDP_LS_ENABLE (1 << 0)
-
#define ATC_MISC_CG 0xCD4
#define IH_RB_CNTL 0xF80
@@ -674,8 +266,6 @@
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
-#define CONFIG_MEMSIZE 0x150A
-
#define INTERRUPT_CNTL 0x151A
# define IH_DUMMY_RD_OVERRIDE (1 << 0)
# define IH_DUMMY_RD_EN (1 << 1)
@@ -683,486 +273,22 @@
# define GEN_IH_INT_EN (1 << 8)
#define INTERRUPT_CNTL2 0x151B
-#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
-
-#define BIF_FB_EN 0x1524
-#define FB_READ_EN (1 << 0)
-#define FB_WRITE_EN (1 << 1)
-
-#define HDP_REG_COHERENCY_FLUSH_CNTL 0x1528
-
-/* DCE6 ELD audio interface */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
-# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
-/* max channels minus one. 7 = 8 channels */
-# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
-# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
-# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
-/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
- * bit0 = 32 kHz
- * bit1 = 44.1 kHz
- * bit2 = 48 kHz
- * bit3 = 88.2 kHz
- * bit4 = 96 kHz
- * bit5 = 176.4 kHz
- * bit6 = 192 kHz
- */
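These (removed) descriptor helpers compose into one dword per format; a hedged sketch, where 0x7f assumes support for all seven sample rates listed above:

	/* 8-channel LPCM (7 = max channels minus one), 32 kHz..192 kHz */
	u32 desc = MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x7f);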
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
-# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
-# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
-/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
- * 0 = invalid
- * x = legal delay value
- * 255 = sync not supported
- */
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
-# define HBR_CAPABLE (1 << 0) /* enabled by default */
-
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
-# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
-# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
-# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
-# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
-# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
-# define DESCRIPTION0(x) (((x) & 0xff) << 0)
-# define DESCRIPTION1(x) (((x) & 0xff) << 8)
-# define DESCRIPTION2(x) (((x) & 0xff) << 16)
-# define DESCRIPTION3(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
-# define DESCRIPTION4(x) (((x) & 0xff) << 0)
-# define DESCRIPTION5(x) (((x) & 0xff) << 8)
-# define DESCRIPTION6(x) (((x) & 0xff) << 16)
-# define DESCRIPTION7(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
-# define DESCRIPTION8(x) (((x) & 0xff) << 0)
-# define DESCRIPTION9(x) (((x) & 0xff) << 8)
-# define DESCRIPTION10(x) (((x) & 0xff) << 16)
-# define DESCRIPTION11(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
-# define DESCRIPTION12(x) (((x) & 0xff) << 0)
-# define DESCRIPTION13(x) (((x) & 0xff) << 8)
-# define DESCRIPTION14(x) (((x) & 0xff) << 16)
-# define DESCRIPTION15(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
-# define DESCRIPTION16(x) (((x) & 0xff) << 0)
-# define DESCRIPTION17(x) (((x) & 0xff) << 8)
-
-#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
-# define AUDIO_ENABLED (1 << 31)
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
-#define PORT_CONNECTIVITY_MASK (3 << 30)
-#define PORT_CONNECTIVITY_SHIFT 30
-
-#define DC_LB_MEMORY_SPLIT 0x1AC3
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-
-#define PRIORITY_A_CNT 0x1AC6
-#define PRIORITY_MARK_MASK 0x7fff
-#define PRIORITY_OFF (1 << 16)
-#define PRIORITY_ALWAYS_ON (1 << 20)
-#define PRIORITY_B_CNT 0x1AC7
-
-#define DPG_PIPE_ARBITRATION_CONTROL3 0x1B32
-# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DPG_PIPE_LATENCY_CONTROL 0x1B33
-# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
-# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
-
-/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
-#define VLINE_STATUS 0x1AEE
-# define VLINE_OCCURRED (1 << 0)
-# define VLINE_ACK (1 << 4)
-# define VLINE_STAT (1 << 12)
-# define VLINE_INTERRUPT (1 << 16)
-# define VLINE_INTERRUPT_TYPE (1 << 17)
-/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
-#define VBLANK_STATUS 0x1AEF
-# define VBLANK_OCCURRED (1 << 0)
-# define VBLANK_ACK (1 << 4)
-# define VBLANK_STAT (1 << 12)
-# define VBLANK_INTERRUPT (1 << 16)
-# define VBLANK_INTERRUPT_TYPE (1 << 17)
-
-/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
-#define INT_MASK 0x1AD0
-# define VBLANK_INT_MASK (1 << 0)
-# define VLINE_INT_MASK (1 << 4)
-
-#define DISP_INTERRUPT_STATUS 0x183D
-# define LB_D1_VLINE_INTERRUPT (1 << 2)
-# define LB_D1_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD1_INTERRUPT (1 << 17)
-# define DC_HPD1_RX_INTERRUPT (1 << 18)
-# define DACA_AUTODETECT_INTERRUPT (1 << 22)
-# define DACB_AUTODETECT_INTERRUPT (1 << 23)
-# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
-# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
-#define DISP_INTERRUPT_STATUS_CONTINUE 0x183E
-# define LB_D2_VLINE_INTERRUPT (1 << 2)
-# define LB_D2_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD2_INTERRUPT (1 << 17)
-# define DC_HPD2_RX_INTERRUPT (1 << 18)
-# define DISP_TIMER_INTERRUPT (1 << 24)
-#define DISP_INTERRUPT_STATUS_CONTINUE2 0x183F
-# define LB_D3_VLINE_INTERRUPT (1 << 2)
-# define LB_D3_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD3_INTERRUPT (1 << 17)
-# define DC_HPD3_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE3 0x1840
-# define LB_D4_VLINE_INTERRUPT (1 << 2)
-# define LB_D4_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD4_INTERRUPT (1 << 17)
-# define DC_HPD4_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE4 0x1853
-# define LB_D5_VLINE_INTERRUPT (1 << 2)
-# define LB_D5_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD5_INTERRUPT (1 << 17)
-# define DC_HPD5_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5 0x1854
-# define LB_D6_VLINE_INTERRUPT (1 << 2)
-# define LB_D6_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD6_INTERRUPT (1 << 17)
-# define DC_HPD6_RX_INTERRUPT (1 << 18)
-
-/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
-#define GRPH_INT_STATUS 0x1A16
-# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
-# define GRPH_PFLIP_INT_CLEAR (1 << 8)
-/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
-#define GRPH_INT_CONTROL 0x1A17
-# define GRPH_PFLIP_INT_MASK (1 << 0)
-# define GRPH_PFLIP_INT_TYPE (1 << 8)
-
-#define DAC_AUTODETECT_INT_CONTROL 0x19F2
-
-#define DC_HPD1_INT_STATUS 0x1807
-#define DC_HPD2_INT_STATUS 0x180A
-#define DC_HPD3_INT_STATUS 0x180D
-#define DC_HPD4_INT_STATUS 0x1810
-#define DC_HPD5_INT_STATUS 0x1813
-#define DC_HPD6_INT_STATUS 0x1816
-# define DC_HPDx_INT_STATUS (1 << 0)
-# define DC_HPDx_SENSE (1 << 1)
-# define DC_HPDx_RX_INT_STATUS (1 << 8)
-
-#define DC_HPD1_INT_CONTROL 0x1808
-#define DC_HPD2_INT_CONTROL 0x180B
-#define DC_HPD3_INT_CONTROL 0x180E
-#define DC_HPD4_INT_CONTROL 0x1811
-#define DC_HPD5_INT_CONTROL 0x1814
-#define DC_HPD6_INT_CONTROL 0x1817
-# define DC_HPDx_INT_ACK (1 << 0)
-# define DC_HPDx_INT_POLARITY (1 << 8)
-# define DC_HPDx_INT_EN (1 << 16)
-# define DC_HPDx_RX_INT_ACK (1 << 20)
-# define DC_HPDx_RX_INT_EN (1 << 24)
-
-#define DC_HPD1_CONTROL 0x1809
-#define DC_HPD2_CONTROL 0x180C
-#define DC_HPD3_CONTROL 0x180F
-#define DC_HPD4_CONTROL 0x1812
-#define DC_HPD5_CONTROL 0x1815
-#define DC_HPD6_CONTROL 0x1818
-# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
-# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
-# define DC_HPDx_EN (1 << 28)
-
-#define DPG_PIPE_STUTTER_CONTROL 0x1B35
-# define STUTTER_ENABLE (1 << 0)
-
-/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
-#define CRTC_STATUS_FRAME_COUNT 0x1BA6
-
-/* Audio clocks */
-#define DCCG_AUDIO_DTO0_PHASE 0x05b0
-#define DCCG_AUDIO_DTO0_MODULE 0x05b4
-#define DCCG_AUDIO_DTO1_PHASE 0x05c0
-#define DCCG_AUDIO_DTO1_MODULE 0x05c4
-
-#define GRBM_CNTL 0x2000
-#define GRBM_READ_TIMEOUT(x) ((x) << 0)
-
-#define GRBM_STATUS2 0x2002
-#define RLC_RQ_PENDING (1 << 0)
-#define RLC_BUSY (1 << 8)
-#define TC_BUSY (1 << 9)
-
-#define GRBM_STATUS 0x2004
-#define CMDFIFO_AVAIL_MASK 0x0000000F
-#define RING2_RQ_PENDING (1 << 4)
-#define SRBM_RQ_PENDING (1 << 5)
-#define RING1_RQ_PENDING (1 << 6)
-#define CF_RQ_PENDING (1 << 7)
-#define PF_RQ_PENDING (1 << 8)
-#define GDS_DMA_RQ_PENDING (1 << 9)
-#define GRBM_EE_BUSY (1 << 10)
-#define DB_CLEAN (1 << 12)
-#define CB_CLEAN (1 << 13)
-#define TA_BUSY (1 << 14)
-#define GDS_BUSY (1 << 15)
-#define VGT_BUSY (1 << 17)
-#define IA_BUSY_NO_DMA (1 << 18)
-#define IA_BUSY (1 << 19)
-#define SX_BUSY (1 << 20)
-#define SPI_BUSY (1 << 22)
-#define BCI_BUSY (1 << 23)
-#define SC_BUSY (1 << 24)
-#define PA_BUSY (1 << 25)
-#define DB_BUSY (1 << 26)
-#define CP_COHERENCY_BUSY (1 << 28)
-#define CP_BUSY (1 << 29)
-#define CB_BUSY (1 << 30)
-#define GUI_ACTIVE (1 << 31)
-#define GRBM_STATUS_SE0 0x2005
-#define GRBM_STATUS_SE1 0x2006
-#define SE_DB_CLEAN (1 << 1)
-#define SE_CB_CLEAN (1 << 2)
-#define SE_BCI_BUSY (1 << 22)
-#define SE_VGT_BUSY (1 << 23)
-#define SE_PA_BUSY (1 << 24)
-#define SE_TA_BUSY (1 << 25)
-#define SE_SX_BUSY (1 << 26)
-#define SE_SPI_BUSY (1 << 27)
-#define SE_SC_BUSY (1 << 29)
-#define SE_DB_BUSY (1 << 30)
-#define SE_CB_BUSY (1 << 31)
-
-#define GRBM_INT_CNTL 0x2018
-# define RDERR_INT_ENABLE (1 << 0)
-# define GUI_IDLE_INT_ENABLE (1 << 19)
-
-#define CP_STRMOUT_CNTL 0x213F
-#define SCRATCH_REG0 0x2140
-#define SCRATCH_REG1 0x2141
-#define SCRATCH_REG2 0x2142
-#define SCRATCH_REG3 0x2143
-#define SCRATCH_REG4 0x2144
-#define SCRATCH_REG5 0x2145
-#define SCRATCH_REG6 0x2146
-#define SCRATCH_REG7 0x2147
-
-#define SCRATCH_UMSK 0x2150
-#define SCRATCH_ADDR 0x2151
-
-#define CP_SEM_WAIT_TIMER 0x216F
-
-#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
-
-#define CP_ME_CNTL 0x21B6
-#define CP_CE_HALT (1 << 24)
-#define CP_PFP_HALT (1 << 26)
-#define CP_ME_HALT (1 << 28)
-
-#define CP_COHER_CNTL2 0x217A
-
-#define CP_RB2_RPTR 0x21BE
-#define CP_RB1_RPTR 0x21BF
-#define CP_RB0_RPTR 0x21C0
-#define CP_RB_WPTR_DELAY 0x21C1
-
-#define CP_QUEUE_THRESHOLDS 0x21D8
-#define ROQ_IB1_START(x) ((x) << 0)
-#define ROQ_IB2_START(x) ((x) << 8)
-#define CP_MEQ_THRESHOLDS 0x21D9
-#define MEQ1_START(x) ((x) << 0)
-#define MEQ2_START(x) ((x) << 8)
-
-#define CP_PERFMON_CNTL 0x21FF
-
#define VGT_VTX_VECT_EJECT_REG 0x222C
-
#define VGT_ESGS_RING_SIZE 0x2232
#define VGT_GSVS_RING_SIZE 0x2233
-
#define VGT_GS_VERTEX_REUSE 0x2235
-
#define VGT_PRIMITIVE_TYPE 0x2256
#define VGT_INDEX_TYPE 0x2257
-
#define VGT_NUM_INDICES 0x225C
#define VGT_NUM_INSTANCES 0x225D
-
#define VGT_TF_RING_SIZE 0x2262
-
#define VGT_HS_OFFCHIP_PARAM 0x226C
-
#define VGT_TF_MEMORY_BASE 0x226E
-#define PA_CL_ENHANCE 0x2285
-#define CLIP_VTX_REORDER_ENA (1 << 0)
-#define NUM_CLIP_SEQ(x) ((x) << 1)
-
-#define PA_SU_LINE_STIPPLE_VALUE 0x2298
-
-#define PA_SC_LINE_STIPPLE_STATE 0x22C4
-
-#define PA_SC_FORCE_EOV_MAX_CNTS 0x22C9
-#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
-#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
-
-#define PA_SC_FIFO_SIZE 0x22F3
-#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
-#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
-#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
-#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
-
#define PA_SC_ENHANCE 0x22FC
-#define SQ_CONFIG 0x2300
-
-#define SQC_CACHES 0x2302
-
-#define SQ_POWER_THROTTLE 0x2396
-#define MIN_POWER(x) ((x) << 0)
-#define MIN_POWER_MASK (0x3fff << 0)
-#define MIN_POWER_SHIFT 0
-#define MAX_POWER(x) ((x) << 16)
-#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 16
-#define SQ_POWER_THROTTLE2 0x2397
-#define MAX_POWER_DELTA(x) ((x) << 0)
-#define MAX_POWER_DELTA_MASK (0x3fff << 0)
-#define MAX_POWER_DELTA_SHIFT 0
-#define STI_SIZE(x) ((x) << 16)
-#define STI_SIZE_MASK (0x3ff << 16)
-#define STI_SIZE_SHIFT 16
-#define LTI_RATIO(x) ((x) << 27)
-#define LTI_RATIO_MASK (0xf << 27)
-#define LTI_RATIO_SHIFT 27
-
-#define SX_DEBUG_1 0x2418
-
-#define SPI_STATIC_THREAD_MGMT_1 0x2438
-#define SPI_STATIC_THREAD_MGMT_2 0x2439
-#define SPI_STATIC_THREAD_MGMT_3 0x243A
-#define SPI_PS_MAX_WAVE_ID 0x243B
-
-#define SPI_CONFIG_CNTL 0x2440
-
-#define SPI_CONFIG_CNTL_1 0x244F
-#define VTX_DONE_DELAY(x) ((x) << 0)
-#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
-
-#define CGTS_TCC_DISABLE 0x2452
-#define CGTS_USER_TCC_DISABLE 0x2453
-#define TCC_DISABLE_MASK 0xFFFF0000
-#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x2454
-#define OVERRIDE (1 << 21)
-#define LS_OVERRIDE (1 << 22)
-
-#define SPI_LB_CU_MASK 0x24D5
-
#define TA_CNTL_AUX 0x2542
-#define CC_RB_BACKEND_DISABLE 0x263D
-#define BACKEND_DISABLE(x) ((x) << 16)
-#define GB_ADDR_CONFIG 0x263E
-#define NUM_PIPES(x) ((x) << 0)
-#define NUM_PIPES_MASK 0x00000007
-#define NUM_PIPES_SHIFT 0
-#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
-#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
-#define PIPE_INTERLEAVE_SIZE_SHIFT 4
-#define NUM_SHADER_ENGINES(x) ((x) << 12)
-#define NUM_SHADER_ENGINES_MASK 0x00003000
-#define NUM_SHADER_ENGINES_SHIFT 12
-#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
-#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
-#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
-#define NUM_GPUS(x) ((x) << 20)
-#define NUM_GPUS_MASK 0x00700000
-#define NUM_GPUS_SHIFT 20
-#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
-#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
-#define MULTI_GPU_TILE_SIZE_SHIFT 24
-#define ROW_SIZE(x) ((x) << 28)
-#define ROW_SIZE_MASK 0x30000000
-#define ROW_SIZE_SHIFT 28
-
-#define CB_PERFCOUNTER0_SELECT0 0x2688
-#define CB_PERFCOUNTER0_SELECT1 0x2689
-#define CB_PERFCOUNTER1_SELECT0 0x268A
-#define CB_PERFCOUNTER1_SELECT1 0x268B
-#define CB_PERFCOUNTER2_SELECT0 0x268C
-#define CB_PERFCOUNTER2_SELECT1 0x268D
-#define CB_PERFCOUNTER3_SELECT0 0x268E
-#define CB_PERFCOUNTER3_SELECT1 0x268F
-
-#define CB_CGTT_SCLK_CTRL 0x2698
-
-#define TCP_CHAN_STEER_LO 0x2B03
-#define TCP_CHAN_STEER_HI 0x2B94
-
-#define CP_RB0_BASE 0x3040
-#define CP_RB0_CNTL 0x3041
-#define RB_BUFSZ(x) ((x) << 0)
-#define RB_BLKSZ(x) ((x) << 8)
-#define BUF_SWAP_32BIT (2 << 16)
-#define RB_NO_UPDATE (1 << 27)
-#define RB_RPTR_WR_ENA (1 << 31)
-
-#define CP_RB0_RPTR_ADDR 0x3043
-#define CP_RB0_RPTR_ADDR_HI 0x3044
-#define CP_RB0_WPTR 0x3045
-
-#define CP_PFP_UCODE_ADDR 0x3054
-#define CP_PFP_UCODE_DATA 0x3055
-#define CP_ME_RAM_RADDR 0x3056
-#define CP_ME_RAM_WADDR 0x3057
-#define CP_ME_RAM_DATA 0x3058
-
-#define CP_CE_UCODE_ADDR 0x305A
-#define CP_CE_UCODE_DATA 0x305B
-
-#define CP_RB1_BASE 0x3060
-#define CP_RB1_CNTL 0x3061
-#define CP_RB1_RPTR_ADDR 0x3062
-#define CP_RB1_RPTR_ADDR_HI 0x3063
-#define CP_RB1_WPTR 0x3064
-#define CP_RB2_BASE 0x3065
-#define CP_RB2_CNTL 0x3066
-#define CP_RB2_RPTR_ADDR 0x3067
-#define CP_RB2_RPTR_ADDR_HI 0x3068
-#define CP_RB2_WPTR 0x3069
-#define CP_INT_CNTL_RING0 0x306A
-#define CP_INT_CNTL_RING1 0x306B
-#define CP_INT_CNTL_RING2 0x306C
-# define CNTX_BUSY_INT_ENABLE (1 << 19)
-# define CNTX_EMPTY_INT_ENABLE (1 << 20)
-# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
-# define TIME_STAMP_INT_ENABLE (1 << 26)
-# define CP_RINGID2_INT_ENABLE (1 << 29)
-# define CP_RINGID1_INT_ENABLE (1 << 30)
-# define CP_RINGID0_INT_ENABLE (1 << 31)
-#define CP_INT_STATUS_RING0 0x306D
-#define CP_INT_STATUS_RING1 0x306E
-#define CP_INT_STATUS_RING2 0x306F
-# define WAIT_MEM_SEM_INT_STAT (1 << 21)
-# define TIME_STAMP_INT_STAT (1 << 26)
-# define CP_RINGID2_INT_STAT (1 << 29)
-# define CP_RINGID1_INT_STAT (1 << 30)
-# define CP_RINGID0_INT_STAT (1 << 31)
-
// #define PA_SC_RASTER_CONFIG 0xA0D4
# define RB_XSEL2(x) ((x) << 4)
# define RB_XSEL2_MASK (0x3 << 4)
@@ -1185,171 +311,14 @@
# define SE_YSEL(x) ((x) << 28)
# define SE_YSEL_MASK (0x3 << 28)
-/* PIF PHY0 registers idx/data 0x8/0xc */
-#define PB0_PIF_CNTL 0x10
-# define LS2_EXIT_TIME(x) ((x) << 17)
-# define LS2_EXIT_TIME_MASK (0x7 << 17)
-# define LS2_EXIT_TIME_SHIFT 17
-#define PB0_PIF_PAIRING 0x11
-# define MULTI_PIF (1 << 25)
-#define PB0_PIF_PWRDOWN_0 0x12
-# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
-# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_0_SHIFT 24
-#define PB0_PIF_PWRDOWN_1 0x13
-# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
-# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_1_SHIFT 24
-
-#define PB0_PIF_PWRDOWN_2 0x17
-# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
-# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_2_SHIFT 24
-#define PB0_PIF_PWRDOWN_3 0x18
-# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
-# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_3_SHIFT 24
-/* PIF PHY1 registers idx/data 0x10/0x14 */
-#define PB1_PIF_CNTL 0x10
-#define PB1_PIF_PAIRING 0x11
-#define PB1_PIF_PWRDOWN_0 0x12
-#define PB1_PIF_PWRDOWN_1 0x13
-
-#define PB1_PIF_PWRDOWN_2 0x17
-#define PB1_PIF_PWRDOWN_3 0x18
-/* PCIE registers idx/data 0x30/0x34 */
-#define PCIE_CNTL2 0x1c /* PCIE */
-# define SLV_MEM_LS_EN (1 << 16)
-# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
-# define MST_MEM_LS_EN (1 << 18)
-# define REPLAY_MEM_LS_EN (1 << 19)
-#define PCIE_LC_STATUS1 0x28 /* PCIE */
-# define LC_REVERSE_RCVR (1 << 0)
-# define LC_REVERSE_XMIT (1 << 1)
-# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
-# define LC_OPERATING_LINK_WIDTH_SHIFT 2
-# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
-# define LC_DETECTED_LINK_WIDTH_SHIFT 5
-
-#define PCIE_P_CNTL 0x40 /* PCIE */
-# define P_IGNORE_EDB_ERR (1 << 6)
-
/* PCIE PORT registers idx/data 0x38/0x3c */
-#define PCIE_LC_CNTL 0xa0
-# define LC_L0S_INACTIVITY(x) ((x) << 8)
-# define LC_L0S_INACTIVITY_MASK (0xf << 8)
-# define LC_L0S_INACTIVITY_SHIFT 8
-# define LC_L1_INACTIVITY(x) ((x) << 12)
-# define LC_L1_INACTIVITY_MASK (0xf << 12)
-# define LC_L1_INACTIVITY_SHIFT 12
-# define LC_PMI_TO_L1_DIS (1 << 16)
-# define LC_ASPM_TO_L1_DIS (1 << 24)
-#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
-# define LC_LINK_WIDTH_SHIFT 0
-# define LC_LINK_WIDTH_MASK 0x7
+// #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
-# define LC_LINK_WIDTH_RD_SHIFT 4
-# define LC_LINK_WIDTH_RD_MASK 0x70
-# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
-# define LC_RECONFIG_NOW (1 << 8)
-# define LC_RENEGOTIATION_SUPPORT (1 << 9)
-# define LC_RENEGOTIATE_EN (1 << 10)
-# define LC_SHORT_RECONFIG_EN (1 << 11)
-# define LC_UPCONFIGURE_SUPPORT (1 << 12)
-# define LC_UPCONFIGURE_DIS (1 << 13)
-# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
-# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
-# define LC_DYN_LANES_PWR_STATE_SHIFT 21
-#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
-# define LC_XMIT_N_FTS(x) ((x) << 0)
-# define LC_XMIT_N_FTS_MASK (0xff << 0)
-# define LC_XMIT_N_FTS_SHIFT 0
-# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
-# define LC_N_FTS_MASK (0xff << 24)
-#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
-# define LC_GEN2_EN_STRAP (1 << 0)
-# define LC_GEN3_EN_STRAP (1 << 1)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
-# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
-# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
-# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
-# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
-# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
-# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
-# define LC_CURRENT_DATA_RATE_SHIFT 13
-# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
-# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
-# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
-# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
-# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
-
-#define PCIE_LC_CNTL2 0xb1
-# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
-# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
-
-#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
-# define LC_GO_TO_RECOVERY (1 << 30)
-#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
-# define LC_REDO_EQ (1 << 5)
-# define LC_SET_QUIESCE (1 << 13)
-
-/*
- * UVD
- */
-#define UVD_UDEC_ADDR_CONFIG 0x3bd3
-#define UVD_UDEC_DB_ADDR_CONFIG 0x3bd4
-#define UVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
-#define UVD_RBC_RB_RPTR 0x3da4
-#define UVD_RBC_RB_WPTR 0x3da5
-#define UVD_STATUS 0x3daf
-
-#define UVD_CGC_CTRL 0x3dc2
-# define DCM (1 << 0)
-# define CG_DT(x) ((x) << 2)
-# define CG_DT_MASK (0xf << 2)
-# define CLK_OD(x) ((x) << 6)
-# define CLK_OD_MASK (0x1f << 6)
-
- /* UVD CTX indirect */
-#define UVD_CGC_MEM_CTRL 0xC0
-#define UVD_CGC_CTRL2 0xC1
-# define DYN_OR_EN (1 << 0)
-# define DYN_RR_EN (1 << 1)
-# define G_DIV_ID(x) ((x) << 2)
-# define G_DIV_ID_MASK (0x7 << 2)
/*
* PM4
@@ -1583,45 +552,7 @@
/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
#define DMA1_REGISTER_OFFSET 0x200 /* not a register */
-
-#define DMA_RB_CNTL 0x3400
-# define DMA_RB_ENABLE (1 << 0)
-# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
-# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
-# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
-#define DMA_RB_BASE 0x3401
-#define DMA_RB_RPTR 0x3402
-#define DMA_RB_WPTR 0x3403
-
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-
-#define DMA_IB_CNTL 0x3409
-# define DMA_IB_ENABLE (1 << 0)
-# define DMA_IB_SWAP_ENABLE (1 << 4)
-# define CMD_VMID_FORCE (1 << 31)
-#define DMA_IB_RPTR 0x340a
-#define DMA_CNTL 0x340b
-# define TRAP_ENABLE (1 << 0)
-# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
-# define SEM_WAIT_INT_ENABLE (1 << 2)
-# define DATA_SWAP_ENABLE (1 << 3)
-# define FENCE_SWAP_ENABLE (1 << 4)
-# define CTXEMPTY_INT_ENABLE (1 << 28)
-#define DMA_STATUS_REG 0x340d
-# define DMA_IDLE (1 << 0)
-#define DMA_TILING_CONFIG 0x342e
-
-#define DMA_POWER_CNTL 0x342f
-# define MEM_POWER_OVERRIDE (1 << 8)
-#define DMA_CLK_CTRL 0x3430
-
-#define DMA_PG 0x3435
-# define PG_CNTL_ENABLE (1 << 0)
-#define DMA_PGFSM_CONFIG 0x3436
-#define DMA_PGFSM_WRITE 0x3437
+#define SDMA_MAX_INSTANCE 2
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
(((b) & 0x1) << 26) | \
@@ -1650,6 +581,7 @@
#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
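DMA_PACKET() packs the opcode into the top nibble of the first dword; a minimal sketch of emitting a NOP on the async DMA ring (illustrative only; amdgpu_ring_write() is the standard ring helper, and the zeroed fields are an assumption for a packet that carries no payload):

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));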
+/* VCE */
#define VCE_STATUS 0x20004
#define VCE_VCPU_CNTL 0x20014
#define VCE_CLK_EN (1 << 0)
@@ -1726,378 +658,118 @@
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
-#define AMDGPU_MM_INDEX 0x0000
-#define AMDGPU_MM_DATA 0x0001
-
-#define VERDE_NUM_CRTC 6
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-#define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-#define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define EVERGREEN_DATA_FORMAT 0x1ac0
-# define EVERGREEN_INTERLEAVE_EN (1 << 0)
-
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
-#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
-#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
-
-#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a45
-#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1845
-
-#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1847
-#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a47
-
-#define R600_D1GRPH_SWAP_CONTROL 0x1843
-#define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
-
-#define AVIVO_D1VGA_CONTROL 0x00cc
-# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1 << 0)
-# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1 << 8)
-# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
-# define AVIVO_DVGA_CONTROL_ROTATE (1 << 24)
-#define AVIVO_D2VGA_CONTROL 0x00ce
-
-#define R600_BUS_CNTL 0x1508
-# define R600_BIOS_ROM_DIS (1 << 1)
+
#define R600_ROM_CNTL 0x580
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
-#define FMT_BIT_DEPTH_CONTROL 0x1bf2
-#define FMT_TRUNCATE_EN (1 << 0)
-#define FMT_TRUNCATE_DEPTH (1 << 4)
-#define FMT_SPATIAL_DITHER_EN (1 << 8)
-#define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
-#define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
-#define FMT_FRAME_RANDOM_ENABLE (1 << 13)
-#define FMT_RGB_RANDOM_ENABLE (1 << 14)
-#define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
-#define FMT_TEMPORAL_DITHER_EN (1 << 16)
-#define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
-#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
-#define FMT_TEMPORAL_LEVEL (1 << 24)
-#define FMT_TEMPORAL_DITHER_RESET (1 << 25)
-#define FMT_25FRC_SEL(x) ((x) << 26)
-#define FMT_50FRC_SEL(x) ((x) << 28)
-#define FMT_75FRC_SEL(x) ((x) << 30)
-
-#define EVERGREEN_DC_LUT_CONTROL 0x1a80
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x1a81
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x1a82
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x1a83
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x1a84
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x1a85
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x1a86
-#define EVERGREEN_DC_LUT_30_COLOR 0x1a7c
-#define EVERGREEN_DC_LUT_RW_INDEX 0x1a79
-#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x1a7e
-#define EVERGREEN_DC_LUT_RW_MODE 0x1a78
-
-#define EVERGREEN_GRPH_ENABLE 0x1a00
-#define EVERGREEN_GRPH_CONTROL 0x1a01
-#define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define EVERGREEN_GRPH_DEPTH_8BPP 0
-#define EVERGREEN_GRPH_DEPTH_16BPP 1
-#define EVERGREEN_GRPH_DEPTH_32BPP 2
-#define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define EVERGREEN_ADDR_SURF_2_BANK 0
-#define EVERGREEN_ADDR_SURF_4_BANK 1
-#define EVERGREEN_ADDR_SURF_8_BANK 2
-#define EVERGREEN_ADDR_SURF_16_BANK 3
-#define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
-#define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
-#define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
-
-#define EVERGREEN_GRPH_FORMAT_INDEXED 0
-#define EVERGREEN_GRPH_FORMAT_ARGB1555 0
-#define EVERGREEN_GRPH_FORMAT_ARGB565 1
-#define EVERGREEN_GRPH_FORMAT_ARGB4444 2
-#define EVERGREEN_GRPH_FORMAT_AI88 3
-#define EVERGREEN_GRPH_FORMAT_MONO16 4
-#define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+#define GRPH_ARRAY_LINEAR_GENERAL 0
+#define GRPH_ARRAY_LINEAR_ALIGNED 1
+#define GRPH_ARRAY_1D_TILED_THIN1 2
+#define GRPH_ARRAY_2D_TILED_THIN1 4
+
+#define ES_AND_GS_AUTO 3
+#define BUF_SWAP_32BIT (2 << 16)
+
+#define GRPH_DEPTH_8BPP 0
+#define GRPH_DEPTH_16BPP 1
+#define GRPH_DEPTH_32BPP 2
+
+/* 8 BPP */
+#define GRPH_FORMAT_INDEXED 0
+
+/* 16 BPP */
+#define GRPH_FORMAT_ARGB1555 0
+#define GRPH_FORMAT_ARGB565 1
+#define GRPH_FORMAT_ARGB4444 2
+#define GRPH_FORMAT_AI88 3
+#define GRPH_FORMAT_MONO16 4
+#define GRPH_FORMAT_BGRA5551 5
/* 32 BPP */
-#define EVERGREEN_GRPH_FORMAT_ARGB8888 0
-#define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
-#define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
-#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
-#define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
-#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
-#define EVERGREEN_GRPH_FORMAT_RGB111110 6
-#define EVERGREEN_GRPH_FORMAT_BGR101111 7
-#define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
-#define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
-#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-#define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
-#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
-#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
-#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-
-#define EVERGREEN_GRPH_SWAP_CONTROL 0x1a03
-#define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-# define EVERGREEN_GRPH_ENDIAN_NONE 0
-# define EVERGREEN_GRPH_ENDIAN_8IN16 1
-# define EVERGREEN_GRPH_ENDIAN_8IN32 2
-# define EVERGREEN_GRPH_ENDIAN_8IN64 3
-#define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-# define EVERGREEN_GRPH_RED_SEL_R 0
-# define EVERGREEN_GRPH_RED_SEL_G 1
-# define EVERGREEN_GRPH_RED_SEL_B 2
-# define EVERGREEN_GRPH_RED_SEL_A 3
-#define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-# define EVERGREEN_GRPH_GREEN_SEL_G 0
-# define EVERGREEN_GRPH_GREEN_SEL_B 1
-# define EVERGREEN_GRPH_GREEN_SEL_A 2
-# define EVERGREEN_GRPH_GREEN_SEL_R 3
-#define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-# define EVERGREEN_GRPH_BLUE_SEL_B 0
-# define EVERGREEN_GRPH_BLUE_SEL_A 1
-# define EVERGREEN_GRPH_BLUE_SEL_R 2
-# define EVERGREEN_GRPH_BLUE_SEL_G 3
-#define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-# define EVERGREEN_GRPH_ALPHA_SEL_A 0
-# define EVERGREEN_GRPH_ALPHA_SEL_R 1
-# define EVERGREEN_GRPH_ALPHA_SEL_G 2
-# define EVERGREEN_GRPH_ALPHA_SEL_B 3
-
-#define EVERGREEN_D3VGA_CONTROL 0xf8
-#define EVERGREEN_D4VGA_CONTROL 0xf9
-#define EVERGREEN_D5VGA_CONTROL 0xfa
-#define EVERGREEN_D6VGA_CONTROL 0xfb
-
-#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
-
-#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02
-#define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
-
-#define EVERGREEN_GRPH_PITCH 0x1a06
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x1a09
-#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x1a0a
-#define EVERGREEN_GRPH_X_START 0x1a0b
-#define EVERGREEN_GRPH_Y_START 0x1a0c
-#define EVERGREEN_GRPH_X_END 0x1a0d
-#define EVERGREEN_GRPH_Y_END 0x1a0e
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_FLIP_CONTROL 0x1a12
-#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
-
-#define EVERGREEN_VIEWPORT_START 0x1b5c
-#define EVERGREEN_VIEWPORT_SIZE 0x1b5d
-#define EVERGREEN_DESKTOP_HEIGHT 0x1ac1
-
-/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
-#define EVERGREEN_CUR_CONTROL 0x1a66
-# define EVERGREEN_CURSOR_EN (1 << 0)
-# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
-# define EVERGREEN_CURSOR_MONO 0
-# define EVERGREEN_CURSOR_24_1 1
-# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
-# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
-# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
-# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
-# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
-# define EVERGREEN_CURSOR_URGENT_1_8 1
-# define EVERGREEN_CURSOR_URGENT_1_4 2
-# define EVERGREEN_CURSOR_URGENT_3_8 3
-# define EVERGREEN_CURSOR_URGENT_1_2 4
-#define EVERGREEN_CUR_SURFACE_ADDRESS 0x1a67
-# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
-#define EVERGREEN_CUR_SIZE 0x1a68
-#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x1a69
-#define EVERGREEN_CUR_POSITION 0x1a6a
-#define EVERGREEN_CUR_HOT_SPOT 0x1a6b
-#define EVERGREEN_CUR_COLOR1 0x1a6c
-#define EVERGREEN_CUR_COLOR2 0x1a6d
-#define EVERGREEN_CUR_UPDATE 0x1a6e
-# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
-# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
-# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
-# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define NI_INPUT_CSC_CONTROL 0x1a35
-# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_CSC_BYPASS 0
-# define NI_INPUT_CSC_PROG_COEFF 1
-# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
-# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_OUTPUT_CSC_CONTROL 0x1a3c
-# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
-# define NI_OUTPUT_CSC_BYPASS 0
-# define NI_OUTPUT_CSC_TV_RGB 1
-# define NI_OUTPUT_CSC_YCBCR_601 2
-# define NI_OUTPUT_CSC_YCBCR_709 3
-# define NI_OUTPUT_CSC_PROG_COEFF 4
-# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
-# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
-
-#define NI_DEGAMMA_CONTROL 0x1a58
-# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_DEGAMMA_BYPASS 0
-# define NI_DEGAMMA_SRGB_24 1
-# define NI_DEGAMMA_XVYCC_222 2
-# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
-# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
-
-#define NI_GAMUT_REMAP_CONTROL 0x1a59
-# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
-# define NI_GAMUT_REMAP_BYPASS 0
-# define NI_GAMUT_REMAP_PROG_COEFF 1
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
-# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_REGAMMA_CONTROL 0x1aa0
-# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
-# define NI_REGAMMA_BYPASS 0
-# define NI_REGAMMA_SRGB_24 1
-# define NI_REGAMMA_XVYCC_222 2
-# define NI_REGAMMA_PROG_A 3
-# define NI_REGAMMA_PROG_B 4
-# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
-
-
-#define NI_PRESCALE_GRPH_CONTROL 0x1a2d
-# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
-
-#define NI_PRESCALE_OVL_CONTROL 0x1a31
-# define NI_OVL_PRESCALE_BYPASS (1 << 4)
-
-#define NI_INPUT_GAMMA_CONTROL 0x1a10
-# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_GAMMA_USE_LUT 0
-# define NI_INPUT_GAMMA_BYPASS 1
-# define NI_INPUT_GAMMA_SRGB_24 2
-# define NI_INPUT_GAMMA_XVYCC_222 3
-# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
-
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-# define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
-
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
-
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
-
-#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
-#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
-#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
-#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
-
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+#define GRPH_FORMAT_ARGB8888 0
+#define GRPH_FORMAT_ARGB2101010 1
+#define GRPH_FORMAT_32BPP_DIG 2
+#define GRPH_FORMAT_8B_ARGB2101010 3
+#define GRPH_FORMAT_BGRA1010102 4
+#define GRPH_FORMAT_8B_BGRA1010102 5
+#define GRPH_FORMAT_RGB111110 6
+#define GRPH_FORMAT_BGR101111 7
+
+#define GRPH_ENDIAN_NONE 0
+#define GRPH_ENDIAN_8IN16 1
+#define GRPH_ENDIAN_8IN32 2
+#define GRPH_ENDIAN_8IN64 3
+#define GRPH_RED_SEL_R 0
+#define GRPH_RED_SEL_G 1
+#define GRPH_RED_SEL_B 2
+#define GRPH_RED_SEL_A 3
+
+#define GRPH_GREEN_SEL_G 0
+#define GRPH_GREEN_SEL_B 1
+#define GRPH_GREEN_SEL_A 2
+#define GRPH_GREEN_SEL_R 3
+
+#define GRPH_BLUE_SEL_B 0
+#define GRPH_BLUE_SEL_A 1
+#define GRPH_BLUE_SEL_R 2
+#define GRPH_BLUE_SEL_G 3
+
+#define GRPH_ALPHA_SEL_A 0
+#define GRPH_ALPHA_SEL_R 1
+#define GRPH_ALPHA_SEL_G 2
+#define GRPH_ALPHA_SEL_B 3
+
+/* CUR_CONTROL */
+# define CURSOR_MONO 0
+# define CURSOR_24_1 1
+# define CURSOR_24_8_PRE_MULT 2
+# define CURSOR_24_8_UNPRE_MULT 3
+# define CURSOR_URGENT_ALWAYS 0
+# define CURSOR_URGENT_1_8 1
+# define CURSOR_URGENT_1_4 2
+# define CURSOR_URGENT_3_8 3
+# define CURSOR_URGENT_1_2 4
+
+/* INPUT_CSC_CONTROL */
+# define INPUT_CSC_BYPASS 0
+# define INPUT_CSC_PROG_COEFF 1
+# define INPUT_CSC_PROG_SHARED_MATRIXA 2
+
+/* OUTPUT_CSC_CONTROL */
+# define OUTPUT_CSC_BYPASS 0
+# define OUTPUT_CSC_TV_RGB 1
+# define OUTPUT_CSC_YCBCR_601 2
+# define OUTPUT_CSC_YCBCR_709 3
+# define OUTPUT_CSC_PROG_COEFF 4
+# define OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+
+/* DEGAMMA_CONTROL */
+# define DEGAMMA_BYPASS 0
+# define DEGAMMA_SRGB_24 1
+# define DEGAMMA_XVYCC_222 2
+
+/* GAMUT_REMAP_CONTROL */
+# define GAMUT_REMAP_BYPASS 0
+# define GAMUT_REMAP_PROG_COEFF 1
+# define GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+
+/* REGAMMA_CONTROL */
+# define REGAMMA_BYPASS 0
+# define REGAMMA_SRGB_24 1
+# define REGAMMA_XVYCC_222 2
+# define REGAMMA_PROG_A 3
+# define REGAMMA_PROG_B 4
+
+
+/* INPUT_GAMMA_CONTROL */
+# define INPUT_GAMMA_USE_LUT 0
+# define INPUT_GAMMA_BYPASS 1
+# define INPUT_GAMMA_SRGB_24 2
+# define INPUT_GAMMA_XVYCC_222 3
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -2113,20 +785,9 @@
#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-#define CONFIG_CNTL 0x1509
-#define CC_DRM_ID_STRAPS 0x1559
#define AMDGPU_PCIE_INDEX 0xc
#define AMDGPU_PCIE_DATA 0xd
-#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
-#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
-#define DMA_MODE 0x342f
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-#define DMA_BUSY_MASK 0x20
-#define DMA1_BUSY_MASK 0x40
-#define SDMA_MAX_INSTANCE 2
-
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
#define PCIE_PORT_INDEX 0xe
@@ -2136,8 +797,6 @@
#define EVERGREEN_PIF_PHY1_INDEX 0x10
#define EVERGREEN_PIF_PHY1_DATA 0x14
-#define MC_VM_FB_OFFSET 0x81a
-
/* Discrete VCE clocks */
#define CG_VCEPLL_FUNC_CNTL 0xc0030600
#define VCEPLL_RESET_MASK 0x00000001
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 659eab9b90be..9e74c9822e62 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -584,6 +584,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
* Enable triggering of GPU reset only if specified
* by module parameter.
*/
+ if (adev->pcie_reset_ctx.in_link_reset)
+ return AMD_RESET_METHOD_LINK;
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
else if (!(adev->flags & AMD_IS_APU))
@@ -640,6 +642,9 @@ asic_reset:
case AMD_RESET_METHOD_MODE2:
dev_info(adev->dev, "MODE2 reset\n");
return amdgpu_dpm_mode2_reset(adev);
+ case AMD_RESET_METHOD_LINK:
+ dev_info(adev->dev, "Link reset\n");
+ return amdgpu_device_link_reset(adev);
default:
dev_info(adev->dev, "MODE1 reset\n");
return amdgpu_device_mode1_reset(adev);
@@ -1213,6 +1218,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
AMD_PG_SUPPORT_JPEG;
/*TODO: need a new external_rev_id for GC 9.4.4? */
adev->external_rev_id = adev->rev_id + 0x46;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->external_rev_id = adev->rev_id + 0x50;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index ef7c603b50ae..c8ac11a9cdef 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -118,7 +118,6 @@ int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
int arct_reg_base_init(struct amdgpu_device *adev);
int aldebaran_reg_base_init(struct amdgpu_device *adev);
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev);
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id);
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev);
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index a5000c171c02..cf93fa477674 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -552,6 +552,11 @@
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+#define PACKET3_RUN_CLEANER_SHADER_9_0 0xD7
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
+
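A GC 9.x cleaner-shader dispatch emits this two-dword packet roughly as below (sketch; assumes the standard PACKET3() encoding helper defined earlier in this header, mirroring how the existing PACKET3_RUN_CLEANER_SHADER opcode is used):

	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
	amdgpu_ring_write(ring, 0);	/* RESERVED [31:0] */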
#define PACKET3_RUN_CLEANER_SHADER 0xD2
/* 1. header
* 2. RESERVED [31:0]
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 0e404c074975..e590cbdd8de9 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -174,19 +174,76 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
+static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ uint32_t vram_type = adev->gmc.vram_type;
+ struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits);
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
+ /* default setting */
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+ flip_bits->flip_row_bit = 13;
+ flip_bits->bit_num = 4;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+ } else if (nps == AMDGPU_NPS4_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+ }
+
+ switch (vram_type) {
+ case AMDGPU_VRAM_TYPE_HBM:
+ /* other nps modes are taken as nps1 */
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+ break;
+ case AMDGPU_VRAM_TYPE_HBM3E:
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ flip_bits->flip_row_bit = 12;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+ break;
+ default:
+ dev_warn(adev->dev,
+ "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+ break;
+ }
+
+ adev->umc.retire_unit = 0x1 << flip_bits->bit_num;
+}
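In the default NPS1/HBM case bit_num stays 4, so retire_unit works out to 1 << 4 = 16 pages per bad address. The expansion over every flip-bit combination then has the shape below (sketch of the arithmetic; base_pa stands in for the soc_pa with the flip bits cleared, matching the loop in umc_v12_0_convert_error_address further down):

	for (column = 0; column < retire_unit; column++) {
		uint64_t pa = base_pa;
		for (i = 0; i < bit_num; i++)
			pa |= ((column >> i) & 0x1ULL) << flip_bits[i];
		/* pa is one of the 16 candidate pages to retire */
	}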
+
static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
struct ta_ras_query_address_input *addr_in,
struct ta_ras_query_address_output *addr_out,
bool dump_addr)
{
- uint32_t col, col_lower, row, row_lower, bank;
+ uint32_t col, col_lower, row, row_lower, row_high, bank;
uint32_t channel_index = 0, umc_inst = 0;
- uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
+ uint32_t i, bit_num, retire_unit, *flip_bits;
uint64_t soc_pa, column, err_addr;
struct ta_ras_query_address_output addr_out_tmp;
struct ta_ras_query_address_output *paddr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
if (!addr_out)
@@ -211,46 +268,46 @@ static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
umc_inst = addr_in->ma.umc_inst;
}
- loop_bits[0] = UMC_V12_0_PA_C2_BIT;
- loop_bits[1] = UMC_V12_0_PA_C3_BIT;
- loop_bits[2] = UMC_V12_0_PA_C4_BIT;
- loop_bits[3] = UMC_V12_0_PA_R13_BIT;
-
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
-
- /* other nps modes are taken as nps1 */
- if (nps == AMDGPU_NPS4_PARTITION_MODE) {
- loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
- loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
- loop_bits[2] = UMC_V12_0_PA_B0_BIT;
- loop_bits[3] = UMC_V12_0_PA_R11_BIT;
- }
+ flip_bits = adev->umc.flip_bits.flip_bits_in_pa;
+ bit_num = adev->umc.flip_bits.bit_num;
+ retire_unit = adev->umc.retire_unit;
soc_pa = paddr_out->pa.pa;
channel_index = paddr_out->pa.channel_idx;
/* clear loop bits in soc physical address */
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa &= ~BIT_ULL(loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa &= ~BIT_ULL(flip_bits[i]);
paddr_out->pa.pa = soc_pa;
/* get column bit 0 and 1 in mca address */
col_lower = (err_addr >> 1) & 0x3ULL;
- /* MA_R13_BIT will be handled later */
+ /* extra row bit will be handled later */
row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
+ row_lower &= ~BIT_ULL(adev->umc.flip_bits.flip_row_bit);
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 5, 0)) {
+ row_high = (soc_pa >> adev->umc.flip_bits.r13_in_pa) & 0x3ULL;
+ /* Each channel maps 2.25 GB; in the MCA-address-to-PA conversion,
+ * [R14 R13] is remapped when those two bits read 0x3, so take them
+ * from the PA instead of the MCA address.
+ */
+ row_lower |= (row_high << 13);
+ }
if (!err_data && !dump_addr)
goto out;
/* loop for all possibilities of retired bits */
- for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
+ for (column = 0; column < retire_unit; column++) {
soc_pa = paddr_out->pa.pa;
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << flip_bits[i]);
col = ((column & 0x7) << 2) | col_lower;
- /* add row bit 13 */
- row = ((column >> 3) << 13) | row_lower;
+ /* handle extra row bit */
+ if (bit_num == RETIRE_FLIP_BITS_NUM)
+ row = ((column >> 3) << adev->umc.flip_bits.flip_row_bit) |
+ row_lower;
if (dump_addr)
dev_info(adev->dev,
@@ -428,8 +485,12 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank
bank->regs[ACA_REG_IDX_ADDR]);
ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
- count = ext_error_code == 0 ?
- ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
+ if (umc_v12_0_is_deferred_error(adev, status))
+ count = ext_error_code == 0 ?
+ adev->umc.err_addr_cnt / adev->umc.retire_unit : 1ULL;
+ else
+ count = ext_error_code == 0 ?
+ ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
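
As a worked example of the deferred-error branch above: with bit_num = 4 the retire unit is 16 pages, so err_addr_cnt = 48 converted addresses are logged as 48 / 16 = 3 bank errors, i.e. one event per faulting page rather than one per retired page.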
@@ -469,8 +530,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
struct ta_ras_query_address_output addr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
- uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
+ uint32_t shift_bit = adev->umc.flip_bits.flip_bits_in_pa[2];
int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
@@ -515,11 +575,6 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
ecc_err->channel_idx = addr_out.pa.channel_idx;
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
- if (nps == AMDGPU_NPS4_PARTITION_MODE)
- shift_bit = UMC_V12_0_PA_B0_BIT;
-
/* If converted pa_pfn is 0, use pa C4 pfn. */
if (!ecc_err->pa_pfn)
ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
@@ -665,5 +720,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.update_ecc_status = umc_v12_0_update_ecc_status,
.convert_ras_err_addr = umc_v12_0_convert_error_address,
.get_die_id_from_pa = umc_v12_0_get_die_id,
+ .get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index 9298018d938f..63b7e7254526 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,8 +55,6 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
-/* C2, C3, C4, R13, four bits in MCA address are looped in retirement */
-#define UMC_V12_0_RETIRE_LOOP_BITS 4
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
@@ -64,13 +62,16 @@
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
#define UMC_V12_0_PA_R0_BIT 22
+#define UMC_V12_0_PA_R10_BIT 32
#define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R12_BIT 34
#define UMC_V12_0_PA_R13_BIT 35
/* channel bit in SOC physical address */
#define UMC_V12_0_PA_CH4_BIT 12
#define UMC_V12_0_PA_CH5_BIT 13
/* bank bit in SOC physical address */
#define UMC_V12_0_PA_B0_BIT 19
+#define UMC_V12_0_PA_B1_BIT 20
/* row bits in MCA address */
#define UMC_V12_0_MA_R0_BIT 10
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 21b57c29bf7d..c74947705d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1009,6 +1009,11 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_vcn_inst *vinst)
jpeg_v1_0_start(adev, 0);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
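
The read-back added here (and repeated across the VCN start/stop paths in this series) is the standard posted-MMIO flush idiom: reading any register in the block forces earlier buffered writes to complete. A hedged kernel-style sketch, with placeholder offsets rather than real UVD registers:

#include <linux/io.h>

/* Posted MMIO writes can still be in flight when the CPU moves on;
 * a read from the same block orders and flushes them. Offsets are
 * placeholders for illustration only.
 */
static void example_write_then_flush(void __iomem *mmio)
{
	writel(0x1, mmio + 0x100);	/* posted write, may be buffered */
	(void)readl(mmio + 0x104);	/* read-back forces completion */
}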
@@ -1154,6 +1159,11 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst)
jpeg_v1_0_start(adev, 1);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1216,6 +1226,12 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_vcn_inst *vinst)
vcn_v1_0_enable_clock_gating(vinst);
vcn_1_0_enable_static_power_gating(vinst);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1250,6 +1266,11 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 8e7a36f26e9c..68b4371df0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503
@@ -97,6 +98,8 @@ static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst);
+
/**
* vcn_v2_0_early_init - set function pointers and load microcode
*
@@ -212,6 +215,12 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
}
adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ adev->vcn.inst[0].reset = vcn_v2_0_reset;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -232,6 +241,10 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -259,6 +272,8 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
r = amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
@@ -977,6 +992,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1151,6 +1172,11 @@ static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1182,6 +1208,11 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1247,6 +1278,11 @@ static int vcn_v2_0_stop(struct amdgpu_vcn_inst *vinst)
vcn_v2_0_enable_clock_gating(vinst);
vcn_v2_0_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS);
+
power_off:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, 0);
@@ -1333,6 +1369,16 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
return 0;
}
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v2_0_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_0_start(vinst);
+}
+
static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -2154,6 +2200,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
@@ -2183,6 +2230,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index d716510b8dd6..bc30a5326866 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
@@ -101,6 +102,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst);
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
@@ -403,8 +405,14 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+ adev->vcn.inst[j].reset = vcn_v2_5_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -424,6 +432,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -454,6 +466,8 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -1157,6 +1171,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
@@ -1342,6 +1361,11 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
return 0;
}
@@ -1568,6 +1592,11 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
@@ -1634,6 +1663,10 @@ static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
@@ -1796,6 +1829,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -1894,6 +1928,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -1922,6 +1957,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
}
}
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v2_5_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_5_start(vinst);
+}
+
static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 22ae1939476f..4b8f4407047f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -40,6 +40,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
@@ -109,6 +110,7 @@ static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst);
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
@@ -288,8 +290,14 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
+ adev->vcn.inst[i].reset = vcn_v3_0_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -305,6 +313,10 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -337,6 +349,8 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -1172,6 +1186,11 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
@@ -1359,6 +1378,11 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
}
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
return 0;
}
@@ -1601,6 +1625,11 @@ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
@@ -1673,6 +1702,11 @@ static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v3_0_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
@@ -2012,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -2110,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -2143,6 +2179,18 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
}
}
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v3_0_stop(vinst);
+ if (r)
+ return r;
+ vcn_v3_0_enable_clock_gating(vinst);
+ vcn_v3_0_enable_static_power_gating(vinst);
+ return vcn_v3_0_start(vinst);
+}
+
static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index c6f6392c1c20..1924e075b66f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_HARVEST_MMSCH 0
@@ -238,9 +239,10 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
@@ -614,7 +616,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
@@ -1120,6 +1123,11 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
return 0;
}
@@ -1301,6 +1309,11 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
return 0;
}
@@ -1581,6 +1594,11 @@ static void vcn_v4_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}
/**
@@ -1664,6 +1682,11 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v4_0_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
@@ -1945,6 +1968,24 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
return 0;
}
+static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1974,6 +2015,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 3e176b4b7c69..2a3663b551af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -45,6 +45,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
@@ -168,6 +169,10 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ /* VCN POISON TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_sw_init(adev, i);
@@ -287,6 +292,31 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v4_0_3_hw_init_inst(struct amdgpu_vcn_inst *vinst)
+{
+ int vcn_inst;
+ struct amdgpu_device *adev = vinst->adev;
+ struct amdgpu_ring *ring;
+ int inst_idx = vinst->inst;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ if (ring->use_doorbell) {
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst,
+ adev->vcn.inst[inst_idx].aid_id);
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+ }
+
+ return 0;
+}
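
The doorbell arithmetic the helper reuses is a fixed stride per VCN instance on top of the shared vcn_ring0_1 base. A small standalone illustration with an invented base index:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t vcn_ring0_1 = 0x90;	/* hypothetical base index */
	uint32_t vcn_inst;

	/* (base << 1) + 9 * instance, as in vcn_v4_0_3_hw_init_inst() */
	for (vcn_inst = 0; vcn_inst < 4; vcn_inst++)
		printf("inst %u -> doorbell %u\n", vcn_inst,
		       (vcn_ring0_1 << 1) + 9 * vcn_inst);
	return 0;
}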
+
/**
* vcn_v4_0_3_hw_init - start and test VCN block
*
@@ -298,7 +328,8 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ struct amdgpu_vcn_inst *vinst;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v4_0_3_start_sriov(adev);
@@ -321,28 +352,9 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn4_fw_shared *fw_shared;
- vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell) {
- adev->nbio.funcs->vcn_doorbell_range(
- adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst,
- adev->vcn.inst[i].aid_id);
-
- WREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL,
- ring->doorbell_index
- << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- /* Read DB_CTRL to flush the write DB_CTRL command. */
- RREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL);
- }
+ vinst = &adev->vcn.inst[i];
+ vcn_v4_0_3_hw_init_inst(vinst);
/* Re-init fw_shared when RAS fatal error occurred */
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
@@ -379,6 +391,9 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
+
return 0;
}
@@ -962,6 +977,11 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
/*resetting done, fw can check RB ring */
fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
@@ -1355,6 +1375,12 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
@@ -1438,6 +1464,11 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
/* apply HW clock gating */
vcn_v4_0_3_enable_clock_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
Done:
return 0;
}
@@ -1563,6 +1594,34 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
+ return r;
+ }
+
+ /* This flag is not set for VF; assume it is always disabled there. */
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ vcn_v4_0_3_hw_init_inst(vinst);
+ vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1591,6 +1650,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_3_ring_reset,
};
/**
@@ -1774,11 +1834,24 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int vcn_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
.set = vcn_v4_0_3_set_interrupt_state,
.process = vcn_v4_0_3_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v4_0_3_ras_irq_funcs = {
+ .set = vcn_v4_0_3_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
/**
* vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
*
@@ -1794,6 +1867,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.num_types++;
}
adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
+
+ adev->vcn.inst->ras_poison_irq.num_types = 1;
+ adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
}
static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -1941,9 +2017,44 @@ static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
}
+static uint32_t vcn_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_VCN_V4_0_3_VCPU_VCODEC:
+ reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in VCN%d, sub_block %d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool vcn_v4_0_3_query_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst, sub;
+ uint32_t poison_stat = 0;
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+ for (sub = 0; sub < AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ vcn_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
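
The poison query folds every instance/sub-block status field into one boolean. A self-contained model of that accumulation; the status values returned below are invented:

#include <stdint.h>
#include <stdio.h>

/* fake register read: pretend instance 1, sub-block 0 reports poison */
static uint32_t read_poison_status(uint32_t inst, uint32_t sub)
{
	return (inst == 1 && sub == 0) ? 0x1 : 0x0;
}

int main(void)
{
	uint32_t inst, sub, poison_stat = 0;

	for (inst = 0; inst < 4; inst++)
		for (sub = 0; sub < 1; sub++)
			poison_stat += read_poison_status(inst, sub) & 0x1;

	printf("poison detected: %s\n", poison_stat ? "yes" : "no");
	return 0;
}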
+
static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
.query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
+ .query_poison_status = vcn_v4_0_3_query_poison_status,
};
static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@@ -2019,6 +2130,13 @@ static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_commo
if (r)
return r;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->vcn.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
&vcn_v4_0_3_aca_info, NULL);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
index 03572a1d0c9c..aeab89853a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
@@ -24,6 +24,12 @@
#ifndef __VCN_V4_0_3_H__
#define __VCN_V4_0_3_H__
+enum amdgpu_vcn_v4_0_3_sub_block {
+ AMDGPU_VCN_V4_0_3_VCPU_VCODEC = 0,
+
+ AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index ba603b2246e2..caf2d95a85d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
+#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000)
#define VCN_HARVEST_MMSCH 0
@@ -207,6 +208,10 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+ fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+ fw_shared->drm_key_wa.method =
+ AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
@@ -214,6 +219,14 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
}
+ adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -1022,6 +1035,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL);
+
return 0;
}
@@ -1204,6 +1221,10 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+
return 0;
}
@@ -1234,6 +1255,11 @@ static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}
/**
@@ -1317,6 +1343,11 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v4_0_5_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
@@ -1435,6 +1466,24 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_5_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_5_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1462,6 +1511,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_5_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index d99d05f42f1d..07a6e9582880 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -196,9 +196,10 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
vcn_v5_0_0_alloc_ip_dump(adev);
@@ -533,7 +534,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
return;
}
@@ -793,6 +795,11 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
return 0;
}
@@ -945,6 +952,11 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
return 0;
}
@@ -976,6 +988,11 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
return;
}
@@ -1057,6 +1074,11 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v5_0_0_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
@@ -1171,6 +1193,24 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v5_0_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v5_0_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1198,6 +1238,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_0_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index e0e84ef7f568..cdefd7fcb0da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -30,6 +30,7 @@
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "vcn_v4_0_3.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
@@ -39,12 +40,13 @@
#include <drm/drm_drv.h>
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
-
+static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
/**
* vcn_v5_0_1_early_init - set function pointers and load microcode
*
@@ -64,6 +66,7 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
vcn_v5_0_1_set_unified_ring_funcs(adev);
vcn_v5_0_1_set_irq_funcs(adev);
+ vcn_v5_0_1_set_ras_funcs(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state;
@@ -111,6 +114,10 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ /* VCN POISON TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
vcn_inst = GET_INST(VCN, i);
@@ -126,7 +133,14 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 32 * vcn_inst;
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
@@ -143,6 +157,12 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
vcn_v5_0_0_alloc_ip_dump(adev);
return amdgpu_vcn_sysfs_reset_mask_init(adev);
@@ -172,6 +192,9 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -204,24 +227,38 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
- adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
- ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell)
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst),
- adev->vcn.inst[i].aid_id);
-
- /* Re-init fw_shared, if required */
- vcn_v5_0_1_fw_shared_init(adev, i);
-
- r = amdgpu_ring_test_helper(ring);
+ if (amdgpu_sriov_vf(adev)) {
+ r = vcn_v5_0_1_start_sriov(adev);
if (r)
return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v5_0_1_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ } else {
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
+
+ /* Re-init fw_shared, if required */
+ vcn_v5_0_1_fw_shared_init(adev, i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
return 0;
@@ -247,6 +284,9 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
+
return 0;
}
@@ -629,6 +669,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ /* resetting ring, fw should not check RB ring */
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
/* Pause dpg */
vcn_v5_0_1_pause_dpg_mode(vinst, &state);
@@ -641,7 +684,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
@@ -652,6 +695,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ /* resetting done, fw can check RB ring */
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
@@ -663,6 +707,195 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
return 0;
}
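
The hunk above moves the FW_QUEUE_RING_RESET set before any ring register is touched, so the firmware skips RB checks for the whole reprogramming window. The handshake reduces to this shape; the flag value and helper below are illustrative, not the driver's:

#include <stdint.h>

#define FW_QUEUE_RING_RESET (1u << 0)	/* placeholder bit value */

/* queue_mode lives in memory shared with the VCN firmware */
static void reprogram_ring(volatile uint32_t *queue_mode,
			   void (*program_rb_registers)(void))
{
	*queue_mode |= FW_QUEUE_RING_RESET;	/* fw: ring is being reset */
	program_rb_registers();			/* safe to rewrite RPTR/WPTR */
	*queue_mode &= ~FW_QUEUE_RING_RESET;	/* fw: ring is valid again */
}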
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+ struct amdgpu_ring *ring_enc;
+ uint64_t cache_addr;
+ uint64_t rb_enc_addr;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t offset, cache_size;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+ uint32_t enabled_vcn;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_direct_read_modify_write
+ direct_rd_mod_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ vcn_inst = GET_INST(VCN, i);
+
+ vcn_v5_0_1_fw_shared_init(adev, vcn_inst);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ table_size = 0;
+
+ MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+
+ offset = 0;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = cache_size;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE0),
+ cache_size);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET1), 0);
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE;
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET2), 0);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);
+
+ fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
+ rb_setup = &fw_shared->rb_setup;
+
+ ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
+ ring_enc->wptr = 0;
+ rb_enc_addr = ring_enc->gpu_addr;
+
+ rb_setup->is_rb_enabled_flags |= RB_ENABLED;
+ rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+ rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+ rb_setup->rb_size = ring_enc->ring_size / 4;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ MMSCH_V5_0_INSERT_END();
+
+ header.vcn0.init_status = 0;
+ header.vcn0.table_offset = header.total_size;
+ header.vcn0.table_size = table_size;
+ header.total_size += table_size;
+
+ /* Send init table to mmsch */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
+ "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
+ }
+ }
+
+ return 0;
+}
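
The mailbox wait in vcn_v5_0_1_start_sriov() is a bounded poll: ring the host doorbell, then re-read the response register every 10 us until it goes non-zero or roughly 1 ms elapses. A userspace model of the same loop, with the register read simulated:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mailbox_read(uint32_t elapsed_us)
{
	return elapsed_us >= 50 ? 1u : 0u;	/* respond after ~50 us */
}

int main(void)
{
	const uint32_t timeout_us = 1000, expected = 1;
	uint32_t elapsed = 0, resp = 0;

	while (resp != expected) {
		resp = fake_mailbox_read(elapsed);
		if (resp != 0)
			break;	/* any non-zero response ends the wait */
		elapsed += 10;	/* stands in for udelay(10) */
		if (elapsed >= timeout_us) {
			fprintf(stderr, "MMSCH mailbox timed out\n");
			return 1;
		}
	}
	printf("mailbox response 0x%x after ~%u us\n", resp, elapsed);
	return 0;
}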
+
/**
* vcn_v5_0_1_start - VCN start
*
@@ -809,6 +1042,11 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
@@ -843,6 +1081,11 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
}
/**
@@ -918,6 +1161,11 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
/* clear status */
WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
@@ -1103,8 +1351,18 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = vinst->adev;
int ret = 0;
+ /* For SR-IOV, the guest must not control VCN power gating;
+ * the MMSCH firmware owns both power gating and clock gating,
+ * so the guest should not touch CGC or PG.
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == vinst->cur_state)
return 0;
@@ -1160,10 +1418,24 @@ static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgp
return 0;
}
+static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
.process = vcn_v5_0_1_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = {
+ .set = vcn_v5_0_1_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
/**
* vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
*
@@ -1177,7 +1449,12 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
adev->vcn.inst->irq.num_types++;
+
adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
+
+ adev->vcn.inst->ras_poison_irq.num_types = 1;
+ adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs;
}
static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
@@ -1209,3 +1486,139 @@ const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
.rev = 1,
.funcs = &vcn_v5_0_1_ip_funcs,
};
+
+static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_VCN_V5_0_1_VCPU_VCODEC:
+ reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in VCN%d, sub_block %d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst, sub;
+ uint32_t poison_stat = 0;
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+ for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ vcn_v5_0_1_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = {
+ .query_poison_status = vcn_v5_0_1_query_poison_status,
+};
+
+static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* error codes reference the SMU driver interface header file */
+static int vcn_v5_0_1_err_codes[] = {
+ 14, 15, /* VCN */
+};
+
+static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ vcn_v5_0_1_err_codes,
+ ARRAY_SIZE(vcn_v5_0_1_err_codes)))
+ return false;
+
+ return true;
+}
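
aca_bank_is_valid() masks off bit 0 of INSTANCEIDLO before comparing, so instance IDs that differ only in the lowest bit resolve to the same AID. A compact illustration; both constants below are invented:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	uint32_t instlo = 0x03b30401;	/* example raw INSTANCEIDLO */
	uint32_t aid0_smu = 0x03b30400;	/* stand-in for mmSMNAID_AID0_MCA_SMU */

	instlo &= GENMASK32(31, 1);	/* drop bit 0 */
	printf("matches AID0 SMU instance: %s\n",
	       instlo == aid0_smu ? "yes" : "no");
	return 0;
}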
+
+static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = {
+ .aca_bank_parser = vcn_v5_0_1_aca_bank_parser,
+ .aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid,
+};
+
+static const struct aca_info vcn_v5_0_1_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &vcn_v5_0_1_aca_bank_ops,
+};
+
+static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
+ &vcn_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
+static struct amdgpu_vcn_ras vcn_v5_0_1_ras = {
+ .ras_block = {
+ .hw_ops = &vcn_v5_0_1_ras_hw_ops,
+ .ras_late_init = vcn_v5_0_1_ras_late_init,
+ },
+};
+
+static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
+{
+ adev->vcn.ras = &vcn_v5_0_1_ras;
+}
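The query path above sums per-(instance, sub-block) hits and collapses the sum to a bool. A minimal userspace sketch of that aggregation, with the register read stubbed out (all names here are illustrative, not the driver's API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_INST      2  /* stand-in for adev->vcn.num_vcn_inst */
#define MAX_SUB_BLOCK 1  /* mirrors AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK */

/* Stubbed register read: pretend instance 1, sub-block 0 is poisoned. */
static uint32_t read_poison_status(uint32_t inst, uint32_t sub)
{
        return (inst == 1 && sub == 0) ? 1 : 0;
}

static bool query_poison_status(void)
{
        uint32_t inst, sub, poison_stat = 0;

        /* Sum every (instance, sub-block) hit; any non-zero hit wins. */
        for (inst = 0; inst < NUM_INST; inst++)
                for (sub = 0; sub < MAX_SUB_BLOCK; sub++)
                        poison_stat += read_poison_status(inst, sub);

        return !!poison_stat;
}

int main(void)
{
        printf("poison detected: %d\n", query_poison_status());
        return 0;
}

Keeping the per-sub-block helper separate mirrors the driver's structure: a new sub-block only extends the switch, not the walk.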
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
index 8fd90bd10807..b72e4da68317 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
@@ -27,6 +27,13 @@
#define regVCN_RRMT_CNTL 0x0940
#define regVCN_RRMT_CNTL_BASE_IDX 1
+
+enum amdgpu_vcn_v5_0_1_sub_block {
+ AMDGPU_VCN_V5_0_1_VCPU_VCODEC = 0,
+
+ AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;
#endif /* __VCN_v5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index faa0dd75dd6d..85846fd08ce4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -350,6 +350,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
if (!amdgpu_sriov_vf(adev))
@@ -437,7 +438,10 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happens, start parsing interrupts
* from the last vector that was not overwritten (wptr + 32). Hopefully
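For context on the "(wptr + 32)" recovery in the comment above: the IH ring is a power-of-two circular buffer, so skipping 32 dwords past the wrapped write pointer lands on the oldest vector that is still intact. A hedged sketch of the pointer math (the ring size below is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative 4096-dword ring; the real size comes from ih->ring_size. */
        const uint32_t ptr_mask = 4096 - 1;
        uint32_t wptr = 70;     /* wrapped write pointer reported by hardware */

        /* On overflow, skip the 32 dwords that may be mid-overwrite and
         * resume at the oldest vector that is still intact.
         */
        uint32_t rptr = (wptr + 32) & ptr_mask;

        printf("resume reading at dword %u\n", rptr);
        return 0;
}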
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index d3c3d3ab7225..16e12c9913f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -5,7 +5,7 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
+ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT) || (LOONGARCH && 64BIT))
select HMM_MIRROR
select MMU_NOTIFIER
select DRM_AMDGPU_USERPTR
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 981d9adcc5e1..73acbe0b7c21 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -91,7 +91,6 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
- unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
u32 pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
@@ -125,11 +124,7 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
return;
}
- if (info.vmid == vmid)
- kfd_signal_vm_fault_event(pdd, &info, NULL);
- else
- kfd_signal_vm_fault_event(pdd, &info, NULL);
-
+ kfd_signal_vm_fault_event(pdd, &info, NULL);
kfd_unref_process(p);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1e9dd00620bf..828a9ceef1e7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -22,7 +22,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
@@ -2039,9 +2038,7 @@ static int criu_get_process_object_info(struct kfd_process *p,
num_events = kfd_get_num_events(p);
- ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
- if (ret)
- return ret;
+ svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
*num_objects = num_queues + num_events + num_svm_ranges;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 4a5a0a4e00f2..9bde2c64540f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -27,6 +27,16 @@
#include "kfd_priv.h"
static struct dentry *debugfs_root;
+static struct dentry *debugfs_proc;
+static struct list_head procs;
+
+struct debugfs_proc_entry {
+ struct list_head list;
+ struct dentry *proc_dentry;
+ pid_t pid;
+};
+
+#define MAX_DEBUGFS_FILENAME_LEN 32
static int kfd_debugfs_open(struct inode *inode, struct file *file)
{
@@ -92,6 +102,8 @@ static const struct file_operations kfd_debugfs_hang_hws_fops = {
void kfd_debugfs_init(void)
{
debugfs_root = debugfs_create_dir("kfd", NULL);
+ debugfs_proc = debugfs_create_dir("proc", debugfs_root);
+ INIT_LIST_HEAD(&procs);
debugfs_create_file("mqds", S_IFREG | 0444, debugfs_root,
kfd_debugfs_mqds_by_process, &kfd_debugfs_fops);
@@ -107,5 +119,69 @@ void kfd_debugfs_init(void)
void kfd_debugfs_fini(void)
{
+ debugfs_remove_recursive(debugfs_proc);
debugfs_remove_recursive(debugfs_root);
}
+
+static ssize_t kfd_debugfs_pasid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kfd_process_device *pdd = file_inode(file)->i_private;
+ char tmp[32];
+ int len;
+
+ len = snprintf(tmp, sizeof(tmp), "%u\n", pdd->pasid);
+
+ return simple_read_from_buffer(buf, count, ppos, tmp, len);
+}
+
+static const struct file_operations kfd_debugfs_pasid_fops = {
+ .owner = THIS_MODULE,
+ .read = kfd_debugfs_pasid_read,
+};
+
+void kfd_debugfs_add_process(struct kfd_process *p)
+{
+ int i;
+ char name[MAX_DEBUGFS_FILENAME_LEN];
+ struct debugfs_proc_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ list_add(&entry->list, &procs);
+ entry->pid = p->lead_thread->pid;
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "%d",
+ (int)entry->pid);
+ entry->proc_dentry = debugfs_create_dir(name, debugfs_proc);
+
+ /* Create debugfs files for each GPU:
+ * - proc/<pid>/pasid_<gpuid>
+ */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "pasid_%u",
+ pdd->dev->id);
+ debugfs_create_file((const char *)name, S_IFREG | 0444,
+ entry->proc_dentry, pdd,
+ &kfd_debugfs_pasid_fops);
+ }
+}
+
+void kfd_debugfs_remove_process(struct kfd_process *p)
+{
+ struct debugfs_proc_entry *entry, *next;
+
+ mutex_lock(&kfd_processes_mutex);
+ list_for_each_entry_safe(entry, next, &procs, list) {
+ if (entry->pid != p->lead_thread->pid)
+ continue;
+
+ debugfs_remove_recursive(entry->proc_dentry);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+}
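The add/remove pair above is a straightforward owner-list pattern: process creation allocates a node carrying the per-process dentry, and release walks the list by pid. The bookkeeping in isolation, as a standalone sketch with no debugfs and hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct proc_entry {
        struct proc_entry *next;
        int pid;
};

static struct proc_entry *procs;

static void add_process(int pid)
{
        struct proc_entry *e = calloc(1, sizeof(*e));

        if (!e)
                return;         /* best-effort, as in the driver */
        e->pid = pid;
        e->next = procs;
        procs = e;
}

static void remove_process(int pid)
{
        struct proc_entry **pp = &procs, *e;

        while ((e = *pp) != NULL) {
                if (e->pid == pid) {
                        *pp = e->next;  /* unlink, then free */
                        free(e);
                        continue;       /* keep scanning, like the _safe walk */
                }
                pp = &e->next;
        }
}

int main(void)
{
        add_process(100);
        add_process(200);
        remove_process(100);
        printf("remaining head pid: %d\n", procs ? procs->pid : -1);
        remove_process(200);
        return 0;
}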
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index b9c82be6ce13..7e749f9b6d69 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -352,11 +352,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &aldebaran_kfd2kgd;
break;
case IP_VERSION(9, 4, 3):
- gfx_target_version = adev->rev_id >= 1 ? 90402
- : adev->flags & AMD_IS_APU ? 90400
- : 90401;
- f2g = &gc_9_4_3_kfd2kgd;
- break;
case IP_VERSION(9, 4, 4):
gfx_target_version = 90402;
f2g = &gc_9_4_3_kfd2kgd;
@@ -976,7 +971,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd,
kfd_smi_event_update_gpu_reset(node, false, reset_context);
}
- kgd2kfd_suspend(kfd, false);
+ kgd2kfd_suspend(kfd, true);
for (i = 0; i < kfd->num_nodes; i++)
kfd_signal_reset_event(kfd->nodes[i]);
@@ -1018,13 +1013,33 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
return 0;
}
-bool kfd_is_locked(void)
+bool kfd_is_locked(struct kfd_dev *kfd)
{
+ uint8_t id = 0;
+ struct kfd_node *dev;
+
lockdep_assert_held(&kfd_processes_mutex);
- return (kfd_locked > 0);
+
+ /* check reset/suspend lock */
+ if (kfd_locked > 0)
+ return true;
+
+ if (kfd)
+ return kfd->kfd_dev_lock > 0;
+
+ /* check the lock on all cgroup-accessible devices */
+ while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) {
+ if (!dev || kfd_devcgroup_check_permission(dev))
+ continue;
+
+ if (dev->kfd->kfd_dev_lock > 0)
+ return true;
+ }
+
+ return false;
}
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
struct kfd_node *node;
int i;
@@ -1032,14 +1047,8 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
if (!kfd->init_complete)
return;
- /* for runtime suspend, skip locking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- /* For first KFD device suspend all the KFD processes */
- if (++kfd_locked == 1)
- kfd_suspend_all_processes();
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (suspend_proc)
+ kgd2kfd_suspend_process(kfd);
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
@@ -1047,7 +1056,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
}
}
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
{
int ret, i;
@@ -1060,14 +1069,36 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
return ret;
}
- /* for runtime resume, skip unlocking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- if (--kfd_locked == 0)
- ret = kfd_resume_all_processes();
- WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (resume_proc)
+ ret = kgd2kfd_resume_process(kfd);
+
+ return ret;
+}
+
+void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+ if (!kfd->init_complete)
+ return;
+
+ mutex_lock(&kfd_processes_mutex);
+ /* When the first KFD device suspends, suspend all KFD processes */
+ if (++kfd_locked == 1)
+ kfd_suspend_all_processes();
+ mutex_unlock(&kfd_processes_mutex);
+}
+
+int kgd2kfd_resume_process(struct kfd_dev *kfd)
+{
+ int ret = 0;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ mutex_lock(&kfd_processes_mutex);
+ if (--kfd_locked == 0)
+ ret = kfd_resume_all_processes();
+ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+ mutex_unlock(&kfd_processes_mutex);
return ret;
}
@@ -1447,24 +1478,53 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
kfd_get_num_sdma_engines(node);
}
-int kgd2kfd_check_and_lock_kfd(void)
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
+ struct kfd_process *p;
+ int r = 0, temp, idx;
+
mutex_lock(&kfd_processes_mutex);
- if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
- return -EBUSY;
+
+ if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
+ goto out;
+
+ /* Fail during system reset/resume or while the kfd device is switching partitions. */
+ if (kfd_is_locked(kfd)) {
+ r = -EBUSY;
+ goto out;
}
- ++kfd_locked;
+ /*
+ * Ensure all running processes are excluded from the device via cgroups
+ * before the mode switch, i.e. no pdd was created on this socket by any process.
+ */
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->dev->kfd != kfd)
+ continue;
+
+ r = -EBUSY;
+ goto proc_check_unlock;
+ }
+ }
+
+proc_check_unlock:
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+out:
+ if (!r)
+ ++kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
- return 0;
+ return r;
}
-void kgd2kfd_unlock_kfd(void)
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
mutex_lock(&kfd_processes_mutex);
- --kfd_locked;
+ --kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
}
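A compact model of the rule the reworked lock enforces: the global suspend/reset count and the new per-device count are both consulted, and the partition-switch lock is only taken when no process holds a pdd on the device. Constants and names below are illustrative:

#include <stdio.h>

#define EBUSY 16        /* illustrative errno value */

struct kfd_dev {
        int dev_lock;   /* models kfd->kfd_dev_lock */
        int users;      /* models "a process holds a pdd on this device" */
};

static int kfd_locked;  /* models the global reset/suspend count */

static int check_and_lock(struct kfd_dev *kfd)
{
        if (kfd_locked > 0 || kfd->dev_lock > 0 || kfd->users > 0)
                return -EBUSY;
        kfd->dev_lock++;
        return 0;
}

static void unlock(struct kfd_dev *kfd)
{
        kfd->dev_lock--;
}

int main(void)
{
        struct kfd_dev dev = { .users = 1 };

        printf("busy device: %d\n", check_and_lock(&dev));     /* -16 */
        dev.users = 0;
        printf("idle device: %d\n", check_and_lock(&dev));     /* 0 */
        unlock(&dev);
        return 0;
}

The driver serializes all of this under kfd_processes_mutex; the sketch omits the locking itself.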
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c610e172a2b8..6c5c7c1bf5ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1576,8 +1576,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->sdma_bitmap, get_num_sdma_queues(dqm))) {
+ dev_warn(dev, "No more SDMA queue to allocate (%d total queues)\n",
+ get_num_sdma_queues(dqm));
return -ENOMEM;
}
@@ -1602,8 +1603,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more XGMI SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->xgmi_sdma_bitmap, get_num_xgmi_sdma_queues(dqm))) {
+ dev_warn(dev, "No more XGMI SDMA queue to allocate (%d total queues)\n",
+ get_num_xgmi_sdma_queues(dqm));
return -ENOMEM;
}
if (restore_sdma_id) {
@@ -1662,8 +1664,8 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
}
if (!free_bit_found) {
- dev_err(dev, "No more SDMA queue to allocate for target ID %i\n",
- q->properties.sdma_engine_id);
+ dev_warn(dev, "No more SDMA queue to allocate for target ID %i (%d total queues)\n",
+ q->properties.sdma_engine_id, num_queues);
return -ENOMEM;
}
}
@@ -2310,7 +2312,7 @@ static int reset_hung_queues_sdma(struct device_queue_manager *dqm)
continue;
/* Reset engine and check. */
- if (amdgpu_sdma_reset_engine(dqm->dev->adev, i) ||
+ if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) ||
dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) ||
!set_sdma_queue_as_reset(dqm, doorbell_off)) {
r = -ENOTRECOVERABLE;
@@ -2337,9 +2339,18 @@ reset_fail:
static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma)
{
+ struct amdgpu_device *adev = dqm->dev->adev;
+
while (halt_if_hws_hang)
schedule();
+ if (adev->debug_disable_gpu_ring_reset) {
+ dev_info_once(adev->dev,
+ "%s queue hung, but ring reset disabled",
+ is_sdma ? "sdma" : "compute");
+
+ return -EPERM;
+ }
if (!amdgpu_gpu_recovery)
return -ENOTRECOVERABLE;
@@ -2714,7 +2725,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
- *mqd_size = mqd_mgr->mqd_size;
+ *mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask);
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index fecdb6794075..82905f3e54dd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1177,6 +1177,25 @@ void kfd_signal_hw_exception_event(u32 pasid)
kfd_unref_process(p);
}
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_hsa_memory_exception_data exception_data;
+ int i;
+
+ memset(&exception_data, 0, sizeof(exception_data));
+ exception_data.va = gpu_va;
+ exception_data.failure.NotPresent = 1;
+
+ /* Send a VM fault event to every KFD process device */
+ for (i = 0; i < p->n_pdds; i++) {
+ pdd = p->pdds[i];
+ exception_data.gpu_id = pdd->user_gpu_id;
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, &exception_data);
+ }
+}
+
void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data)
@@ -1283,7 +1302,7 @@ void kfd_signal_reset_event(struct kfd_node *dev)
if (ti) {
dev_err(dev->adev->dev,
"Queues reset on process %s tid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
+ ti->process_name, ti->tgid, ti->task.comm, ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
@@ -1331,6 +1350,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+ kfd_unref_process(p);
return;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index dbcb60eb54b2..1d170dc50df3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -23,7 +23,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 37b69fe0ede3..3e1ad8974797 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -168,14 +168,14 @@ static bool event_interrupt_isr_v10(struct kfd_node *dev,
client_id != SOC15_IH_CLIENTID_SE3SH)
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return 0;
/* Interrupt types we care about: various signals and faults.
@@ -217,37 +217,66 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF0_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF1_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF0_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF1_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID));
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
@@ -259,21 +288,37 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID),
sq_intr_err_type);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index c5f97e6e36ff..2788a52714d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -148,44 +148,69 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_CTXID0_DOORBELL_ID(ctxid0) ((ctxid0) & \
KFD_CTXID0_DOORBELL_ID_MASK)
-static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_auto(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
}
-static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_inst(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SH_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
}
-static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_error(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_warn_ratelimited(
+ dev_warn_ratelimited(
+ dev->adev->dev,
"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ DETAIL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ TYPE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ WGP_ID));
}
static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
@@ -255,14 +280,14 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev,
(context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG))
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return false;
/* Interrupt types we care about: various signals and faults.
@@ -353,10 +378,10 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (sq_int_enc) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- print_sq_intr_info_auto(context_id0, context_id1);
+ print_sq_intr_info_auto(dev, context_id0, context_id1);
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- print_sq_intr_info_inst(context_id0, context_id1);
+ print_sq_intr_info_inst(dev, context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -366,7 +391,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
return;
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
- print_sq_intr_info_error(context_id0, context_id1);
+ print_sq_intr_info_error(dev, context_id0, context_id1);
sq_int_errtype = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE);
if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index b8a91bf4ef30..4ceb251312a6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -314,11 +314,12 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev,
& ~pasid_mask) | pasid);
}
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
@@ -379,28 +380,82 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_int_data);
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -412,14 +467,37 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index aee2212e52f6..33aa23450b3f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -78,8 +78,8 @@ err_ioctl:
static void kfd_exit(void)
{
kfd_cleanup_processes();
- kfd_debugfs_fini();
kfd_process_destroy_wq();
+ kfd_debugfs_fini();
kfd_procfs_shutdown();
kfd_topology_shutdown();
kfd_chardev_exit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 80320a6c8854..f2dee320fada 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -373,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac
{
struct v9_mqd *m = get_mqd(mqd);
- *ctl_stack_size = m->cp_hqd_cntl_stack_size;
+ *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
@@ -388,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi
memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}
+static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm,
+ void *mqd,
+ void *mqd_dst,
+ void *ctl_stack_dst)
+{
+ struct v9_mqd *m;
+ int xcc;
+ uint64_t size = get_mqd(mqd)->cp_mqd_stride_size;
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ m = get_mqd(mqd + size * xcc);
+
+ checkpoint_mqd(mm, m,
+ (uint8_t *)mqd_dst + sizeof(*m) * xcc,
+ (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc);
+ }
+}
+
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
@@ -495,6 +513,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
+ /* Allow context switching inside the IB so that a massive command buffer
+ * of long-running SDMA commands cannot starve other processes
+ */
+ m->sdmax_rlcx_ib_cntl |= SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
@@ -760,13 +782,35 @@ static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
const void *mqd_src,
const void *ctl_stack_src, u32 ctl_stack_size)
{
- restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size);
- if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) {
- struct v9_mqd *m;
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ u32 mqd_ctl_stack_size;
+ struct v9_mqd *m;
+ u32 num_xcc;
+ int xcc;
- m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
- m->cp_hqd_pq_doorbell_control |= 1 <<
- CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
+ uint64_t offset = mm->mqd_stride(mm, qp);
+
+ mm->dev->dqm->current_logical_xcc_start++;
+
+ num_xcc = NUM_XCC(mm->dev->xcc_mask);
+ mqd_ctl_stack_size = ctl_stack_size / num_xcc;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = mqd_mem_obj->cpu_ptr;
+ if (gart_addr)
+ *gart_addr = mqd_mem_obj->gpu_addr;
+
+ for (xcc = 0; xcc < num_xcc; xcc++) {
+ get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc);
+ restore_mqd(mm, (void **)&m,
+ &xcc_mqd_mem_obj,
+ NULL,
+ qp,
+ (uint8_t *)mqd_src + xcc * sizeof(*m),
+ (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size,
+ mqd_ctl_stack_size);
}
}
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
@@ -902,7 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->free_mqd = kfd_free_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_checkpoint_info = get_checkpoint_info;
- mqd->checkpoint_mqd = checkpoint_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
@@ -914,16 +957,18 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
- mqd->restore_mqd = restore_mqd_v9_4_3;
mqd->destroy_mqd = destroy_mqd_v9_4_3;
mqd->get_wave_state = get_wave_state_v9_4_3;
+ mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3;
+ mqd->restore_mqd = restore_mqd_v9_4_3;
} else {
mqd->init_mqd = init_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->restore_mqd = restore_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->get_wave_state = get_wave_state;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
}
break;
case KFD_MQD_TYPE_HIQ:
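checkpoint_mqd_v9_4_3() and restore_mqd_v9_4_3() above serialize one MQD and one control stack per XCC, packed back to back in their respective buffers, which is also why get_queue_checkpoint_info() now multiplies the sizes by NUM_XCC(). A userspace sketch of the offset math (sizes invented; the layout rule is the point):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t num_xcc = 4;        /* illustrative XCC count */
        const size_t mqd_size = 512;       /* per-XCC MQD size, invented */
        const size_t ctl_stack_size = 256; /* per-XCC control stack, invented */
        uint32_t xcc;

        /* Destination layout: mqd[0..n-1] in one buffer, then
         * ctl_stack[0..n-1] in the other, each packed per XCC.
         */
        for (xcc = 0; xcc < num_xcc; xcc++)
                printf("xcc%u: mqd at +%zu, ctl_stack at +%zu\n",
                       xcc, mqd_size * xcc, ctl_stack_size * xcc);

        /* Totals reported for checkpointing scale by the XCC count. */
        printf("totals: mqd %zu bytes, ctl_stack %zu bytes\n",
               mqd_size * num_xcc, ctl_stack_size * num_xcc);
        return 0;
}

This matches the CRIU restore path later in the series, which divides the aggregate ctl_stack_size back down by the XCC count for compute queues.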
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 271c567242ab..b1a6eb349bb3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -31,6 +31,7 @@
#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)
+#define OVER_SUBSCRIPTION_XNACK_CONFLICT (1 << 3)
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
@@ -44,7 +45,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
- int *over_subscription)
+ int *over_subscription,
+ int xnack_conflict)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
@@ -73,6 +75,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
*over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
if (gws_queue_count > 1)
*over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;
+ if (xnack_conflict && (node->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ *over_subscription |= OVER_SUBSCRIPTION_XNACK_CONFLICT;
if (*over_subscription)
dev_dbg(dev, "Over subscribed runlist\n");
@@ -96,7 +100,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
- int *is_over_subscription)
+ int *is_over_subscription,
+ int xnack_conflict)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
@@ -105,7 +110,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
if (WARN_ON(pm->allocated))
return -EINVAL;
- pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+ pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription,
+ xnack_conflict);
mutex_lock(&pm->lock);
@@ -142,11 +148,27 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct queue *q;
struct kernel_queue *kq;
int is_over_subscription;
+ int xnack_enabled = -1;
+ bool xnack_conflict = 0;
rl_wptr = retval = processes_mapped = 0;
+ /* Check if processes set different xnack modes */
+ list_for_each_entry(cur, queues, list) {
+ qpd = cur->qpd;
+ if (xnack_enabled < 0)
+ /* First process */
+ xnack_enabled = qpd->pqm->process->xnack_enabled;
+ else if (qpd->pqm->process->xnack_enabled != xnack_enabled) {
+ /* Found a process with a different xnack mode */
+ xnack_conflict = 1;
+ break;
+ }
+ }
+
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
- &alloc_size_bytes, &is_over_subscription);
+ &alloc_size_bytes, &is_over_subscription,
+ xnack_conflict);
if (retval)
return retval;
@@ -156,9 +178,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->active_queue_count);
+build_runlist_ib:
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
qpd = cur->qpd;
+ /* group processes with the same xnack mode together */
+ if (qpd->pqm->process->xnack_enabled != xnack_enabled)
+ continue;
/* build map process packet */
if (processes_mapped >= pm->dqm->processes_count) {
dev_dbg(dev, "Not enough space left in runlist IB\n");
@@ -215,18 +241,26 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
alloc_size_bytes);
}
}
+ if (xnack_conflict) {
+ /* pick up processes with the other xnack mode */
+ xnack_enabled = !xnack_enabled;
+ xnack_conflict = 0;
+ goto build_runlist_ib;
+ }
dev_dbg(dev, "Finished map process and queues to runlist\n");
if (is_over_subscription) {
if (!pm->is_over_subscription)
- dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
- is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
- " too many processes." : "",
- is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
- " too many queues." : "",
- is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
- " multiple processes using cooperative launch." : "");
+ dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s%s. Expect reduced ROCm performance.\n",
+ is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
+ " too many processes" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
+ " too many queues" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
+ " multiple processes using cooperative launch" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_XNACK_CONFLICT ?
+ " xnack on/off processes mixed on gfx9" : "");
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
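The goto-based loop above builds the runlist in two passes when XNACK modes conflict: the first pass maps every process matching the first-seen mode, then the filter flips and the rest are appended. Equivalent standalone logic over a plain array (data is hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct proc {
        int id;
        bool xnack;
};

int main(void)
{
        struct proc procs[] = {
                { 1, true }, { 2, false }, { 3, true }, { 4, false },
        };
        const int n = sizeof(procs) / sizeof(procs[0]);
        bool want = procs[0].xnack;     /* first process picks the mode */
        bool conflict = false;
        int i;

        for (i = 1; i < n; i++)
                if (procs[i].xnack != want)
                        conflict = true;

build_runlist:
        for (i = 0; i < n; i++) {
                if (procs[i].xnack != want)
                        continue;       /* defer the other mode to pass two */
                printf("map process %d (xnack=%d)\n", procs[i].id, want);
        }
        if (conflict) {
                want = !want;           /* flip the filter */
                conflict = false;
                goto build_runlist;
        }
        return 0;
}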
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 2893fd5e5d00..505036968a77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process));
- if (adev->enforce_isolation[kfd->node_id])
+ if (adev->enforce_isolation[kfd->node_id] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
@@ -102,7 +102,8 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process_aldebaran));
- if (adev->enforce_isolation[knode->node_id])
+ if (adev->enforce_isolation[knode->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
@@ -165,9 +166,9 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- concurrent_proc_cnt = adev->enforce_isolation[kfd->node_id] ?
- 1 : min(pm->dqm->processes_count,
- kfd->max_proc_per_quantum);
+ concurrent_proc_cnt = (adev->enforce_isolation[kfd->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE) ?
+ 1 : min(pm->dqm->processes_count, kfd->max_proc_per_quantum);
packet = (struct pm4_mes_runlist *)buffer;
@@ -202,6 +203,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+ if (pm->dqm->dev->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)
+ packet->bitfields2.enb_xnack_retry_disable_check = 1;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;
@@ -237,7 +240,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
- packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
+ packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index cd8611401a66..e356a207d03c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -63,7 +63,8 @@ struct pm4_mes_set_resources {
struct {
uint32_t vmid_mask:16;
uint32_t unmap_latency:8;
- uint32_t reserved1:5;
+ uint32_t reserved1:4;
+ uint32_t enb_xnack_retry_disable_check:1;
enum mes_set_resources_queue_type_enum queue_type:3;
} bitfields2;
uint32_t ordinal2;
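The header change carves the new flag out of reserved1 so ordinal2 keeps its 32-bit layout: 16 + 8 + 4 + 1 + 3 = 32. A minimal check of that invariant (struct name illustrative; exact bitfield packing is ABI-dependent, though mainstream compilers fit these 32 bits in one word):

#include <assert.h>
#include <stdint.h>

struct set_resources_ord2 {
        uint32_t vmid_mask:16;
        uint32_t unmap_latency:8;
        uint32_t reserved1:4;                     /* was 5 bits */
        uint32_t enb_xnack_retry_disable_check:1; /* carved from reserved1 */
        uint32_t queue_type:3;
};

int main(void)
{
        assert(sizeof(struct set_resources_ord2) == sizeof(uint32_t));
        return 0;
}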
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f6aedf69c644..67694bcd9464 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -372,6 +372,9 @@ struct kfd_dev {
/* bitmap for dynamic doorbell allocation from doorbell object */
unsigned long *doorbell_bitmap;
+
+ /* for dynamic partitioning */
+ int kfd_dev_lock;
};
enum kfd_mempool {
@@ -1507,6 +1510,8 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va);
+
void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data);
@@ -1534,7 +1539,7 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
int kfd_send_exception_to_runtime(struct kfd_process *p,
unsigned int queue_id,
uint64_t error_reason);
-bool kfd_is_locked(void);
+bool kfd_is_locked(struct kfd_dev *kfd);
/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
@@ -1581,10 +1586,15 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
+void kfd_debugfs_add_process(struct kfd_process *p);
+void kfd_debugfs_remove_process(struct kfd_process *p);
+
#else
static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}
+static inline void kfd_debugfs_add_process(struct kfd_process *p) {}
+static inline void kfd_debugfs_remove_process(struct kfd_process *p) {}
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7c0c24732481..5be28c6c4f6a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -854,7 +854,7 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
*/
mutex_lock(&kfd_processes_mutex);
- if (kfd_is_locked()) {
+ if (kfd_is_locked(NULL)) {
pr_debug("KFD is locked! Cannot create process");
process = ERR_PTR(-EINVAL);
goto out;
@@ -900,6 +900,8 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
kfd_procfs_add_sysfs_files(process);
kfd_procfs_add_sysfs_counters(process);
+ kfd_debugfs_add_process(process);
+
init_waitqueue_head(&process->wait_irq_drain);
}
out:
@@ -1054,6 +1056,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ kfd_smi_event_process(pdd, false);
+
pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
pdd->dev->id, p->lead_thread->pid);
kfd_process_device_destroy_cwsr_dgpu(pdd);
@@ -1174,6 +1178,7 @@ static void kfd_process_wq_release(struct work_struct *work)
dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
+ kfd_debugfs_remove_process(p);
kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
@@ -1715,6 +1720,8 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
pdd->pasid = avm->pasid;
pdd->drm_file = drm_file;
+ kfd_smi_event_process(pdd, true);
+
return 0;
err_get_pasid:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7eb370b68159..7fbb5c274ccc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -279,20 +279,17 @@ static int init_user_queue(struct process_queue_manager *pqm,
/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
* on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
*/
- if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
- >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
- if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
- pr_err("Queue memory allocated to wrong device\n");
- retval = -EINVAL;
- goto free_gang_ctx_bo;
- }
+ if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
+ pr_err("Queue memory allocated to wrong device\n");
+ retval = -EINVAL;
+ goto free_gang_ctx_bo;
+ }
- retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
- &(*q)->wptr_bo_gart);
- if (retval) {
- pr_err("Failed to map wptr bo to GART\n");
- goto free_gang_ctx_bo;
- }
+ retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
+ &(*q)->wptr_bo_gart);
+ if (retval) {
+ pr_err("Failed to map wptr bo to GART\n");
+ goto free_gang_ctx_bo;
}
}
@@ -451,8 +448,15 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
- pqm->process->lead_thread->pid, type, retval);
+ if ((type == KFD_QUEUE_TYPE_SDMA ||
+ type == KFD_QUEUE_TYPE_SDMA_XGMI ||
+ type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) &&
+ retval == -ENOMEM)
+ pr_warn("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
+ else
+ pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
goto err_create_queue;
}
@@ -910,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
- /* data stored in this order: priv_data, mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
q_data->mqd_size = mqd_size;
q_data->ctl_stack_size = ctl_stack_size;
@@ -959,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p,
}
static void set_queue_properties_from_criu(struct queue_properties *qp,
- struct kfd_criu_queue_priv_data *q_data)
+ struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc)
{
qp->is_interop = false;
qp->queue_percent = q_data->q_percent;
@@ -972,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
- qp->ctl_stack_size = q_data->ctl_stack_size;
+ if (q_data->type == KFD_QUEUE_TYPE_COMPUTE)
+ qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc;
+ else
+ qp->ctl_stack_size = q_data->ctl_stack_size;
+
qp->type = q_data->type;
qp->format = q_data->format;
}
@@ -1032,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
goto exit;
}
- /* data stored in this order: mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
memset(&qp, 0, sizeof(qp));
- set_queue_properties_from_criu(&qp, q_data);
+ set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask));
print_queue_properties(&qp);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 4afff7094caf..a65c67cf56ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -402,7 +402,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
{
u32 vgpr_size = 0x40000;
- if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
+ if (gfxv == 90402 || /* GFX_VERSION_AQUA_VANJARAM */
gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
gfxv == 90008 || /* GFX_VERSION_ARCTURUS */
gfxv == 90500)
@@ -462,7 +462,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
if (gfxv == 80002) /* GFX_VERSION_TONGA */
props->eop_buffer_size = 0x8000;
- else if ((gfxv / 100 * 100) == 90400) /* GFX_VERSION_AQUA_VANJARAM */
+ else if (gfxv == 90402) /* GFX_VERSION_AQUA_VANJARAM */
props->eop_buffer_size = 4096;
else if (gfxv >= 80000)
props->eop_buffer_size = 4096;
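The old test rounded the GFX target version down to its hundred — in integer math 90402 / 100 * 100 == 90400 — so 90400, 90401, and 90402 all matched; after the probe change earlier in this series only 90402 is reported, hence the exact compare. The rounding, demonstrated:

#include <stdio.h>

int main(void)
{
        unsigned int gfxv = 90402;

        /* Integer division truncates, so this strips the last two digits. */
        printf("%u -> %u\n", gfxv, gfxv / 100 * 100);   /* 90402 -> 90400 */
        return 0;
}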
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 9b8169761ec5..a499449fcb06 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -163,10 +163,9 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
unsigned int event)
{
- uint64_t all = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS);
uint64_t events = READ_ONCE(client->events);
- if (pid && client->pid != pid && !(client->suser && (events & all)))
+ if (pid && client->pid != pid && !client->suser)
return false;
return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
@@ -254,9 +253,9 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
task_info = amdgpu_vm_get_task_info_pasid(dev->adev, pasid);
if (task_info) {
/* Report VM faults from user applications, not retry from kernel */
- if (task_info->pid)
+ if (task_info->task.pid)
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT(
- task_info->pid, task_info->task_name));
+ task_info->task.pid, task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
@@ -345,6 +344,27 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
pid, address, last - address + 1, node->id, trigger));
}
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start)
+{
+ struct amdgpu_task_info *task_info;
+ struct amdgpu_vm *avm;
+
+ if (!pdd->drm_priv)
+ return;
+
+ avm = drm_priv_to_vm(pdd->drm_priv);
+ task_info = amdgpu_vm_get_task_info_vm(avm);
+
+ if (task_info) {
+ kfd_smi_event_add(0, pdd->dev,
+ start ? KFD_SMI_EVENT_PROCESS_START :
+ KFD_SMI_EVENT_PROCESS_END,
+ KFD_EVENT_FMT_PROCESS(task_info->task.pid,
+ task_info->task.comm));
+ amdgpu_vm_put_task_info(task_info);
+ }
+}
+
int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
{
struct kfd_smi_client *client;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index 503bff13d815..bb4d72b57387 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -53,4 +53,5 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger);
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 100717a98ec1..a0f22ea6d15a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1171,13 +1171,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start,
}
static void
-svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
- struct svm_range *pchild, enum svm_work_list_ops op)
+svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
{
pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
pchild, pchild->start, pchild->last, prange, op);
- pchild->work_item.mm = mm;
+ pchild->work_item.mm = NULL;
pchild->work_item.op = op;
list_add_tail(&pchild->child_list, &prange->child_list);
}
@@ -1245,8 +1244,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
if (ext_coherent)
- mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
+ mtype_local = AMDGPU_VM_MTYPE_CC;
else
mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -1279,7 +1277,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
- if (gc_ip_version < IP_VERSION(9, 5, 0))
+ if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
else
mapping_flags |= AMDGPU_VM_MTYPE_NC;
@@ -2395,15 +2393,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
prange->work_item.op != SVM_OP_UNMAP_RANGE)
prange->work_item.op = op;
} else {
- prange->work_item.op = op;
-
- /* Pairs with mmput in deferred_list_work */
- mmget(mm);
- prange->work_item.mm = mm;
- list_add_tail(&prange->deferred_list,
- &prange->svms->deferred_range_list);
- pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
- prange, prange->start, prange->last, op);
+ /* Pairs with mmput in deferred_list_work.
+ * If the process is exiting and the mm is gone, don't update the mmu notifier.
+ */
+ if (mmget_not_zero(mm)) {
+ prange->work_item.mm = mm;
+ prange->work_item.op = op;
+ list_add_tail(&prange->deferred_list,
+ &prange->svms->deferred_range_list);
+ pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
+ prange, prange->start, prange->last, op);
+ }
}
spin_unlock(&svms->deferred_list_lock);
}
@@ -2417,8 +2417,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms)
}
static void
-svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
- struct svm_range *prange, unsigned long start,
+svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start,
unsigned long last)
{
struct svm_range *head;
@@ -2439,12 +2438,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
svm_range_split(tail, last + 1, tail->last, &head);
if (head != prange && tail != prange) {
- svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
- svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
+ svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
+ svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE);
} else if (tail != prange) {
- svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
+ svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE);
} else if (head != prange) {
- svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
+ svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
} else if (parent != prange) {
prange->work_item.op = SVM_OP_UNMAP_RANGE;
}
@@ -2521,14 +2520,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
l = min(last, pchild->last);
if (l >= s)
svm_range_unmap_from_gpus(pchild, s, l, trigger);
- svm_range_unmap_split(mm, prange, pchild, start, last);
+ svm_range_unmap_split(prange, pchild, start, last);
mutex_unlock(&pchild->lock);
}
s = max(start, prange->start);
l = min(last, prange->last);
if (l >= s)
svm_range_unmap_from_gpus(prange, s, l, trigger);
- svm_range_unmap_split(mm, prange, prange, start, last);
+ svm_range_unmap_split(prange, prange, start, last);
if (unmap_parent)
svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
@@ -2571,8 +2570,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
if (range->event == MMU_NOTIFY_RELEASE)
return true;
- if (!mmget_not_zero(mni->mm))
- return true;
start = mni->interval_tree.start;
last = mni->interval_tree.last;
@@ -2599,7 +2596,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
}
svm_range_unlock(prange);
- mmput(mni->mm);
return true;
}
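With the deferred-list path above now pinning the mm itself via mmget_not_zero(), the invalidate callback can drop its own mmget/mmput pair: an interval-notifier callback appears to run only against an mm that stays valid for the duration of the call, so the extra pin bought nothing beyond an early-return path on dying processes.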
@@ -4076,8 +4072,8 @@ exit:
return ret;
}
-int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
- uint64_t *svm_priv_data_size)
+void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
+ uint64_t *svm_priv_data_size)
{
uint64_t total_size, accessibility_size, common_attr_size;
int nattr_common = 4, nattr_accessibility = 1;
@@ -4089,8 +4085,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
*svm_priv_data_size = 0;
svms = &p->svms;
- if (!svms)
- return -EINVAL;
mutex_lock(&svms->lock);
list_for_each_entry(prange, &svms->list, list) {
@@ -4132,7 +4126,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
*svm_priv_data_size);
- return 0;
}
int kfd_criu_checkpoint_svm(struct kfd_process *p,
@@ -4149,8 +4142,6 @@ int kfd_criu_checkpoint_svm(struct kfd_process *p,
struct mm_struct *mm;
svms = &p->svms;
- if (!svms)
- return -EINVAL;
mm = get_task_mm(p->lead_thread);
if (!mm) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 6ea23c78009c..01c7a4877904 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -184,8 +184,8 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
void svm_range_dma_unmap(struct svm_range *prange);
-int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
- uint64_t *svm_priv_data_size);
+void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
+ uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
uint8_t __user *user_priv_data,
uint64_t *priv_offset);
@@ -237,13 +237,12 @@ static inline int svm_range_schedule_evict_svm_bo(
return -EINVAL;
}
-static inline int svm_range_get_info(struct kfd_process *p,
- uint32_t *num_svm_ranges,
- uint64_t *svm_priv_data_size)
+static inline void svm_range_get_info(struct kfd_process *p,
+ uint32_t *num_svm_ranges,
+ uint64_t *svm_priv_data_size)
{
*num_svm_ranges = 0;
*svm_priv_data_size = 0;
- return 0;
}
static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 9bbee484d57c..4ec73f33535e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -510,6 +510,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.capability |=
HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0) &&
+ (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
+
sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute",
dev->node_props.max_engine_clk_fcompute);
@@ -1267,34 +1271,41 @@ static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev,
{
struct kfd_node *gpu = outbound_link->gpu;
struct amdgpu_device *adev = gpu->adev;
- int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_sdma_engines = kfd_get_num_xgmi_sdma_engines(gpu);
+ unsigned int num_sdma_engines = kfd_get_num_sdma_engines(gpu);
+ uint32_t sdma_eng_id_mask = (1 << num_sdma_engines) - 1;
+ uint32_t xgmi_sdma_eng_id_mask =
+ ((1 << num_xgmi_sdma_engines) - 1) << num_sdma_engines;
+
bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu &&
adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 &&
- kfd_get_num_xgmi_sdma_engines(gpu) >= 14 &&
- (!(adev->flags & AMD_IS_APU) && num_xgmi_nodes == 8);
+ num_xgmi_sdma_engines >= 6 && (!(adev->flags & AMD_IS_APU) &&
+ num_xgmi_nodes == 8);
if (support_rec_eng) {
int src_socket_id = adev->gmc.xgmi.physical_node_id;
int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id;
+ unsigned int reshift = num_xgmi_sdma_engines == 6 ? 1 : 0;
outbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[src_socket_id][dst_socket_id];
+ 1 << (rec_sdma_eng_map[src_socket_id][dst_socket_id] >> reshift);
inbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[dst_socket_id][src_socket_id];
- } else {
- int num_sdma_eng = kfd_get_num_sdma_engines(gpu);
- int i, eng_offset = 0;
+ 1 << (rec_sdma_eng_map[dst_socket_id][src_socket_id] >> reshift);
- if (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
- kfd_get_num_xgmi_sdma_engines(gpu) && to_dev->gpu) {
- eng_offset = num_sdma_eng;
- num_sdma_eng = kfd_get_num_xgmi_sdma_engines(gpu);
- }
+ /* If the recommended engine falls outside the XGMI SDMA range, reset the mask */
+ if (outbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ outbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
+ if (inbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ inbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
- for (i = 0; i < num_sdma_eng; i++) {
- outbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- inbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- }
+ } else {
+ uint32_t engine_mask = (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
+ num_xgmi_sdma_engines && to_dev->gpu) ? xgmi_sdma_eng_id_mask :
+ sdma_eng_id_mask;
+
+ outbound_link->rec_sdma_eng_id_mask = engine_mask;
+ inbound_link->rec_sdma_eng_id_mask = engine_mask;
}
}
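The masks computed at the top of the rewritten function are plain bit ranges: the low num_sdma_engines bits cover the regular engines and the next num_xgmi_sdma_engines bits cover the XGMI engines. A standalone check of the arithmetic, including the out-of-range fallback; the engine counts are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_sdma_engines = 4;       /* example values */
	unsigned int num_xgmi_sdma_engines = 6;

	uint32_t sdma_eng_id_mask = (1u << num_sdma_engines) - 1;
	uint32_t xgmi_sdma_eng_id_mask =
		((1u << num_xgmi_sdma_engines) - 1) << num_sdma_engines;

	/* engines 0..3 -> 0x0f, XGMI engines 4..9 -> 0x3f0 */
	printf("sdma mask: 0x%x\n", sdma_eng_id_mask);
	printf("xgmi mask: 0x%x\n", xgmi_sdma_eng_id_mask);

	/* A recommended bit overlapping the plain SDMA range is out of
	 * range for XGMI traffic, so fall back to the full XGMI mask. */
	uint32_t rec_mask = 1u << 2;
	if (rec_mask & sdma_eng_id_mask)
		rec_mask = xgmi_sdma_eng_id_mask;
	printf("rec mask: 0x%x\n", rec_mask);
	return 0;
}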
@@ -2001,8 +2012,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (!amdgpu_sriov_vf(dev->gpu->adev))
dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
- if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
- dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
index faed84172dd4..8bc36f04b1b7 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index ab2a97e354da..7329b8cc2576 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -38,6 +38,7 @@ AMDGPUDM = \
amdgpu_dm_pp_smu.o \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
+ amdgpu_dm_quirks.o \
amdgpu_dm_wb.o
ifdef CONFIG_DRM_AMD_DC_FP
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 536f73131c2d..cd0e2976e268 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -80,7 +80,6 @@
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
-#include <linux/dmi.h>
#include <linux/sort.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -115,6 +114,8 @@
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
+static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch");
+
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
@@ -280,7 +281,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
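The DRM_ERROR/DRM_WARN/DRM_INFO conversions that run through the rest of this file switch to the device-aware drm_err/drm_warn/drm_info helpers, which prefix each message with the specific DRM device. That also makes the hand-written "amdgpu:" prefixes redundant, which is why several of the later hunks drop them from the format strings.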
@@ -301,7 +302,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -372,6 +373,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
+ if (new_state->stream->adjust.timing_adjust_pending)
+ return true;
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
@@ -749,6 +752,29 @@ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
complete(&adev->dm.dmub_aux_transfer_done);
}
+static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (!adev || !notify) {
+ ASSERT(false);
+ return;
+ }
+
+ const struct dmub_cmd_fused_request *req = &notify->fused_request;
+ const uint8_t ddc_line = req->u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
+ ASSERT(false);
+ return;
+ }
+
+ struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];
+
+ static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
+ memcpy(sync->reply_data, req, sizeof(*req));
+ complete(&sync->replied);
+}
+
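The callback above only copies the reply into sync->reply_data and signals sync->replied; the waiting half is not in this hunk. Below is a standalone pthread model of that assumed handshake (the kernel side would use struct completion, presumably with wait_for_completion_timeout() on the requester's path):

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

/* Standalone model of the handshake: the IRQ-side callback copies
 * the reply and signals; a requester blocks until that happens.
 * The kernel uses struct completion; this model uses pthreads. */
struct fused_io_sync_model {
	unsigned char reply_data[64];
	bool replied;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static void fused_io_complete(struct fused_io_sync_model *sync,
			      const void *reply, size_t len)
{
	pthread_mutex_lock(&sync->lock);
	memcpy(sync->reply_data, reply, len);
	sync->replied = true;
	pthread_cond_signal(&sync->cond);
	pthread_mutex_unlock(&sync->lock);
}

static void fused_io_wait(struct fused_io_sync_model *sync,
			  void *out, size_t len)
{
	pthread_mutex_lock(&sync->lock);
	while (!sync->replied)
		pthread_cond_wait(&sync->cond, &sync->lock);
	memcpy(out, sync->reply_data, len);
	pthread_mutex_unlock(&sync->lock);
}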
/**
* dmub_hpd_callback - DMUB HPD interrupt processing callback.
* @adev: amdgpu_device pointer
@@ -772,18 +798,18 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
return;
if (notify == NULL) {
- DRM_ERROR("DMUB HPD callback notification was NULL");
+ drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
return;
}
if (notify->link_index > adev->dm.dc->link_count) {
- DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
+ drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index);
return;
}
/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
- DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
+ drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
return;
}
@@ -800,11 +826,11 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
- DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
- DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
else
- DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
notify->type, link_index);
hpd_aconnector = aconnector;
@@ -816,7 +842,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
if (hpd_aconnector) {
if (notify->type == DMUB_NOTIFICATION_HPD) {
if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
- DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
+ drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
handle_hpd_irq_helper(hpd_aconnector);
} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
handle_hpd_rx_irq(hpd_aconnector);
@@ -835,7 +861,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
struct dmub_notification *notify)
{
- DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
+ drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}
/**
@@ -871,7 +897,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
if (!dmub_hpd_wrk->dmub_notify) {
- DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
return;
}
@@ -885,6 +911,30 @@ static void dm_handle_hpd_work(struct work_struct *work)
}
+static const char *dmub_notification_type_str(enum dmub_notification_type e)
+{
+ switch (e) {
+ case DMUB_NOTIFICATION_NO_DATA:
+ return "NO_DATA";
+ case DMUB_NOTIFICATION_AUX_REPLY:
+ return "AUX_REPLY";
+ case DMUB_NOTIFICATION_HPD:
+ return "HPD";
+ case DMUB_NOTIFICATION_HPD_IRQ:
+ return "HPD_IRQ";
+ case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
+ return "SET_CONFIG_REPLY";
+ case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
+ return "DPIA_NOTIFICATION";
+ case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
+ return "HPD_SENSE_NOTIFY";
+ case DMUB_NOTIFICATION_FUSED_IO:
+ return "FUSED_IO";
+ default:
+ return "<unknown>";
+ }
+}
+
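Two defects in the replaced array motivate the switch: it carried no string for the new DMUB_NOTIFICATION_FUSED_IO type, and it spelled SET_CONFIG_REPLY as "SET_CONFIGC_REPLY". The switch keeps the mapping next to the enum values it names and degrades to "<unknown>" for anything unmapped, instead of risking a stale or out-of-bounds table lookup.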
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -902,22 +952,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct dmcub_trace_buf_entry entry = { 0 };
u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
- static const char *const event_type[] = {
- "NO_DATA",
- "AUX_REPLY",
- "HPD",
- "HPD_IRQ",
- "SET_CONFIGC_REPLY",
- "DPIA_NOTIFICATION",
- "HPD_SENSE_NOTIFY",
- };
do {
if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
entry.param0, entry.param1);
- DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
entry.trace_code, entry.tick_count, entry.param0, entry.param1);
} else
break;
@@ -927,7 +968,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
} while (count <= DMUB_TRACE_MAX_READ);
if (count > DMUB_TRACE_MAX_READ)
- DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
+ drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");
if (dc_enable_dmub_notifications(adev->dm.dc) &&
irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
@@ -935,25 +976,25 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
- DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
continue;
}
if (!dm->dmub_callback[notify.type]) {
- DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
- event_type[notify.type]);
+ drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
+ dmub_notification_type_str(notify.type));
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
if (!dmub_hpd_wrk) {
- DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
return;
}
dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
GFP_ATOMIC);
if (!dmub_hpd_wrk->dmub_notify) {
kfree(dmub_hpd_wrk);
- DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
return;
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
@@ -1011,10 +1052,10 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
&compressor->gpu_addr, &compressor->cpu_addr);
if (r)
- DRM_ERROR("DM: Failed to initialize FBC\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
else {
adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
- DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+ drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
}
}
@@ -1179,13 +1220,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
if (!fb_info) {
- DRM_ERROR("No framebuffer info for DMUB service.\n");
+ drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
return -EINVAL;
}
if (!dmub_fw) {
/* Firmware required for DMUB support. */
- DRM_ERROR("No firmware provided for DMUB.\n");
+ drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
return -EINVAL;
}
@@ -1195,19 +1236,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
return -EINVAL;
}
if (!has_hw_support) {
- DRM_INFO("DMUB unsupported on ASIC\n");
+ drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
return 0;
}
/* Reset DMCUB if it was previously running - before we overwrite its memory. */
status = dmub_srv_hw_reset(dmub_srv);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Error resetting DMUB HW: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
@@ -1290,6 +1331,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
case IP_VERSION(3, 6, 0):
hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+ hw_params.lower_hbr3_phy_ssc = true;
break;
default:
break;
@@ -1297,14 +1339,14 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
return -EINVAL;
}
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
/* Init DMCU and ABM if available. */
if (dmcu && abm) {
@@ -1315,11 +1357,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
if (!adev->dm.dc->ctx->dmub_srv)
adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
- DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
return -ENOMEM;
}
- DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
adev->dm.dmcub_fw_version);
/* Keeping sanity checks off if
@@ -1362,18 +1404,18 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
status = dmub_srv_is_hw_init(dmub_srv, &init);
if (status != DMUB_STATUS_OK)
- DRM_WARN("DMUB hardware init check failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);
if (status == DMUB_STATUS_OK && init) {
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
} else {
/* Perform the full hardware initialization. */
r = dm_dmub_hw_init(adev);
if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
}
}
@@ -1483,18 +1525,18 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
+ adev = offload_work->adev;
if (!aconnector) {
- DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
goto skip;
}
- adev = drm_to_adev(aconnector->base.dev);
dc_link = aconnector->dc_link;
mutex_lock(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
mutex_unlock(&aconnector->hpd_lock);
if (new_connection_type == dc_connection_none)
@@ -1563,8 +1605,9 @@ skip:
}
-static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
{
+ struct dc *dc = adev->dm.dc;
int max_caps = dc->caps.max_links;
int i = 0;
struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
@@ -1580,7 +1623,7 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
if (hpd_rx_offload_wq[i].wq == NULL) {
- DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+ drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
goto out_err;
}
@@ -1629,153 +1672,6 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-struct amdgpu_dm_quirks {
- bool aux_hpd_discon;
- bool support_edp0_on_dp1;
-};
-
-static struct amdgpu_dm_quirks quirk_entries = {
- .aux_hpd_discon = false,
- .support_edp0_on_dp1 = false
-};
-
-static int edp0_on_dp1_callback(const struct dmi_system_id *id)
-{
- quirk_entries.support_edp0_on_dp1 = true;
- return 0;
-}
-
-static int aux_hpd_discon_callback(const struct dmi_system_id *id)
-{
- quirk_entries.aux_hpd_discon = true;
- return 0;
-}
-
-static const struct dmi_system_id dmi_quirk_table[] = {
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
- },
- },
- {}
- /* TODO: refactor this from a fixed table to a dynamic option */
-};
-
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
-{
- int dmi_id;
- struct drm_device *dev = dm->ddev;
-
- dm->aux_hpd_discon_quirk = false;
- init_data->flags.support_edp0_on_dp1 = false;
-
- dmi_id = dmi_check_system(dmi_quirk_table);
-
- if (!dmi_id)
- return;
-
- if (quirk_entries.aux_hpd_discon) {
- dm->aux_hpd_discon_quirk = true;
- drm_info(dev, "aux_hpd_discon_quirk attached\n");
- }
- if (quirk_entries.support_edp0_on_dp1) {
- init_data->flags.support_edp0_on_dp1 = true;
- drm_info(dev, "support_edp0_on_dp1 attached\n");
- }
-}
void*
dm_allocate_gpu_mem(
@@ -1862,10 +1758,11 @@ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
return DMUB_STATUS_TIMEOUT;
}
-static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
+static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
- struct dml2_soc_bb *bb;
+ void *bb;
long long addr;
+ unsigned int bb_size;
int i = 0;
uint16_t chunk;
enum dmub_gpint_command send_addrs[] = {
@@ -1878,6 +1775,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(4, 0, 1):
+ bb_size = sizeof(struct dml2_soc_bb);
break;
default:
return NULL;
@@ -1885,7 +1783,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
bb = dm_allocate_gpu_mem(adev,
DC_MEM_ALLOC_TYPE_GART,
- sizeof(struct dml2_soc_bb),
+ bb_size,
&addr);
if (!bb)
return NULL;
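Switching the return type to void * and threading the size through bb_size makes the allocation per-IP-version: only DCE IP 4.0.1 currently sets bb_size (to sizeof(struct dml2_soc_bb)), every other version still returns NULL early, and a future version could hand back a differently shaped bounding box without touching the allocation call itself.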
@@ -1951,7 +1849,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
- DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n");
goto error;
}
@@ -2062,7 +1960,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
- retrieve_dmi_info(&adev->dm, &init_data);
+ retrieve_dmi_info(&adev->dm);
+ if (adev->dm.edp0_on_dp1_quirk)
+ init_data.flags.support_edp0_on_dp1 = true;
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -2073,10 +1973,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
dce_version_to_string(adev->dm.dc->ctx->dce_version));
} else {
- DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
@@ -2102,33 +2002,44 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
- if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP)
+ if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) {
adev->dm.dc->debug.force_disable_subvp = true;
+ adev->dm.dc->debug.fams2_config.bits.enable = false;
+ }
if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
adev->dm.dc->debug.using_dml21 = true;
}
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
+ adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;
+
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
+ adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
+
+ if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT)
+ adev->dm.dc->debug.skip_detection_link_training = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
- DRM_INFO("DP-HDMI FRL PCON supported\n");
+ drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");
r = dm_dmub_hw_init(adev);
if (r) {
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
goto error;
}
dc_hardware_init(adev->dm.dc);
- adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
- DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
goto error;
}
@@ -2143,10 +2054,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
- DRM_ERROR(
- "amdgpu: failed to initialize freesync_module.\n");
+ drm_err(adev_to_drm(adev),
+ "failed to initialize freesync_module.\n");
} else
- DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
amdgpu_dm_init_color_mod();
@@ -2155,7 +2066,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
- DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n");
}
if (adev->dm.dc->caps.ips_support &&
@@ -2166,9 +2077,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
- DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n");
else
- DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
dc_init_callbacks(adev->dm.dc, &init_params);
}
@@ -2176,20 +2087,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
- DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
+ drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
- DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
- DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub aux callback");
+ goto error;
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
+ init_completion(&adev->dm.fused_io[i].replied);
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
+ dmub_aux_fused_io_callback, false)) {
+ drm_err(adev_to_drm(adev), "fail to register dmub fused io callback");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -2206,8 +2126,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
if (amdgpu_dm_initialize_drm_device(adev)) {
- DRM_ERROR(
- "amdgpu: failed to initialize sw for display support.\n");
+ drm_err(adev_to_drm(adev),
+ "failed to initialize sw for display support.\n");
goto error;
}
@@ -2221,22 +2141,22 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
- DRM_ERROR(
- "amdgpu: failed to initialize sw for display support.\n");
+ drm_err(adev_to_drm(adev),
+ "failed to initialize sw for display support.\n");
goto error;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctx.crtc_ctx)
- DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n");
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
adev->dm.secure_display_ctx.support_mul_roi = true;
#endif
- DRM_DEBUG_DRIVER("KMS initialized.\n");
+ drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");
return 0;
error:
@@ -2409,7 +2329,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
default:
break;
}
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -2427,7 +2347,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
@@ -2486,6 +2406,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
};
int r;
@@ -2552,7 +2473,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
adev->dm.dmcub_fw_version);
}
@@ -2561,7 +2482,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_srv = adev->dm.dmub_srv;
if (!dmub_srv) {
- DRM_ERROR("Failed to allocate DMUB service!\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
return -ENOMEM;
}
@@ -2574,7 +2495,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
/* Create the DMUB service. */
status = dmub_srv_create(dmub_srv, &create_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error creating DMUB service: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
return -EINVAL;
}
@@ -2599,7 +2520,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
&region_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
return -EINVAL;
}
@@ -2628,14 +2549,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
fb_info = adev->dm.dmub_fb_info;
if (!fb_info) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Failed to allocate framebuffer info for DMUB service!\n");
return -ENOMEM;
}
status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
return -EINVAL;
}
@@ -2652,7 +2573,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
- DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ drm_err(adev_to_drm(adev), "failed to create cgs device.\n");
return -EINVAL;
}
@@ -2958,7 +2879,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
ret = amdgpu_dpm_write_watermarks_table(adev);
if (ret) {
- DRM_ERROR("Failed to update WMTABLE!\n");
+ drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
return ret;
}
@@ -2976,13 +2897,13 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
if (oem_ddc_service) {
oem_i2c = create_i2c(oem_ddc_service, true);
if (!oem_i2c) {
- dev_info(adev->dev, "Failed to create oem i2c adapter data\n");
+ drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
return -ENOMEM;
}
r = i2c_add_adapter(&oem_i2c->base);
if (r) {
- dev_info(adev->dev, "Failed to register oem i2c\n");
+ drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
kfree(oem_i2c);
return r;
}
@@ -3025,7 +2946,7 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block)
r = dm_oem_i2c_hw_init(adev);
if (r)
- dev_info(adev->dev, "Failed to add OEM i2c bus\n");
+ drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");
return 0;
}
@@ -3068,7 +2989,7 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
if (rc)
- DRM_WARN("Failed to %s pflip interrupts\n",
+ drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
if (enable) {
@@ -3078,14 +2999,14 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
if (rc)
- DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
* don't use amdgpu_irq_get/put() to avoid refcount change.
*/
if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
- DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
}
}
@@ -3142,6 +3063,77 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
+static int dm_cache_state(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state)) {
+ r = PTR_ERR(adev->dm.cached_state);
+ adev->dm.cached_state = NULL;
+ }
+
+ return adev->dm.cached_state ? 0 : r;
+}
+
+static void dm_destroy_cached_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct dm_plane_state *dm_new_plane_state;
+ struct drm_plane_state *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int i;
+
+ if (!dm->cached_state)
+ return;
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ new_crtc_state->active_changed = true;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+ }
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ dm_new_crtc_state->base.color_mgmt_changed = true;
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+}
+
+static void dm_complete(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ dm_destroy_cached_state(adev);
+}
+
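Pulling the capture and teardown of the suspend state into dm_cache_state() and dm_destroy_cached_state() serves the new .complete hook: dm_complete() frees the cached state once the PM core finishes the cycle, which presumably covers aborted suspends where dm_resume() never gets to restore it. The dm_suspend() and dm_resume() paths below are rewired to call the same helpers.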
static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3150,11 +3142,8 @@ static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
return 0;
WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
- return 0;
+ return dm_cache_state(adev);
}
static int dm_suspend(struct amdgpu_ip_block *ip_block)
@@ -3188,9 +3177,10 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
}
if (!adev->dm.cached_state) {
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
+ int r = dm_cache_state(adev);
+
+ if (r)
+ return r;
}
s3_handle_hdmi_cec(adev_to_drm(adev), true);
@@ -3377,12 +3367,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- struct dm_crtc_state *dm_new_crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
@@ -3414,8 +3398,10 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
r = dm_dmub_hw_init(adev);
- if (r)
+ if (r) {
drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
+ return r;
+ }
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -3461,11 +3447,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-
- /* leave display off for S4 sequence */
- if (adev->in_s4)
- return 0;
-
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
dm_state->context = dc_state_create(dm->dc, NULL);
@@ -3544,40 +3525,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
}
drm_connector_list_iter_end(&iter);
- /* Force mode set in atomic commit */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- new_crtc_state->active_changed = true;
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- reset_freesync_config_for_crtc(dm_new_crtc_state);
- }
-
- /*
- * atomic_check is expected to create the dc states. We need to release
- * them here, since they were duplicated as part of the suspend
- * procedure.
- */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- if (dm_new_crtc_state->stream) {
- WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
- dc_stream_release(dm_new_crtc_state->stream);
- dm_new_crtc_state->stream = NULL;
- }
- dm_new_crtc_state->base.color_mgmt_changed = true;
- }
-
- for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
- dm_new_plane_state = to_dm_plane_state(new_plane_state);
- if (dm_new_plane_state->dc_state) {
- WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
- dc_plane_state_release(dm_new_plane_state->dc_state);
- dm_new_plane_state->dc_state = NULL;
- }
- }
-
- drm_atomic_helper_resume(ddev, dm->cached_state);
-
- dm->cached_state = NULL;
+ dm_destroy_cached_state(adev);
/* Do MST topology probing after resuming cached state */
drm_connector_list_iter_begin(ddev, &iter);
@@ -3626,6 +3574,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.prepare_suspend = dm_prepare_suspend,
.suspend = dm_suspend,
.resume = dm_resume,
+ .complete = dm_complete,
.is_idle = dm_is_idle,
.wait_for_idle = dm_wait_for_idle,
.check_soft_reset = dm_check_soft_reset,
@@ -3697,13 +3646,15 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
luminance_range = &conn_base->display_info.luminance_range;
- if (luminance_range->max_luminance) {
- caps->aux_min_input_signal = luminance_range->min_luminance;
+ if (luminance_range->max_luminance)
caps->aux_max_input_signal = luminance_range->max_luminance;
- } else {
- caps->aux_min_input_signal = 0;
+ else
caps->aux_max_input_signal = 512;
- }
+
+ if (luminance_range->min_luminance)
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ else
+ caps->aux_min_input_signal = 1;
min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
if (min_input_signal_override >= 0)
@@ -3920,20 +3871,21 @@ static void handle_hpd_irq(void *param)
}
-static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
struct hpd_rx_irq_offload_work *offload_work =
kzalloc(sizeof(*offload_work), GFP_KERNEL);
if (!offload_work) {
- DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n");
return;
}
INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
offload_work->data = hpd_irq_data;
offload_work->offload_wq = offload_wq;
+ offload_work->adev = adev;
queue_work(offload_wq->wq, &offload_work->work);
DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
@@ -3975,7 +3927,7 @@ static void handle_hpd_rx_irq(void *param)
goto out;
if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -3997,7 +3949,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4014,7 +3966,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4024,7 +3976,7 @@ out:
if (result && !is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(dc_link);
@@ -4087,19 +4039,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
dmub_hpd_sense_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd sense callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback");
return -EINVAL;
}
}
@@ -4120,7 +4072,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1 ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6) {
- DRM_ERROR("Failed to register hpd irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n");
return -EINVAL;
}
@@ -4138,7 +4090,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1RX ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) {
- DRM_ERROR("Failed to register hpd rx irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n");
return -EINVAL;
}
@@ -4180,7 +4132,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4191,7 +4143,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4210,7 +4162,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4221,7 +4173,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4239,7 +4191,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4281,7 +4233,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4292,7 +4244,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4310,7 +4262,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4321,7 +4273,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4340,7 +4292,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4351,7 +4303,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4369,7 +4321,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4419,7 +4371,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4430,7 +4382,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4451,7 +4403,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
vrtl_int_srcid[i], &adev->vline0_irq);
if (r) {
- DRM_ERROR("Failed to add vline0 irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n");
return r;
}
@@ -4462,7 +4414,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
- DRM_ERROR("Failed to register vline0 irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n");
return -EINVAL;
}
@@ -4490,7 +4442,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4501,7 +4453,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4521,7 +4473,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4532,7 +4484,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4550,7 +4502,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4572,7 +4524,7 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
&adev->dmub_outbox_irq);
if (r) {
- DRM_ERROR("Failed to add outbox irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n");
return r;
}
@@ -4804,45 +4756,72 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
-static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
- uint32_t brightness)
+/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
+static inline u32 scale_input_to_fw(int min, int max, u64 input)
{
- unsigned int min, max;
- u8 prev_signal = 0, prev_lum = 0;
+ return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
+}
- if (!get_brightness_range(caps, &min, &max))
- return brightness;
+/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
+static inline u32 scale_fw_to_input(int min, int max, u64 input)
+{
+ return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
+}
+
+static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned int min, unsigned int max,
+ uint32_t *user_brightness)
+{
+ u32 brightness = scale_input_to_fw(min, max, *user_brightness);
+ u8 prev_signal = 0, prev_lum = 0;
+ int i = 0;
- for (int i = 0; i < caps->data_points; i++) {
- u8 signal, lum;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
+ return;
- if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
- break;
+ if (!caps->data_points)
+ return;
- signal = caps->luminance_data[i].input_signal;
- lum = caps->luminance_data[i].luminance;
+ /* choose a start point to run fewer interpolation steps */
+ if (caps->luminance_data[caps->data_points / 2].input_signal > brightness)
+ i = caps->data_points / 2;
+ do {
+ u8 signal = caps->luminance_data[i].input_signal;
+ u8 lum = caps->luminance_data[i].luminance;
/*
* brightness == signal: luminance is percent numerator
* brightness < signal: interpolate between previous and current luminance numerator
* brightness > signal: find next data point
*/
- if (brightness < signal)
- lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (brightness - prev_signal),
- signal - prev_signal);
- else if (brightness > signal) {
+ if (brightness > signal) {
prev_signal = signal;
prev_lum = lum;
+ i++;
continue;
}
- brightness = DIV_ROUND_CLOSEST(lum * brightness, 101);
- break;
- }
+ if (brightness < signal)
+ lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
+ (brightness - prev_signal),
+ signal - prev_signal);
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
+ return;
+ } while (i < caps->data_points);
+}
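/*
 * Worked example of the interpolation above (illustrative, not part of
 * the patch): with data points (input_signal, luminance) = (40, 30) and
 * (80, 70), a firmware-scaled brightness of 60 lands between them, so
 *   lum = 30 + DIV_ROUND_CLOSEST((70 - 30) * (60 - 40), 80 - 40) = 50
 * and the value written back is scale_fw_to_input(min, max,
 * DIV_ROUND_CLOSEST(50 * 60, 101)).
 */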
+
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
- // Rescale 0..255 to min..max
- return min + DIV_ROUND_CLOSEST((max - min) * brightness,
- AMDGPU_MAX_BL_LEVEL);
+ convert_custom_brightness(caps, min, max, &brightness);
+
+ // Rescale 0..max to min..max
+ return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max);
}
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4855,8 +4834,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
if (brightness < min)
return 0;
- // Rescale min..max to 0..255
- return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ // Rescale min..max to 0..max
+ return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min),
max - min);
}
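/*
 * Standalone sketch of the rescaling math above (illustration only;
 * assumes AMDGPU_MAX_BL_LEVEL == 255). Builds as plain userspace C:
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_BL 255u
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* mirror of scale_input_to_fw(): slider value 0..(max - min) -> 0..255 */
static uint32_t to_fw(int min, int max, uint64_t in)
{
	return DIV_ROUND_CLOSEST(in * MAX_BL, (uint64_t)(max - min));
}

/* mirror of scale_fw_to_input(): 0..255 -> min..max */
static uint32_t to_input(int min, int max, uint64_t fw)
{
	return min + DIV_ROUND_CLOSEST(fw * (uint64_t)(max - min), (uint64_t)MAX_BL);
}

int main(void)
{
	/* caps range 12..255 gives a slider range 0..243; slider at 128 */
	uint32_t fw = to_fw(12, 255, 128);

	printf("fw=%u back=%u\n", fw, to_input(12, 255, fw)); /* fw=134 back=140 */
	return 0;
}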
@@ -4886,6 +4865,14 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
reallow_idle = true;
}
+ if (trace_amdgpu_dm_brightness_enabled()) {
+ trace_amdgpu_dm_brightness(__builtin_return_address(0),
+ user_brightness,
+ brightness,
+ caps->aux_support,
+ power_supply_is_system_supplied() > 0);
+ }
+
if (caps->aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
@@ -4981,8 +4968,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct drm_device *drm = aconnector->base.dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
- struct amdgpu_dm_backlight_caps caps = { 0 };
+ struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
+ int min, max;
if (aconnector->bl_idx == -1)
return;
@@ -4994,18 +4982,21 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
return;
}
- amdgpu_acpi_get_backlight_caps(&caps);
- if (caps.caps_valid) {
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+ if (get_brightness_range(caps, &min, &max)) {
if (power_supply_is_system_supplied() > 0)
- props.brightness = caps.ac_level;
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100);
else
- props.brightness = caps.dc_level;
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100);
+ /* min is zero, so max needs to be adjusted */
+ props.max_brightness = max - min;
+ drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
+ caps->ac_level, caps->dc_level);
} else
- props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
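/*
 * Worked example (illustrative): with min = 12 and max = 255 the
 * exposed range becomes 0..243, so ac_level = 80 percent yields an
 * initial props.brightness = DIV_ROUND_CLOSEST(243 * 80, 100) = 194.
 */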
- if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+ if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
drm_info(drm, "Using custom brightness curve\n");
- props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -5017,10 +5008,10 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
- DRM_ERROR("DM: Backlight registration failed!\n");
+ drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
} else
- DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+ drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -5034,7 +5025,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
if (!plane) {
- DRM_ERROR("KMS: Failed to allocate plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n");
return -ENOMEM;
}
plane->type = plane_type;
@@ -5052,7 +5043,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
- DRM_ERROR("KMS: Failed to initialize plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n");
kfree(plane);
return ret;
}
@@ -5121,14 +5112,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize mode config\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n");
return -EINVAL;
}
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
if (primary_planes > AMDGPU_MAX_PLANES) {
- DRM_ERROR("DM: Plane nums out of 6 planes\n");
+ drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n");
return -EINVAL;
}
@@ -5141,7 +5132,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, mode_info, i,
DRM_PLANE_TYPE_PRIMARY, plane)) {
- DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n");
goto fail;
}
}
@@ -5173,14 +5164,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY, plane)) {
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n");
goto fail;
}
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
- DRM_ERROR("KMS: Failed to initialize crtc\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n");
goto fail;
}
@@ -5200,7 +5191,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (register_outbox_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5250,7 +5241,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
if (link_cnt > MAX_LINKS) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"KMS: Cannot support more than %d display indexes\n",
MAX_LINKS);
goto fail;
@@ -5266,12 +5257,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
if (!wbcon) {
- DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n");
continue;
}
if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
- DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n");
kfree(wbcon);
continue;
}
@@ -5291,12 +5282,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
- DRM_ERROR("KMS: Failed to initialize encoder\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n");
goto fail;
}
if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
- DRM_ERROR("KMS: Failed to initialize connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n");
goto fail;
}
@@ -5305,7 +5296,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
aconnector;
if (!dc_link_detect_connection_type(link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(link);
@@ -5327,8 +5318,15 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (amdgpu_dm_set_replay_caps(link, aconnector))
psr_feature_enabled = false;
- if (psr_feature_enabled)
+ if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
+ drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ link->psr_settings.psr_feature_enabled,
+ link->psr_settings.psr_version,
+ link->dpcd_caps.psr_info.psr_version,
+ link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
+ link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
+ }
}
}
amdgpu_set_panel_orientation(&aconnector->base);
@@ -5342,7 +5340,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_OLAND:
if (dce60_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5364,7 +5362,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5392,12 +5390,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
goto fail;
}
@@ -5414,7 +5412,8 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
- drm_atomic_private_obj_fini(&dm->atomic_obj);
+ if (dm->atomic_obj.state)
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
}
/******************************************************************************
@@ -5558,7 +5557,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
/* if there is no object header, skip DM */
if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
- dev_info(adev->dev, "No object header, skipping DM\n");
+ drm_info(adev_to_drm(adev), "No object header, skipping DM\n");
return -ENOENT;
}
@@ -5670,7 +5669,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
adev->mode_info.num_dig = 4;
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
return -EINVAL;
}
@@ -5819,7 +5818,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
break;
default:
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Unsupported screen format %p4cc\n",
&fb->format->format);
return -EINVAL;
@@ -6340,6 +6339,7 @@ static void fill_stream_properties_from_drm_display_mode(
struct amdgpu_dm_connector *aconnector = NULL;
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
+ ssize_t err;
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
aconnector = to_amdgpu_dm_connector(connector);
@@ -6386,9 +6386,17 @@ static void fill_stream_properties_from_drm_display_mode(
}
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
- drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err);
timing_out->vic = avi_frame.video_code;
- drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -6513,7 +6521,7 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
}
static struct dc_sink *
-create_fake_sink(struct dc_link *link)
+create_fake_sink(struct drm_device *dev, struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
@@ -6523,7 +6531,7 @@ create_fake_sink(struct dc_link *link)
sink = dc_sink_create(&sink_init_data);
if (!sink) {
- DRM_ERROR("Failed to create sink!\n");
+ drm_err(dev, "Failed to create sink!\n");
return NULL;
}
sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
@@ -6656,7 +6664,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
m_pref = list_first_entry_or_null(
&aconnector->base.modes, struct drm_display_mode, head);
if (!m_pref) {
- DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n");
return NULL;
}
}
@@ -6831,7 +6839,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n",
__func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
@@ -6851,7 +6859,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -6879,6 +6887,7 @@ create_stream_for_sink(struct drm_connector *connector,
const struct dc_stream_state *old_stream,
int requested_bpc)
{
+ struct drm_device *dev = connector->dev;
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_display_mode *preferred_mode = NULL;
const struct drm_connector_state *con_state = &dm_state->base;
@@ -6901,11 +6910,6 @@ create_stream_for_sink(struct drm_connector *connector,
drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));
- if (connector == NULL) {
- DRM_ERROR("connector is NULL!\n");
- return stream;
- }
-
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
aconnector = NULL;
aconnector = to_amdgpu_dm_connector(connector);
@@ -6920,7 +6924,7 @@ create_stream_for_sink(struct drm_connector *connector,
}
if (!aconnector || !aconnector->dc_sink) {
- sink = create_fake_sink(link);
+ sink = create_fake_sink(dev, link);
if (!sink)
return stream;
@@ -6932,7 +6936,7 @@ create_stream_for_sink(struct drm_connector *connector,
stream = dc_create_stream_for_sink(sink);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(dev, "Failed to create stream for sink!\n");
goto finish;
}
@@ -6964,7 +6968,7 @@ create_stream_for_sink(struct drm_connector *connector,
* case, we call set mode ourselves to restore the previous mode
* and the modelist may not be filled in time.
*/
- DRM_DEBUG_DRIVER("No preferred mode found\n");
+ drm_dbg_driver(dev, "No preferred mode found\n");
} else if (aconnector) {
recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
@@ -7414,6 +7418,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
struct i2c_adapter *ddc;
+ struct drm_device *dev = connector->dev;
if (dc_link && dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
@@ -7423,7 +7428,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7482,7 +7487,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7574,7 +7579,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_result = DC_FAIL_ATTACH_SURFACES;
if (dc_result == DC_OK)
- dc_result = dc_validate_global_state(dc, dc_state, true);
+ dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY);
cleanup:
if (dc_state)
@@ -7616,7 +7621,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
dm_state, old_stream,
requested_bpc);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n");
break;
}
@@ -7632,7 +7637,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
+ DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
@@ -7691,7 +7696,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
aconnector->base.force != DRM_FORCE_ON) {
- DRM_ERROR("dc_sink is NULL!\n");
+ drm_err(connector->dev, "dc_sink is NULL!\n");
goto fail;
}
@@ -7899,6 +7904,23 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ enum drm_mode_status result;
+
+ result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode);
+ if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) {
+ drm_dbg_driver(encoder->dev,
+ "mode %dx%d@%dHz is not native, enabling scaling\n",
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+ drm_mode_vrefresh(adjusted_mode));
+ dm_new_connector_state->scaling = RMX_FULL;
+ }
+ return 0;
+ }
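/*
 * Illustrative effect of the fallback above (not part of the patch):
 * a request for 1280x720@60 on a 1920x1080 eDP panel fails
 * drm_crtc_helper_mode_valid_fixed(), so rather than rejecting the
 * mode the connector state is switched to RMX_FULL and the panel
 * scales the image.
 */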
+
if (!aconnector->mst_output_port)
return 0;
@@ -8356,7 +8378,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
- if (encoder)
+ if (encoder && (connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
}
@@ -8599,7 +8622,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
i2c = create_i2c(link->ddc, false);
if (!i2c) {
- DRM_ERROR("Failed to create i2c adapter data\n");
+ drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n");
return -ENOMEM;
}
@@ -8607,7 +8630,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
res = i2c_add_adapter(&i2c->base);
if (res) {
- DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
goto out_free;
}
@@ -8621,7 +8644,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
&i2c->base);
if (res) {
- DRM_ERROR("connector_init failed\n");
+ drm_err(adev_to_drm(dm->adev), "connector_init failed\n");
aconnector->connector_id = -1;
goto out_free;
}
@@ -9111,7 +9134,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
@@ -9119,7 +9142,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
}
}
@@ -9206,13 +9229,13 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
if (crtc_state->stream) {
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
- DRM_ERROR("DC failed to set cursor attributes\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n");
update->cursor_attributes = &crtc_state->stream->cursor_attributes;
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
- DRM_ERROR("DC failed to set cursor position\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor position\n");
update->cursor_position = &crtc_state->stream->cursor_position;
}
@@ -9463,7 +9486,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].surface = dc_plane;
if (!bundle->surface_updates[planes_count].surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
+ drm_err(dev, "No surface for CRTC: id=%d\n",
acrtc_attach->crtc_id);
continue;
}
@@ -9979,20 +10002,20 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
if (!wb_info) {
- DRM_ERROR("Failed to allocate wb_info\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n");
return;
}
acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
if (!acrtc) {
- DRM_ERROR("no amdgpu_crtc found\n");
+ drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n");
kfree(wb_info);
return;
}
afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
if (!afb) {
- DRM_ERROR("No amdgpu_framebuffer found\n");
+ drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n");
kfree(wb_info);
return;
}
@@ -10213,7 +10236,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -10305,7 +10328,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
*/
dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
if (!dummy_updates) {
- DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
}
for (j = 0; j < status->plane_count; j++)
@@ -10513,16 +10536,20 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
*/
conn_state = drm_atomic_get_connector_state(state, connector);
- ret = PTR_ERR_OR_ZERO(conn_state);
- if (ret)
+ /* Check for error in getting connector state */
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
goto out;
+ }
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
+ /* Check for error in getting crtc state */
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
goto out;
+ }
/* force a restore */
crtc_state->mode_changed = true;
@@ -10530,9 +10557,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
/* Attach plane to drm_atomic_state */
plane_state = drm_atomic_get_plane_state(state, plane);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (ret)
+ /* Check for error in getting plane state */
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
goto out;
+ }
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
@@ -10540,7 +10569,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
out:
drm_atomic_state_put(state);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(ddev, "Restoring old state failed with %i\n", ret);
return ret;
}
@@ -10624,7 +10653,7 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n",
crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
@@ -10740,6 +10769,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct dm_atomic_state *dm_state = NULL;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct dc_stream_state *new_stream;
+ struct amdgpu_device *adev = dm->adev;
int ret = 0;
/*
@@ -10769,8 +10799,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
drm_old_conn_state = drm_atomic_get_old_connector_state(state,
connector);
- if (IS_ERR(drm_new_conn_state)) {
- ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ if (WARN_ON(!drm_new_conn_state)) {
+ ret = -EINVAL;
goto fail;
}
@@ -10793,7 +10823,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
*/
if (!new_stream) {
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n",
__func__, acrtc->base.base.id);
ret = -ENOMEM;
goto fail;
@@ -10831,7 +10861,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d",
new_crtc_state->mode_changed);
}
}
@@ -10869,7 +10899,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER(
+ drm_dbg_driver(adev_to_drm(adev),
"Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
@@ -10890,7 +10920,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (ret)
goto fail;
- DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n",
crtc->base.id);
/* i.e. reset mode */
@@ -11743,7 +11773,7 @@ static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
old_plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) {
- DRM_ERROR("Failed to get plane state for plane %s\n", plane->name);
+ drm_err(dev, "Failed to get plane state for plane %s\n", plane->name);
return false;
}
@@ -12189,7 +12219,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
goto fail;
}
- status = dc_validate_global_state(dc, dm_state->context, true);
+ status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY);
if (status != DC_OK) {
drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
@@ -12312,7 +12342,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
if (!res) {
- DRM_ERROR("EDID CEA parser failed\n");
+ drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n");
return false;
}
@@ -12320,7 +12350,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
if (output->type == DMUB_CMD__EDID_CEA_ACK) {
if (!output->ack.success) {
- DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n",
output->ack.offset);
}
} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
@@ -12332,7 +12362,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
} else {
- DRM_WARN("Unknown EDID CEA parser results\n");
+ drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
return false;
}
@@ -12548,7 +12578,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
if (!connector->state) {
- DRM_ERROR("%s - Connector has no state", __func__);
+ drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__);
goto update;
}
@@ -12733,7 +12763,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
}
if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
*operation_result = AUX_RET_ERROR_TIMEOUT;
goto out;
}
@@ -12743,31 +12773,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* Transient states before tunneling is enabled could
* lead to this error. We can ignore this for now.
*/
- if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
- DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
+ drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
}
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ *operation_result = p_notify->result;
goto out;
}
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
+ if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
+ /* The reply is stored in the top nibble of the command. */
+ payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
- payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
- if (!payload->write && p_notify->aux_reply.length &&
- (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
-
- if (payload->length != p_notify->aux_reply.length) {
- DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
- p_notify->aux_reply.length,
- payload->address, payload->length);
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
- goto out;
- }
-
+ /* a write request may also receive a byte indicating the number of partially written bytes */
+ if (p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
- }
/* success */
ret = p_notify->aux_reply.length;
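/*
 * Example of the reply-nibble handling above (illustrative): a plain
 * ACK arrives as command 0x00, so the low nibble gives
 * payload->reply[0] = 0x0; a reply packed in the top nibble, e.g.
 * command 0x20, is shifted down to payload->reply[0] = 0x2.
 */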
@@ -12778,6 +12801,79 @@ out:
return ret;
}
+static void abort_fused_io(
+ struct dc_context *ctx,
+ const struct dmub_cmd_fused_request *request
+)
+{
+ union dmub_rb_cmd command = { 0 };
+ struct dmub_rb_cmd_fused_io *io = &command.fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT;
+ io->header.payload_bytes = sizeof(*io) - sizeof(io->header);
+ io->request = *request;
+ dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+static bool execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io))
+ return false;
+
+ struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line];
+ struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io;
+ const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+ && first->header.ret_status
+ && first->request.status == FUSED_REQUEST_STATUS_SUCCESS;
+
+ if (!result)
+ return false;
+
+ while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) {
+ reinit_completion(&sync->replied);
+
+ struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data;
+
+ static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch");
+
+ if (reply->identifier == first->request.identifier) {
+ first->request = *reply;
+ return true;
+ }
+ }
+
+ reinit_completion(&sync->replied);
+ first->request.status = FUSED_REQUEST_STATUS_TIMEOUT;
+ abort_fused_io(ctx, &first->request);
+ return false;
+}
+
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us)
+{
+ struct amdgpu_display_manager *dm = &dev->dm;
+
+ mutex_lock(&dm->dpia_aux_lock);
+
+ const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us);
+
+ mutex_unlock(&dm->dpia_aux_lock);
+ return result;
+}
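/*
 * Hypothetical caller sketch (illustration only; any field or value
 * not used above is an assumption):
 *
 *	union dmub_rb_cmd cmd = { 0 };
 *
 *	cmd.fused_io.header.type = DMUB_CMD__FUSED_IO;
 *	cmd.fused_io.request.identifier = 1;
 *	if (!amdgpu_dm_execute_fused_io(adev, link, &cmd, 1, 2000))
 *		drm_warn(adev_to_drm(adev), "fused io failed or timed out\n");
 */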
+
int amdgpu_dm_process_dmub_set_config_sync(
struct dc_context *ctx,
unsigned int link_index,
@@ -12796,7 +12892,7 @@ int amdgpu_dm_process_dmub_set_config_sync(
ret = 0;
*operation_result = adev->dm.dmub_notify->sc_status;
} else {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
ret = -1;
*operation_result = SET_CONFIG_UNKNOWN_ERROR;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 385faaca6e26..b937da0a4e4a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,7 +50,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 7
+#define AMDGPU_DMUB_NOTIFICATION_MAX 8
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
@@ -81,6 +81,7 @@ struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;
+struct dmub_cmd_fused_request;
struct amd_vsdb_block {
unsigned char ieee_id[3];
@@ -276,6 +277,10 @@ struct hpd_rx_irq_offload_work {
* @offload_wq: offload work queue that this work is queued to
*/
struct hpd_rx_irq_offload_work_queue *offload_wq;
+ /**
+ * @adev: amdgpu_device pointer
+ */
+ struct amdgpu_device *adev;
};
/**
@@ -614,6 +619,13 @@ struct amdgpu_display_manager {
bool aux_hpd_discon_quirk;
/**
+ * @edp0_on_dp1_quirk:
+ *
+ * quirk for platforms that put edp0 on DP1.
+ */
+ bool edp0_on_dp1_quirk;
+
+ /**
* @dpia_aux_lock:
*
* Guards access to DPIA AUX
@@ -624,8 +636,9 @@ struct amdgpu_display_manager {
* @bb_from_dmub:
*
* Bounding box data read from dmub during early initialization for DCN4+
+ * Data is stored as a byte array that should be cast to the appropriate bb struct
*/
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
/**
* @oem_i2c:
@@ -633,6 +646,16 @@ struct amdgpu_display_manager {
* OEM i2c bus
*/
struct amdgpu_i2c_adapter *oem_i2c;
+
+ /**
+ * @fused_io:
+ *
+ * dmub fused io interface
+ */
+ struct fused_io_sync {
+ struct completion replied;
+ char reply_data[0x40]; // Cannot include dmub_cmd here
+ } fused_io[8];
};
enum dsc_clock_force_state {
@@ -1012,6 +1035,14 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
struct aux_payload *payload, enum aux_return_code_type *operation_result);
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
@@ -1045,4 +1076,6 @@ void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
+void retrieve_dmi_info(struct amdgpu_display_manager *dm);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index e8bdd7f0c460..010172f930ae 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -246,8 +246,6 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
- struct amdgpu_device *adev = drm_to_adev(dm->ddev);
- int r;
mutex_lock(&dm->dc_lock);
@@ -275,15 +273,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0) {
- r = amdgpu_dpm_pause_power_profile(adev, true);
- if (r)
- dev_warn(adev->dev, "failed to set default power profile mode\n");
+ if (dm->active_vblank_irq_count == 0)
dc_allow_idle_optimizations(dm->dc, true);
- r = amdgpu_dpm_pause_power_profile(adev, false);
- if (r)
- dev_warn(adev->dev, "failed to restore the power profile mode\n");
- }
mutex_unlock(&dm->dc_lock);
@@ -670,6 +661,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
+ struct drm_plane_state *primary_state;
+
+ /* Pull in primary plane for correct VRR handling */
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
@@ -737,7 +737,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
* support programmable degamma anywhere.
*/
is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
- drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
+ /* Don't enable DRM CRTC degamma property for DCN401 since the
+ * pre-blending degamma LUT doesn't apply to cursor, and therefore
+ * can't work similar to a post-blending degamma LUT as in other hw
+ * versions.
+ * TODO: revisit it once KMS plane color API is merged.
+ */
+ drm_crtc_enable_color_mgmt(&acrtc->base,
+ (is_dcn &&
+ dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ?
+ MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index c7d13e743e6c..b726bcd18e29 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3988,7 +3988,7 @@ static int capabilities_show(struct seq_file *m, void *unused)
struct hubbub *hubbub = dc->res_pool->hubbub;
- if (hubbub->funcs->get_mall_en)
+ if (hubbub && hubbub->funcs->get_mall_en)
hubbub->funcs->get_mall_en(hubbub, &mall_in_use);
if (dc->cap_funcs.get_subvp_en)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 8f22ad966543..b1d1897f5eaf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -26,6 +26,7 @@
#include "amdgpu_dm_hdcp.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
+#include "dc_fused_io.h"
#include "dm_helpers.h"
#include <drm/display/drm_hdcp_helper.h>
#include "hdcp_psp.h"
@@ -76,6 +77,34 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static bool lp_atomic_write_poll_read_i2c(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
+static bool lp_atomic_write_poll_read_aux(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
struct ta_hdcp_shared_memory *hdcp_cmd;
@@ -694,8 +723,8 @@ ret:
static const struct bin_attribute data_attr = {
.attr = {.name = "hdcp_srm", .mode = 0664},
.size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
- .write_new = srm_data_write,
- .read_new = srm_data_read,
+ .write = srm_data_write,
+ .read = srm_data_read,
};
struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
@@ -732,7 +761,10 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
+ struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config;
+ struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
+
+ config->psp.handle = &adev->psp;
if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
@@ -740,12 +772,22 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
dc->ctx->dce_version == DCN_VERSION_3_51 ||
dc->ctx->dce_version == DCN_VERSION_3_6 ||
dc->ctx->dce_version == DCN_VERSION_3_16)
- hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
- hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
- hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
- hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ config->psp.caps.dtm_v3_supported = 1;
+ config->ddc.handle = dc_get_link_at_index(dc, i);
+
+ ddc_funcs->write_i2c = lp_write_i2c;
+ ddc_funcs->read_i2c = lp_read_i2c;
+ ddc_funcs->write_dpcd = lp_write_dpcd;
+ ddc_funcs->read_dpcd = lp_read_dpcd;
+
+ config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
+ if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) {
+ ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
+ ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
+ } else {
+ ddc_funcs->atomic_write_poll_read_i2c = NULL;
+ ddc_funcs->atomic_write_poll_read_aux = NULL;
+ }
memset(hdcp_work[i].aconnector, 0,
sizeof(struct amdgpu_dm_connector *) *
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 1395a748d726..9e3e51a2dc49 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -630,6 +630,19 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ struct amdgpu_device *dev = ctx->driver_context;
+
+ return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us);
+}
+
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
bool is_write_cmd,
unsigned char cmd,
@@ -1016,6 +1029,10 @@ enum dc_edid_status dm_helpers_read_local_edid(
return EDID_NO_RESPONSE;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
+ if (!edid ||
+ edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH)
+ return EDID_BAD_INPUT;
+
sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7ceedf626d23..7187d5aedf0a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -51,6 +51,9 @@
#define PEAK_FACTOR_X1000 1006
+/*
+ * This function handles both native AUX and I2C-Over-AUX transactions.
+ */
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -59,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum aux_return_code_type operation_result;
struct amdgpu_device *adev;
struct ddc_service *ddc;
+ uint8_t copy[16];
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -74,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
+ if (payload.write) {
+ memcpy(copy, msg->buffer, msg->size);
+ payload.data = copy;
+ }
+
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
@@ -87,15 +96,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (adev->dm.aux_hpd_discon_quirk) {
if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
operation_result == AUX_RET_ERROR_HPD_DISCON) {
- result = 0;
+ result = msg->size;
operation_result = AUX_RET_SUCCESS;
}
}
- if (payload.write && result >= 0)
- result = msg->size;
+ /*
+ * a result of 0 also covers the AUX_DEFER/I2C_DEFER cases
+ */
+ if (payload.write && result >= 0) {
+ if (result) {
+ /* one byte indicating the number of partially written bytes */
+ drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n");
+ result = payload.data[0];
+ } else if (!payload.reply[0])
+ /* I2C_ACK | AUX_ACK */
+ result = msg->size;
+ }
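/*
 * Illustrative trace of the branch above: if the sink only partially
 * accepts a write, result > 0 and payload.data[0] carries the number
 * of bytes actually written (say 3), which the DRM core uses to retry
 * the remainder; a clean ACK (payload.reply[0] == 0) reports the full
 * msg->size instead.
 */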
- if (result < 0)
+ if (result < 0) {
switch (operation_result) {
case AUX_RET_SUCCESS:
break;
@@ -114,6 +133,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
+ drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result);
+ }
+
+ if (payload.reply[0])
+ drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.",
+ payload.reply[0]);
+
return result;
}
@@ -1713,16 +1739,17 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
struct dc_dsc_policy dsc_policy = {0};
+ bool is_dsc_possible;
dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
- dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
- stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
- dsc_policy.min_target_bpp * 16,
- dsc_policy.max_target_bpp * 16,
- &stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
-
- return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
+ is_dsc_possible = dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
+
+ return is_dsc_possible;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 3e0f45f1711c..eef51652ca35 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -92,9 +92,9 @@ enum dm_micro_swizzle {
MICRO_SWIZZLE_R = 3
};
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier)
{
- return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
+ return amdgpu_lookup_format_info(pixel_format, modifier);
}
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
@@ -948,13 +948,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 615d2ab2b803..ea2619b507db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -58,7 +58,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier);
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index e140b7a04d72..ff7b867ae98b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -87,14 +87,6 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
link->psr_settings.psr_feature_enabled = true;
}
-
- DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
- link->psr_settings.psr_feature_enabled,
- link->psr_settings.psr_version,
- link->dpcd_caps.psr_info.psr_version,
- link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
- link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
-
}
/*
@@ -127,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
- if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
- return false;
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
+ return false;
+ }
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
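
The DSC slice-height fixup is now reached only when the negotiated PSR version is SU_1; for any other version, setup proceeds straight to dc_link_setup_psr(). A condensed sketch of the gate, using only the names visible in the hunk above:

	/* Condensed form of the new gating: the slice-height fixup only
	 * applies to PSR-SU panels; other PSR versions skip it. */
	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 &&
	    !psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
		return false;
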
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
new file mode 100644
index 000000000000..1da07ebf9217
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/dmi.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
+ },
+ },
+ {}
+ /* TODO: refactor this from a fixed table to a dynamic option */
+};
+
+void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ struct drm_device *dev = dm->ddev;
+ int dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+ dm->edp0_on_dp1_quirk = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
+
+ if (!dmi_id)
+ return;
+
+ if (quirk_entries.aux_hpd_discon) {
+ dm->aux_hpd_discon_quirk = true;
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ dm->edp0_on_dp1_quirk = true;
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
+ }
+}
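
dmi_check_system() walks the quirk table, runs the callback of each entry whose DMI_MATCH fields all match the platform, and returns the number of entries that matched; that is why retrieve_dmi_info() only needs the booleans the callbacks leave behind in quirk_entries. A self-contained sketch of the same pattern, with placeholder vendor and product strings:

#include <linux/dmi.h>
#include <linux/printk.h>

static bool example_quirk;

static int example_quirk_cb(const struct dmi_system_id *id)
{
	example_quirk = true;	/* record the match as a side effect */
	return 0;		/* 0: keep scanning for further matches */
}

static const struct dmi_system_id example_table[] = {
	{
		.callback = example_quirk_cb,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
	},
	{}	/* zeroed terminator ends the table */
};

static void example_apply_quirks(void)
{
	if (dmi_check_system(example_table))
		pr_info("example quirk active\n");
}
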
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 4686d4b0cbad..95f890fda8aa 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -726,6 +726,32 @@ TRACE_EVENT(dcn_optc_lock_unlock_state,
)
);
+TRACE_EVENT(amdgpu_dm_brightness,
+ TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac),
+ TP_ARGS(function, user_brightness, converted_brightness, aux, ac),
+ TP_STRUCT__entry(
+ __field(void *, function)
+ __field(u32, user_brightness)
+ __field(u32, converted_brightness)
+ __field(bool, aux)
+ __field(bool, ac)
+ ),
+ TP_fast_assign(
+ __entry->function = function;
+ __entry->user_brightness = user_brightness;
+ __entry->converted_brightness = converted_brightness;
+ __entry->aux = aux;
+ __entry->ac = ac;
+ ),
+ TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s",
+ (void *)__entry->function,
+ (u32)__entry->user_brightness,
+ (u32)__entry->converted_brightness,
+ (__entry->aux) ? "true" : "false",
+ (__entry->ac) ? "AC" : "DC"
+ )
+);
+
#endif /* _AMDGPU_DM_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
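
TRACE_EVENT(amdgpu_dm_brightness, ...) expands into a trace_amdgpu_dm_brightness() emitter for call sites to invoke; the first argument feeds the %ps specifier, so a caller typically passes its own function so the symbol name shows up in the trace output. A hypothetical call site; the function and variable names here are illustrative, not taken from the patch:

	/* Hypothetical emitter call in a brightness-set path */
	trace_amdgpu_dm_brightness(amdgpu_dm_backlight_set_level,
				   user_brightness,	/* value requested by userspace */
				   converted,		/* after scaling to firmware units */
				   use_aux,		/* true when programmed over DP AUX */
				   on_ac_power);	/* AC vs DC power source */
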
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 0d5fefb0f591..d9527c05fc87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -102,13 +102,13 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "failed to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3e1f5b689718..3c9ecea7eebc 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -53,31 +53,30 @@ DC_LIBS += hdcp
ifdef CONFIG_DRM_AMD_DC_FP
DC_LIBS += sspl
-DC_SPL_TRANS += dc_spl_translate.o
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, dc_spl_translate.o)
endif
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
-DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
+FILES =
+FILES += dc_dmub_srv.o
+FILES += dc_edid_parser.o
+FILES += dc_fused_io.o
+FILES += dc_helper.o
+FILES += core/dc.o
+FILES += core/dc_debug.o
+FILES += core/dc_hw_sequencer.o
+FILES += core/dc_link_enc_cfg.o
+FILES += core/dc_link_exports.o
+FILES += core/dc_resource.o
+FILES += core/dc_sink.o
+FILES += core/dc_stat.o
+FILES += core/dc_state.o
+FILES += core/dc_stream.o
+FILES += core/dc_surface.o
+FILES += core/dc_vm_helper.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, $(FILES))
-DISPLAY_CORE += dc_vm_helper.o
-
-AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
-
-AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
-
-AMD_DC_SPL_TRANS = $(addprefix $(AMDDALPATH)/dc/,$(DC_SPL_TRANS))
-
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
-AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
-
-DC_DMUB += dc_dmub_srv.o
-DC_EDID += dc_edid_parser.o
-AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
-AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
-
-AMD_DISPLAY_FILES += $(AMD_DC_SPL_TRANS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index 681799468487..d897f8a30ede 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -1393,7 +1393,7 @@ static void calculate_bandwidth(
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each set of clock frequencies*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
- /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
@@ -1407,7 +1407,7 @@ static void calculate_bandwidth(
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each display pipe*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
- /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 88d3f9d7dd55..452206b5095e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -51,8 +51,6 @@ static inline unsigned long long complete_integer_division_u64(
{
unsigned long long result;
- ASSERT(divisor);
-
result = div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -213,9 +211,6 @@ struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg)
* @note
* Good idea to use Newton's method
*/
-
- ASSERT(arg.value);
-
return dc_fixpt_from_fraction(
dc_fixpt_one.value,
arg.value);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 3bacf470f7c5..67f08495b7e6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2384,10 +2384,10 @@ static enum bp_result get_integrated_info_v8(
}
/*
- * get_integrated_info_v8
+ * get_integrated_info_v9
*
* @brief
- * Get V8 integrated BIOS information
+ * Get V9 integrated BIOS information
*
* @param
* bios_parser *bp - [in]BIOS parser handler to get master data table
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 2c645dffec18..f2b1720a6a66 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -396,6 +396,7 @@ static enum bp_result transmitter_control_v1_7(
process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate;
process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask;
}
+ dig_v1_7.skip_phy_ssc_reduction = link->wa_flags.skip_phy_ssc_reduction;
}
// Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d9955c5d2e5e..60021671b386 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
###############################################################################
# DCN30
###############################################################################
-CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o
+CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c3e58c730b1..4071851f9e86 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt(
if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
- if (!stream->dpms_off || (stream_status && stream_status->plane_count))
+ if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count))
display_count++;
}
@@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return NULL;
}
dce60_clk_mgr_construct(ctx, clk_mgr);
- dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26feefbb8990..f5ad0a177038 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -245,6 +245,11 @@ int dce_set_clock(
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ /* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */
+ if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 ||
+ clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4)
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0;
+
if (clk_mgr_dce->dfs_bypass_active)
pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 19897fa52e7e..d82a52319088 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -142,17 +142,3 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
return actual_dispclk_set_mhz * 1000;
}
-
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
index 083cb3158859..81d7c912549c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
@@ -27,6 +27,5 @@
#define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 23b390245b5d..5a633333dbb5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -164,20 +164,6 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
return actual_dispclk_set_mhz * 1000;
}
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
-
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index 1ce19d875358..f76fad87f0e1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -30,7 +30,6 @@ enum dcn_pwr_state;
int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz);
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
index fa09c594fd36..06da34676965 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
@@ -56,6 +56,7 @@
#define DALSMC_MSG_SetDisplayRefreshFromMall 0xF
#define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10
#define DALSMC_MSG_BacoAudioD3PME 0x11
-#define DALSMC_Message_Count 0x12
+#define DALSMC_MSG_SmartAccess 0x12
+#define DALSMC_Message_Count 0x13
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 8083a553c60e..ef77fcd164ed 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -30,6 +30,7 @@
#include "dce100/dce_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dml/dcn30/dcn30_fpu.h"
+#include "dcn30/dcn30m_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = {
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
.notify_link_rate_change = dcn30_notify_link_rate_change,
- .is_smu_present = dcn3_is_smu_present
+ .is_smu_present = dcn3_is_smu_present,
+ .set_smartmux_switch = dcn30m_set_smartmux_switch
};
static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
index 7ac87ef26aec..8e8a11c7437e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +18,19 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-#include <nvif/class.h>
+#include "clk_mgr_internal.h"
+#include "dcn30/dcn30m_clk_mgr.h"
+#include "dcn30m_clk_mgr_smu_msg.h"
-static const struct nvkm_engine_func
-ad102_ofa = {
- .sclass = {
- { -1, -1, NVC9FA_VIDEO_OFA },
- {}
- }
-};
-int
-ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set)
{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ad102_ofa, device, type, inst, pengine);
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- return -ENODEV;
+ return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
index d72b3aae9a2b..757985b2eadc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +18,14 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-static const struct nvkm_engine_func
-ad102_nvdec = {
- .sclass = {
- { -1, -1, NVC9B0_VIDEO_DECODER },
- {}
- }
-};
+#ifndef __DCN30M_CLK_MGR_H__
+#define __DCN30M_CLK_MGR_H__
-int
-ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvdec **pnvdec)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec);
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set);
- return -ENODEV;
-}
+#endif //__DCN30M_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
new file mode 100644
index 000000000000..0dd0583ff21e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn30m_clk_mgr_smu_msg.h"
+
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+#include "dm_helpers.h"
+
+#include "dalsmc.h"
+
+#define mmDAL_MSG_REG 0x1628A
+#define mmDAL_ARG_REG 0x16273
+#define mmDAL_RESP_REG 0x16274
+
+#define REG(reg_name) \
+ mm ## reg_name
+
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ CTX->logger
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+
+
+/*
+ * Function to be used instead of REG_WAIT macro because the wait ends when
+ * the register is NOT EQUAL to zero, and because the translation in msg_if.h
+ * won't work with REG_WAIT.
+ */
+static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr,
+ unsigned int delay_us, unsigned int max_retries)
+{
+ uint32_t reg = 0;
+
+ do {
+ reg = REG_READ(DAL_RESP_REG);
+ if (reg)
+ break;
+
+ if (delay_us >= 1000)
+ msleep(delay_us/1000);
+ else if (delay_us > 0)
+ udelay(delay_us);
+ } while (max_retries--);
+
+ /* handle DALSMC_Result_CmdRejectedBusy? */
+
+ /* Log? */
+
+ return reg;
+}
+
+static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
+{
+ uint32_t result;
+ /* Wait for response register to be ready */
+ dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ /* Clear response register */
+ REG_WRITE(DAL_RESP_REG, 0);
+
+ /* Set the parameter register for the SMU message */
+ REG_WRITE(DAL_ARG_REG, param_in);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(DAL_MSG_REG, msg_id);
+
+ result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ if (IS_SMU_TIMEOUT(result))
+ dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000);
+
+ /* Wait for response */
+ if (result == DALSMC_Result_OK) {
+ if (param_out)
+ *param_out = REG_READ(DAL_ARG_REG);
+
+ return true;
+ }
+
+ return false;
+}
+
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set);
+
+ dcn30m_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SmartAccess, pins_to_set, &response);
+
+ return response;
+}
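
dcn30m_smu_send_msg_with_param() implements the usual three-register SMU handshake: poll for a non-zero response, clear the response register, stage the argument, write the message ID to trigger the transaction, then poll again and read the result back out of the argument register. With .set_smartmux_switch wired into dcn3_funcs, callers reach this path through the clk_mgr function table; a hedged usage sketch, where the pins_to_set local at the call site is assumed:

	/* Sketch: drive the SmartMux pins through the new clk_mgr hook */
	uint32_t response = 0;

	if (dc->clk_mgr->funcs->set_smartmux_switch)
		response = dc->clk_mgr->funcs->set_smartmux_switch(dc->clk_mgr,
								   pins_to_set);
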
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
index ef474f61a1b5..8a59a473fc5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +18,17 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-#include <nvif/class.h>
+#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
+#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
-static const struct nvkm_engine_func
-ga100_ofa = {
- .sclass = {
- { -1, -1, NVC6FA_VIDEO_OFA },
- {}
- }
-};
+#include "core_types.h"
-int
-ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga100_ofa, device, type, inst, pengine);
+struct clk_mgr_internal;
- return -ENODEV;
-}
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set);
+#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 2d14346b680e..478b4d6a3544 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -49,12 +49,9 @@ static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E0000
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+
+#define CTX clk_mgr->base.ctx
+#define IND_REG(offset) offset
#define regBIF_BX_PF2_RSMU_INDEX 0x0000
#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1
@@ -67,9 +64,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define FN(reg_name, field) \
FD(reg_name##__##field)
-#define REG_NBIO(reg_name) \
- (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
-
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
@@ -77,6 +71,13 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define mmMP1_C2PMSG_3 0x3B1050C
+#define reg__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define reg__MP1_C2PMSG_3__SHIFT (0)
+
+
+#define data_reg_name__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define data_reg_name__MP1_C2PMSG_3__SHIFT (0)
+
#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0
@@ -153,12 +154,10 @@ static int dcn315_smu_send_msg_with_param(
for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
/* Trigger the message transaction by writing the message ID */
- generic_write_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3, msg_id);
- read_back_data = generic_read_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3);
+ IX_REG_SET_SYNC(mmMP1_C2PMSG_3, 0,
+ MP1_C2PMSG_3, msg_id);
+ IX_REG_GET_SYNC(mmMP1_C2PMSG_3,
+ MP1_C2PMSG_3, &read_back_data);
if (read_back_data == msg_id)
break;
udelay(2);
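
The IX_REG_SET_SYNC()/IX_REG_GET_SYNC() macros replace the hand-rolled index/data sequence of the removed generic_write_indirect_reg()/generic_read_indirect_reg() calls, driven by the CTX and IND_REG definitions added above. The underlying pattern, sketched with hypothetical MMIO accessors:

/* Index/data indirect access as the removed helpers did it by hand;
 * reg_write()/reg_read() are hypothetical stand-ins for the accessors. */
static u32 indirect_read(u32 index_reg, u32 data_reg, u32 offset)
{
	reg_write(index_reg, offset);	/* select the target register */
	return reg_read(data_reg);	/* read it through the data window */
}

static void indirect_write(u32 index_reg, u32 data_reg, u32 offset, u32 val)
{
	reg_write(index_reg, offset);	/* select the target register */
	reg_write(data_reg, val);	/* write it through the data window */
}
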
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
index 6a6ae618650b..4607eff07253 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
@@ -65,6 +65,7 @@
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
#define mmCLK5_spll_field_8 0x1B04B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 142de8938d7c..bb1ac12a2b09 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -90,6 +90,7 @@
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
#define mmCLK5_spll_field_8 0x1B24B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
@@ -116,6 +117,7 @@
#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+#define CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
#undef FN
@@ -596,7 +598,11 @@ static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
uint32_t ssc_enable;
- ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ if (clk_mgr_base->ctx->dce_version == DCN_VERSION_3_51) {
+ ssc_enable = REG_READ(CLK6_spll_field_8) & CLK6_spll_field_8__spll_ssc_en_MASK;
+ } else {
+ ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ }
return ssc_enable != 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index f6f0e6a33001..604d256cb47a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -84,8 +84,8 @@
#define VBIOSSMC_MSG_AllowZstatesEntry 0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry 0x16
#define VBIOSSMC_MSG_SetDtbClk 0x17
-#define VBIOSSMC_MSG_DispPsrEntry 0x18 ///< Display PSR entry, DMU
-#define VBIOSSMC_MSG_DispPsrExit 0x19 ///< Display PSR exit, DMU
+#define VBIOSSMC_MSG_DispIPS2Entry 0x18 ///< Display IPS2 entry, DMU
+#define VBIOSSMC_MSG_DispIPS2Exit 0x19 ///< Display IPS2 exit, DMU
#define VBIOSSMC_MSG_DisableLSdma 0x1A ///< Disable LSDMA; only sent by VBIOS
#define VBIOSSMC_MSG_DpControllerPhyStatus 0x1B ///< Inform PMFW about the pre conditions for turning SLDO2 on/off . bit[0]==1 precondition is met, bit[1-2] are for DPPHY number
#define VBIOSSMC_MSG_QueryIPS2Support 0x1C ///< Return 1: support; else not supported
@@ -475,7 +475,7 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
- VBIOSSMC_MSG_DispPsrExit,
+ VBIOSSMC_MSG_DispIPS2Exit,
0);
smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
return retv;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index a3b8e3d4a429..b59703467128 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -22,8 +22,6 @@
#include "dcn/dcn_4_1_0_offset.h"
#include "dcn/dcn_4_1_0_sh_mask.h"
-#include "dml/dcn401/dcn401_fpu.h"
-
#define DCN_BASE__INST0_SEG1 0x000000C0
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37
@@ -183,43 +181,36 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr)
{
- /* legacy */
- DC_FP_START();
- dcn401_build_wm_range_table_fpu(clk_mgr);
- DC_FP_END();
-
- if (clk_mgr->ctx->dc->debug.using_dml21) {
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
-
- /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
- } else {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
- }
-
- /* Set 1B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
+ /* For min clocks use as reported by PM FW and report those as min */
+ uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
+ uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
+
+ /* Set A - Normal - default values */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
+
+ /* Set B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
+
+ /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
+ /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
+ if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
+ } else {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
}
+
+ /* Set 1B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
}
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
@@ -320,6 +311,25 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_build_wm_range_table(clk_mgr_base);
}
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->smu_present && clk_mgr->dpm_present &&
+ ((clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz));
+}
+
static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
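
dcn401_is_dc_mode_present() returns true only when the SMU and DPM are both up and at least one clock domain pairs a populated level table with a non-zero DC-mode (battery) limit. A hedged sketch of a caller consuming it through the new funcs entry:

	/* Sketch: gate DC-mode specific handling on the new hook */
	if (clk_mgr->funcs->is_dc_mode_present &&
	    clk_mgr->funcs->is_dc_mode_present(clk_mgr)) {
		/* at least one clock advertises a DC-mode limit to honor */
	}
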
@@ -1490,6 +1500,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
return 0;
}
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ unsigned int num_clk_levels;
+
+ switch (clk_type) {
+ case CLK_TYPE_DISPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dispclk;
+ case CLK_TYPE_DPPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dppclk;
+ case CLK_TYPE_DSCCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
+ clk_mgr->base.boot_snapshot.dispclk / 3;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static struct clk_mgr_funcs dcn401_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz,
@@ -1505,6 +1544,8 @@ static struct clk_mgr_funcs dcn401_funcs = {
.get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist,
.get_hard_min_memclk = dcn401_get_hard_min_memclk,
.get_hard_min_fclk = dcn401_get_hard_min_fclk,
+ .is_dc_mode_present = dcn401_is_dc_mode_present,
+ .get_max_clock_khz = dcn401_get_max_clock_khz,
};
struct clk_mgr_internal *dcn401_clk_mgr_construct(
@@ -1565,7 +1606,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
- kfree(clk_mgr);
+ kfree(clk_mgr401);
return NULL;
}
@@ -1576,6 +1617,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
if (!clk_mgr->wm_range_table) {
BREAK_TO_DEBUGGER();
kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr401);
return NULL;
}
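
dcn401_get_max_clock_khz() reports the top DPM level for the requested clock and falls back to the boot snapshot when DPM is not enabled for that clock; DSCCLK has no table of its own and is derived as DISPCLK/3. A hedged usage sketch through the new funcs entry:

	/* Sketch: query the ceiling for DISPCLK via the new hook */
	unsigned int dispclk_khz = 0;

	if (dc->clk_mgr->funcs->get_max_clock_khz)
		dispclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr,
								    CLK_TYPE_DISPCLK);
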
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
index 6c9ae5ca2c7e..97a1ce1e8a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
@@ -105,10 +105,13 @@ struct dcn401_clk_mgr {
};
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base);
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base);
struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx,
struct dccg *dccg);
void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
+
#endif /* __DCN401_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 28d1353f403d..9ab0ee20ca6f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -36,7 +36,9 @@
#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
+#include "dc_plane.h"
#include "dc_plane_priv.h"
+#include "dc_stream_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -239,6 +241,7 @@ static bool create_links(
DC_LOG_DC("BIOS object table - end");
/* Create a link for each usb4 dpia port */
+ dc->lowest_dpia_link_index = MAX_LINKS;
for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
struct link_init_data link_init_params = {0};
struct dc_link *link;
@@ -251,6 +254,9 @@ static bool create_links(
link = dc->link_srv->create_link(&link_init_params);
if (link) {
+ if (dc->lowest_dpia_link_index > dc->link_count)
+ dc->lowest_dpia_link_index = dc->link_count;
+
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
@@ -439,9 +445,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* Don't adjust DRR while there's bandwidth optimizations pending to
* avoid conflicting with firmware updates.
*/
- if (dc->ctx->dce_version > DCE_VERSION_MAX)
- if (dc->optimized_required || dc->wm_optimized_required)
+ if (dc->ctx->dce_version > DCE_VERSION_MAX) {
+ if (dc->optimized_required || dc->wm_optimized_required) {
+ stream->adjust.timing_adjust_pending = true;
return false;
+ }
+ }
dc_exit_ips_for_hw_access(dc);
@@ -929,17 +938,18 @@ static void dc_destruct(struct dc *dc)
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
- if (dc->ctx->gpio_service)
- dal_gpio_service_destroy(&dc->ctx->gpio_service);
-
- if (dc->ctx->created_bios)
- dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ if (dc->ctx) {
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
- kfree(dc->ctx->logger);
- dc_perf_trace_destroy(&dc->ctx->perf_trace);
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ kfree(dc->ctx->logger);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
- kfree(dc->ctx);
- dc->ctx = NULL;
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+ }
kfree(dc->bw_vbios);
dc->bw_vbios = NULL;
@@ -967,6 +977,8 @@ static bool dc_construct_ctx(struct dc *dc,
if (!dc_ctx)
return false;
+ dc_stream_init_rmcm_3dlut(dc);
+
dc_ctx->cgs_device = init_params->cgs_device;
dc_ctx->driver_context = init_params->driver;
dc_ctx->dc = dc;
@@ -1192,6 +1204,12 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
+ if (dc->debug.visual_confirm & VISUAL_CONFIRM_EXPLICIT) {
+ memcpy(&pipe_ctx->visual_confirm_color, &pipe_ctx->plane_state->visual_confirm_color,
+ sizeof(pipe_ctx->visual_confirm_color));
+ return;
+ }
+
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1225,6 +1243,51 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
}
}
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color)
+{
+ struct dc_stream_status *stream_status = dc_stream_get_status(stream_state);
+ struct pipe_ctx *pipe_ctx;
+ int i;
+ struct dc_plane_state *plane_state = NULL;
+
+ if (!stream_status)
+ return;
+
+ switch (dc->debug.visual_confirm) {
+ case VISUAL_CONFIRM_DISABLE:
+ return;
+ case VISUAL_CONFIRM_PSR:
+ case VISUAL_CONFIRM_FAMS:
+ pipe_ctx = dc_stream_get_pipe_ctx(stream_state);
+ if (!pipe_ctx)
+ return;
+ dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx);
+ memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color));
+ return;
+
+ default:
+ /* find plane with highest layer_index */
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i]->visible)
+ plane_state = stream_status->plane_states[i];
+ }
+ if (!plane_state)
+ return;
+ /* find pipe that contains plane with highest layer index */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state == plane_state) {
+ memcpy(color, &pipe->visual_confirm_color, sizeof(struct tg_color));
+ return;
+ }
+ }
+ }
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
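
dc_get_visual_confirm_for_stream() gives the DM a way to read back the confirm color chosen for a stream: the PSR and FAMS modes fetch it from DMUB, while every other mode copies it from the pipe carrying the stream's top-most visible plane. A hedged sketch of a caller, assuming stream points at a committed dc_stream_state:

	/* Sketch: read back the visual-confirm color for a stream; the
	 * color stays zeroed when no matching pipe or plane is found. */
	struct tg_color color = {0};

	dc_get_visual_confirm_for_stream(dc, stream, &color);
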
@@ -2053,6 +2116,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.enable_accelerated_mode(dc, context);
}
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ //Only delay otg master for a given config
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ //dc_commit_state_no_check is always a full update
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false);
+ break;
+ }
+ }
+ }
+
if (context->stream_count > get_seamless_boot_stream_count(context) ||
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
@@ -2117,6 +2192,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe);
+ }
+ }
+
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
@@ -2258,11 +2341,15 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
for (i = 0; i < params->stream_count; i++) {
struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
+ struct dc_sink *sink = stream->sink;
/* revalidate streams */
- res = dc_validate_stream(dc, stream);
- if (res != DC_OK)
- return res;
+ if (!dc_is_virtual_signal(sink->sink_signal)) {
+ res = dc_validate_stream(dc, stream);
+ if (res != DC_OK)
+ return res;
+ }
+
dc_stream_log(dc, stream);
@@ -2297,7 +2384,7 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
context->power_source = params->power_source;
- res = dc_validate_with_context(dc, set, params->stream_count, context, false);
+ res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING);
/*
* Only update link encoder to stream assignment after bandwidth validation passed.
@@ -2815,7 +2902,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
int i;
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
- if (dc->idle_optimizations_allowed)
+ if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc))
overall_type = UPDATE_TYPE_FULL;
if (stream_status == NULL || stream_status->plane_count != surface_count)
@@ -3168,7 +3255,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->crtc_timing_adjust) {
if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
- stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max)
+ stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
+ stream->adjust.timing_adjust_pending)
update->crtc_timing_adjust->timing_adjust_pending = true;
stream->adjust = *update->crtc_timing_adjust;
update->crtc_timing_adjust->timing_adjust_pending = false;
@@ -3219,7 +3307,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (dsc_validate_context) {
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
- if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context,
+ DC_VALIDATE_MODE_ONLY) != DC_OK) {
stream->timing.dsc_cfg = old_dsc_cfg;
stream->timing.flags.DSC = old_dsc_enabled;
update->dsc_config = NULL;
@@ -3248,7 +3337,7 @@ static void backup_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- scratch->plane_states[i] = *status->plane_states[i];
+ dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]);
}
scratch->stream_state = *stream;
}
@@ -3264,10 +3353,7 @@ static void restore_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- /* refcount will always be valid, restore everything else */
- struct kref refcount = status->plane_states[i]->refcount;
- *status->plane_states[i] = scratch->plane_states[i];
- status->plane_states[i]->refcount = refcount;
+ dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
}
*stream = scratch->stream_state;
}
@@ -3444,7 +3530,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3998,6 +4084,7 @@ static void commit_planes_for_stream(struct dc *dc,
&context->res_ctx,
stream);
ASSERT(top_pipe_to_program != NULL);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4048,6 +4135,9 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
}
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
@@ -4168,12 +4258,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FAST)
continue;
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
- if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
- /*turn off triple buffer for full update*/
- dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
- }
stream_status =
stream_get_status(context, pipe_ctx->stream);
@@ -4182,8 +4266,37 @@ static void commit_planes_for_stream(struct dc *dc,
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ /* Full fe update*/
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ /*turn off triple buffer for full update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ //Pipe busy until some frame and line #
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx);
+ }
+ }
+
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
@@ -4523,7 +4636,8 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context,
+ DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) {
/* prevent underflow and corruption when reconfiguring pipes */
force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
@@ -4958,6 +5072,9 @@ static bool full_update_required(struct dc *dc,
if (dc->idle_optimizations_allowed)
return true;
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
return false;
}
@@ -5043,7 +5160,7 @@ static bool update_planes_and_stream_v1(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
if (update_type >= UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_state_release(context);
return false;
@@ -5327,8 +5444,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
else
ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
-
- if (ret)
+ if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_01))
clear_update_flags(srf_updates, surface_count, stream);
return ret;
@@ -5359,7 +5476,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
stream_update, state);
- if (ret)
+ if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
}
@@ -5432,6 +5549,15 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
break;
+ case DC_ACPI_CM_POWER_STATE_D3:
+ if (dc->caps.ips_support)
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ if (dc->caps.ips_v2_support) {
+ if (dc->clk_mgr->funcs->set_low_power_state)
+ dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
+ }
+ break;
default:
ASSERT(dc->current_state->stream_count == 0);
dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
@@ -6187,15 +6313,22 @@ bool dc_abm_save_restore(
void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
unsigned int i;
- bool subvp_sw_cursor_req = false;
+ unsigned int max_cursor_size = dc->caps.max_cursor_size;
+ unsigned int stream_cursor_size;
- for (i = 0; i < dc->current_state->stream_count; i++) {
- if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) {
- subvp_sw_cursor_req = true;
- break;
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc,
+ dc->current_state,
+ dc->current_state->streams[i]);
+
+ if (stream_cursor_size < max_cursor_size) {
+ max_cursor_size = stream_cursor_size;
+ }
}
}
- properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
+
+ properties->cursor_size_limit = max_cursor_size;
}
/**
@@ -6222,13 +6355,14 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
}
-/*
- *****************************************************************************
+/**
* dc_get_power_profile_for_dc_state() - extracts power profile from dc state
*
* Called when DM wants to make power policy decisions based on dc_state
*
- *****************************************************************************
+ * @context: Pointer to the dc_state from which the power profile is extracted.
+ *
+ * Return: The power profile structure containing the power level information.
*/
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
{
@@ -6244,13 +6378,14 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
return profile;
}
-/*
- **********************************************************************************
+/**
* dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
*
- * Called when DM wants to log detile buffer size from dc_state
+ * This function is called to log the detile buffer size from the dc_state.
*
- **********************************************************************************
+ * @context: a pointer to the dc_state from which the detile buffer size is extracted.
+ *
+ * Return: the size of the detile buffer, or 0 if not available.
*/
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
{
@@ -6261,3 +6396,57 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
else
return 0;
}
+
+/**
+ * dc_get_host_router_index() - Get the host router index of a DPIA link
+ *
+ * This function returns the host router index of the target link, provided the target link is a DPIA link.
+ *
+ * @link: Pointer to the target link (input)
+ * @host_router_index: Pointer to store the host router index of the target link (output).
+ *
+ * Return: true if the host router index is found and valid.
+ *
+ */
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index)
+{
+ struct dc *dc;
+
+ if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ dc = link->ctx->dc;
+
+ if (link->link_index < dc->lowest_dpia_link_index)
+ return false;
+
+ *host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router;
+ if (*host_router_index < dc->caps.num_of_host_routers)
+ return true;
+ else
+ return false;
+}
+
+bool dc_is_cursor_limit_pending(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i]))
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_can_clear_cursor_limit(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state))
+ return true;
+ }
+
+ return false;
+}
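For reference, dc_get_host_router_index() above resolves a DPIA link to its host router with plain integer division over the link index. A minimal standalone sketch of the same math, with illustrative stand-in values rather than the real dc structures:

#include <stdbool.h>
#include <stdio.h>

/* Standalone sketch of the dc_get_host_router_index() math; every value
 * below is an illustrative stand-in, not data read from real hardware. */
static bool host_router_index(unsigned int link_index,
			      unsigned int lowest_dpia_link_index,
			      unsigned int dpias_per_router,
			      unsigned int num_routers,
			      unsigned int *out)
{
	if (link_index < lowest_dpia_link_index)
		return false;

	*out = (link_index - lowest_dpia_link_index) / dpias_per_router;
	return *out < num_routers;
}

int main(void)
{
	unsigned int idx;

	/* With DPIA links starting at index 2 and 3 DPIAs per host router,
	 * link 6 maps to host router (6 - 2) / 3 = 1. */
	if (host_router_index(6, 2, 3, 2, &idx))
		printf("host router %u\n", idx); /* prints: host router 1 */
	return 0;
}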
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 650e89825968..bbce751b485f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -266,6 +266,10 @@ char *dc_status_to_str(enum dc_status status)
return "Fail dp payload allocation";
case DC_FAIL_DP_LINK_BANDWIDTH:
return "Insufficient DP link bandwidth";
+ case DC_FAIL_HW_CURSOR_SUPPORT:
+ return "HW Cursor not supported";
+ case DC_FAIL_DP_TUNNEL_BW_VALIDATE:
+ return "Fail DP Tunnel BW validation";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 55b32dfbfdd6..ec4e80e5b6eb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -427,6 +427,32 @@ void get_hdr_visual_confirm_color(
}
}
+/* Visual Confirm color definition for Smart Mux */
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ const struct tg_color sm_ver_colors[5] = {
+ {0, 0, 0}, /* SMUX_MUXCONTROL_UNSUPPORTED - Black */
+ {0, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_v10 - Green */
+ {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_v15 - Cyan */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_MDM - Yellow */
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_vUNKNOWN - Magenta */
+ };
+
+ if (dc->caps.is_apu) {
+ /* APU driving the eDP */
+ *color = sm_ver_colors[dc->config.smart_mux_version];
+ } else {
+ /* dGPU driving the eDP - red */
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ }
+}
+
/* Visual Confirm color definition for VABC */
void get_vabc_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
@@ -697,7 +723,7 @@ void get_fams2_visual_confirm_color(
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
@@ -896,7 +922,7 @@ void hwss_build_fast_sequence(struct dc *dc,
}
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps)
{
unsigned int i;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 71e15da4bb69..130455f2802a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -515,7 +515,8 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
link->dc->link_srv->enable_hpd_filter(link, enable);
}
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- return dc->link_srv->validate_dpia_bandwidth(streams, count);
+ return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx);
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 313a32248cd7..4d6181e7c612 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1342,32 +1342,6 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
data->viewport_c.y += src.y / vpc_div;
}
-static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
-{
- uint32_t refresh_rate;
- struct dc *dc = stream->ctx->dc;
-
- refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
- stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
- refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
- refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
-
- /* If there's any stream that fits the SubVP high refresh criteria,
- * we must return true. This is because cursor updates are asynchronous
- * with full updates, so we could transition into a SubVP config and
- * remain in HW cursor mode if there's no cursor update which will
- * then cause corruption.
- */
- if ((refresh_rate >= 120 && refresh_rate <= 175 &&
- stream->timing.v_addressable >= 1080 &&
- stream->timing.v_addressable <= 2160) &&
- (dc->current_state->stream_count > 1 ||
- (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
- return true;
-
- return false;
-}
-
static enum controller_dp_test_pattern convert_dp_to_controller_test_pattern(
enum dp_test_pattern test_pattern)
{
@@ -3937,6 +3911,10 @@ enum dc_status resource_map_pool_resources(
if (!dc->link_srv->dp_decide_link_settings(stream,
&pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
+
+ dc->link_srv->dp_decide_tunnel_settings(stream,
+ &pipe_ctx->link_config.dp_tunnel_settings);
+
if (dc->link_srv->dp_get_encoding_format(
&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
@@ -3962,7 +3940,9 @@ enum dc_status resource_map_pool_resources(
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
- stream->audio_info.mode_count && stream->audio_info.flags.all) {
+ stream->audio_info.mode_count &&
+ (stream->audio_info.flags.all ||
+ (stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) {
pipe_ctx->stream_res.audio = find_first_free_audio(
&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
@@ -4075,7 +4055,7 @@ static bool add_all_planes_for_stream(
* @set: An array of dc_validation_set with all the current streams reference
* @set_count: Total of streams
* @context: New context
- * @fast_validate: Enable or disable fast validation
+ * @validate_mode: the validation mode to apply
*
* This function updates the potential new stream in the context object. It
* creates multiple lists for the add, remove, and unchanged streams. In
@@ -4090,7 +4070,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 };
struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };
@@ -4259,7 +4239,12 @@ enum dc_status dc_validate_with_context(struct dc *dc,
}
}
- res = dc_validate_global_state(dc, context, fast_validate);
+ /* clear subvp cursor limitations */
+ for (i = 0; i < context->stream_count; i++) {
+ dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false);
+ }
+
+ res = dc_validate_global_state(dc, context, validate_mode);
 /* calculate pixel rate divider after deciding pixel clock & odm combine */
if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) {
@@ -4316,7 +4301,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
*
* @dc: dc struct for this driver
* @new_ctx: state to be validated
- * @fast_validate: set to true if only yes/no to support matters
+ * @validate_mode: the validation mode to apply
*
* Checks hardware resource availability and bandwidth requirement.
*
@@ -4326,7 +4311,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
@@ -4385,8 +4370,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
- result = DC_FAIL_BANDWIDTH_VALIDATE;
+ result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, validate_mode);
return result;
}
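The bandwidth hook now reports an enum dc_status against an explicit dc_validate_mode instead of a bool keyed on fast_validate. A hedged standalone sketch of the new calling convention; the types below are local stand-ins, and DC_VALIDATE_MODE_FAST is a hypothetical name for whatever mode replaces the old fast_validate=true path:

#include <stdio.h>

/* Local stand-in types, not the full dc interfaces. */
enum dc_validate_mode { DC_VALIDATE_MODE_FAST, DC_VALIDATE_MODE_AND_PROGRAMMING };
enum dc_status { DC_OK, DC_FAIL_BANDWIDTH_VALIDATE };

static enum dc_status validate_bandwidth(enum dc_validate_mode mode)
{
	/* A real implementation inspects the state; this sketch always
	 * passes and would skip populating programming info in fast mode. */
	(void)mode;
	return DC_OK;
}

int main(void)
{
	/* Callers now compare against DC_OK instead of testing a bool. */
	if (validate_bandwidth(DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
		printf("mode validation failed\n");
	return 0;
}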
@@ -5538,23 +5522,17 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
return DC_OK;
}
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
+struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
{
- if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
- return true;
- if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
- else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
-
- return false;
+ return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
}
-struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
+static bool resource_allocate_mcache(struct dc_state *context, const struct dc_mcache_params *mcache_params)
{
- return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+ if (context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config)
+ context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config(context, mcache_params);
+
+ return true;
}
void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
@@ -5576,6 +5554,7 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
dml2_options->callbacks.get_max_flickerless_instant_vtotal_increase = &dc_stream_get_max_flickerless_instant_vtotal_increase;
+ dml2_options->callbacks.allocate_mcache = &resource_allocate_mcache;
dml2_options->svp_pstate.callbacks.dc = dc;
dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 1b2cce127981..883054bb18e7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
+#include "dc_types.h"
#include "core_types.h"
#include "core_status.h"
#include "dc_state.h"
@@ -193,11 +194,6 @@ static void init_state(struct dc *dc, struct dc_state *state)
struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
struct dc_state *state;
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-#endif
state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL);
@@ -210,14 +206,12 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2) {
- dml2_opt->use_clock_dc_limits = false;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) {
+ if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) {
dc_state_release(state);
return NULL;
}
- dml2_opt->use_clock_dc_limits = true;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) {
+ if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
dc_state_release(state);
return NULL;
}
@@ -433,6 +427,8 @@ enum dc_status dc_state_remove_stream(
return DC_ERROR_UNEXPECTED;
}
+ dc_stream_release_3dlut_for_stream(dc, stream);
+
dc_stream_release(state->streams[i]);
state->stream_count--;
@@ -812,8 +808,12 @@ enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
if (phantom_stream_status) {
phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+ phantom_stream_status->mall_stream_config.subvp_limit_cursor_size = false;
+ phantom_stream_status->mall_stream_config.cursor_size_limit_subvp = false;
}
+ dc_state_set_stream_subvp_cursor_limit(main_stream, state, true);
+
return res;
}
@@ -939,13 +939,20 @@ void dc_state_release_phantom_streams_and_planes(
const struct dc *dc,
struct dc_state *state)
{
+ unsigned int phantom_count;
+ struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES];
+ struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES];
int i;
- for (i = 0; i < state->phantom_stream_count; i++)
- dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);
+ phantom_count = state->phantom_stream_count;
+ memcpy(phantom_streams, state->phantom_streams, sizeof(struct dc_stream_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_stream(dc, state, phantom_streams[i]);
- for (i = 0; i < state->phantom_plane_count; i++)
- dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
+ phantom_count = state->phantom_plane_count;
+ memcpy(phantom_planes, state->phantom_planes, sizeof(struct dc_plane_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_plane(dc, state, phantom_planes[i]);
}
struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id)
@@ -977,3 +984,94 @@ bool dc_state_is_fams2_in_use(
return is_fams2_in_use;
}
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.subvp_limit_cursor_size = limit;
+ }
+}
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.subvp_limit_cursor_size;
+ }
+
+ return limit;
+}
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.cursor_size_limit_subvp = limit;
+ }
+}
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.cursor_size_limit_subvp;
+ }
+
+ return limit;
+}
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool can_clear_limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, state) &&
+ (stream_status->mall_stream_config.type == SUBVP_PHANTOM ||
+ stream->hw_cursor_req ||
+ !stream_status->mall_stream_config.subvp_limit_cursor_size ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, state, &stream->cursor_attributes));
+ }
+
+ return can_clear_limit;
+}
+
+bool dc_state_is_subvp_in_use(struct dc_state *state)
+{
+ uint32_t i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (dc_state_get_stream_subvp_type(state, state->streams[i]) != SUBVP_NONE)
+ return true;
+ }
+
+ return false;
+}
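Taken together, the helpers above form a small predicate over the two flags kept in mall_stream_config. A standalone sketch of the shape of dc_state_can_clear_stream_cursor_subvp_limit(), with local stand-in fields rather than the real stream and status types:

#include <stdbool.h>
#include <stdio.h>

/* Hedged stand-ins for the stream/status fields the predicate reads. */
struct sketch_stream {
	bool is_phantom;
	bool hw_cursor_req;
	bool cursor_enabled;
	bool attributes_fit;          /* dc_stream_check_cursor_attributes() result */
	bool subvp_limit_cursor_size; /* SubVP wants the cursor limited */
	bool cursor_size_limit_subvp; /* the limit is currently applied */
};

/* Mirrors dc_state_can_clear_stream_cursor_subvp_limit(): the limit is
 * clearable only if it is applied and any release condition holds. */
static bool can_clear_limit(const struct sketch_stream *s)
{
	return s->cursor_size_limit_subvp &&
	       (s->is_phantom || s->hw_cursor_req ||
		!s->subvp_limit_cursor_size || !s->cursor_enabled ||
		s->attributes_fit);
}

int main(void)
{
	struct sketch_stream s = {
		.cursor_enabled = true,
		.attributes_fit = true,
		.subvp_limit_cursor_size = true,
		.cursor_size_limit_subvp = true,
	};

	printf("can clear: %d\n", can_clear_limit(&s)); /* 1: attributes fit */
	return 0;
}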
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 0478dd856d8c..4d6bc9fd4faa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -265,13 +265,16 @@ void program_cursor_attributes(
}
/*
- * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ * dc_stream_check_cursor_attributes() - Check validity of cursor attributes and surface address
*/
-bool dc_stream_set_cursor_attributes(
- struct dc_stream_state *stream,
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
const struct dc_cursor_attributes *attributes)
{
- struct dc *dc;
+ const struct dc *dc;
+
+ unsigned int max_cursor_size;
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -289,24 +292,38 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
- * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
- * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs)
- * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
- * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
+ /* SubVP is not compatible with HW cursor larger than what can fit in cursor SRAM.
+ * Therefore, if the cursor is larger than this, fall back to SW cursor.
*/
- if (dc->debug.allow_sw_cursor_fallback &&
- attributes->height * attributes->width * 4 > 16384 &&
- !stream->hw_cursor_req) {
- if (check_subvp_sw_cursor_fallback_req(dc, stream))
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
+ max_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, state, stream);
+ max_cursor_size = max_cursor_size * max_cursor_size * 4;
+
+ if (attributes->height * attributes->width * 4 > max_cursor_size) {
return false;
+ }
}
- stream->cursor_attributes = *attributes;
-
return true;
}
+/*
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ bool result = false;
+
+ if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
+ stream->cursor_attributes = *attributes;
+ result = true;
+ }
+
+ return result;
+}
+
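The check above compares the requested cursor footprint against the per-stream HW limit in bytes. A small standalone sketch of that arithmetic (simplified; 4 bytes per pixel as in the code, with the limit reported as a square dimension):

#include <stdbool.h>
#include <stdio.h>

/* Hedged sketch of the size check: the per-stream HW limit (a dimension)
 * is squared and scaled to bytes, then compared to the request. */
static bool cursor_fits(unsigned int max_dim, unsigned int w, unsigned int h)
{
	unsigned long long max_bytes = (unsigned long long)max_dim * max_dim * 4;

	return (unsigned long long)w * h * 4 <= max_bytes;
}

int main(void)
{
	printf("%d\n", cursor_fits(64, 64, 64));   /* 1: fits in 64x64 SRAM */
	printf("%d\n", cursor_fits(64, 128, 128)); /* 0: falls back to SW cursor */
	return 0;
}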
bool dc_stream_program_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes)
@@ -552,6 +569,14 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,
return true;
}
+/**
+ * dc_stream_remove_writeback() - Disables writeback and removes writeback info.
+ * @dc: Display core control structure.
+ * @stream: Display core stream state.
+ * @dwb_pipe_inst: Display writeback pipe.
+ *
+ * Return: true on success, false otherwise.
+ */
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
@@ -832,6 +857,73 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
}
/*
+* dc_stream_get_3dlut_for_stream()
+* Requirements:
+* 1. If the stream already owns an RMCM instance, return it.
+* 2. If it doesn't and we don't need to allocate, return NULL.
+* 3. If there's a free RMCM instance, assign to stream and return it.
+* 4. If no free RMCM instances, return NULL.
+*/
+
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ // see if one is allocated for this stream
+ for (int i = 0; i < num_rmcm; i++) {
+ if (dc->res_pool->rmcm_3dlut[i].isInUse &&
+ dc->res_pool->rmcm_3dlut[i].stream == stream)
+ return &dc->res_pool->rmcm_3dlut[i];
+ }
+
+ // case: none owned by this stream, and we don't need to allocate
+ if (!allocate_one)
+ return NULL;
+
+ // see if there is an unused 3D LUT instance to allocate
+ for (int i = 0; i < num_rmcm; i++) {
+ if (!dc->res_pool->rmcm_3dlut[i].isInUse) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = true;
+ dc->res_pool->rmcm_3dlut[i].stream = stream;
+ return &dc->res_pool->rmcm_3dlut[i];
+ }
+ }
+
+ // don't have a free 3D LUT
+ return NULL;
+}
+
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream)
+{
+ struct dc_rmcm_3dlut *rmcm_3dlut =
+ dc_stream_get_3dlut_for_stream(dc, stream, false);
+
+ if (rmcm_3dlut) {
+ rmcm_3dlut->isInUse = false;
+ rmcm_3dlut->stream = NULL;
+ rmcm_3dlut->protection_bits = 0;
+ }
+}
+
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ for (int i = 0; i < num_rmcm; i++) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = false;
+ dc->res_pool->rmcm_3dlut[i].stream = NULL;
+ dc->res_pool->rmcm_3dlut[i].protection_bits = 0;
+ }
+}
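A standalone sketch of the lookup/claim path implemented above; the pool array and fields are simplified stand-ins for dc->res_pool->rmcm_3dlut, not the real structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for struct dc_rmcm_3dlut and its pool. */
struct rmcm_3dlut { bool in_use; const void *stream; };

#define NUM_RMCM 4
static struct rmcm_3dlut pool[NUM_RMCM];

/* Mirrors dc_stream_get_3dlut_for_stream(): return the stream's own
 * instance if present, optionally claim a free one, otherwise NULL. */
static struct rmcm_3dlut *get_3dlut(const void *stream, bool allocate_one)
{
	for (int i = 0; i < NUM_RMCM; i++)
		if (pool[i].in_use && pool[i].stream == stream)
			return &pool[i];

	if (!allocate_one)
		return NULL;

	for (int i = 0; i < NUM_RMCM; i++)
		if (!pool[i].in_use) {
			pool[i].in_use = true;
			pool[i].stream = stream;
			return &pool[i];
		}
	return NULL;
}

int main(void)
{
	int stream;
	struct rmcm_3dlut *lut = get_3dlut(&stream, true); /* claims slot 0 */

	printf("%d\n", lut == get_3dlut(&stream, false));  /* 1: same instance */
	return 0;
}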
+
+/*
* Finds the greatest index in refresh_rate_hz that contains a value <= refresh
*/
static int dc_stream_get_nearest_smallest_index(struct dc_stream_state *stream, int refresh)
@@ -1109,3 +1201,26 @@ unsigned int dc_stream_get_max_flickerless_instant_vtotal_increase(struct dc_str
return dc_stream_get_max_flickerless_instant_vtotal_delta(stream, is_gaming, false);
}
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool is_limit_pending = false;
+
+ if (dc->current_state)
+ is_limit_pending = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state);
+
+ return is_limit_pending;
+}
+
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool can_clear_limit = false;
+
+ if (dc->current_state)
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state) &&
+ (stream->hw_cursor_req ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, dc->current_state, &stream->cursor_attributes));
+
+ return can_clear_limit;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index e6fcc21bb9bc..922f23557f5d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -109,7 +109,8 @@ struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
*****************************************************************************
*/
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state)
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags)
{
const struct dc_plane_status *plane_status;
struct dc *dc;
@@ -136,7 +137,7 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- if (pipe_ctx->plane_state)
+ if (pipe_ctx->plane_state && flags.bits.address)
pipe_ctx->plane_state->status.is_flip_pending = false;
break;
@@ -151,7 +152,8 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- dc->hwss.update_pending_status(pipe_ctx);
+ if (flags.bits.address)
+ dc->hwss.update_pending_status(pipe_ctx);
}
return plane_status;
@@ -294,3 +296,17 @@ void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx, plane_state, clear_tiling);
}
}
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src)
+{
+ struct kref temp_refcount;
+
+ /* backup persistent info */
+ memcpy(&temp_refcount, &dst->refcount, sizeof(struct kref));
+
+ /* copy all configuration information */
+ memcpy(dst, src, sizeof(struct dc_plane_state));
+
+ /* restore persistent info */
+ memcpy(&dst->refcount, &temp_refcount, sizeof(struct kref));
+}
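dc_plane_copy_config() above copies the whole plane state while deliberately preserving the destination's refcount. A minimal sketch of the backup/restore pattern, with a plain int standing in for struct kref:

#include <stdio.h>
#include <string.h>

/* Plain int standing in for struct kref. */
struct plane { int refcount; int fmt; int width; };

static void copy_config(struct plane *dst, const struct plane *src)
{
	int saved = dst->refcount;      /* back up persistent info */

	memcpy(dst, src, sizeof(*dst)); /* copy all configuration */
	dst->refcount = saved;          /* restore persistent info */
}

int main(void)
{
	struct plane a = { .refcount = 3, .fmt = 1, .width = 1920 };
	struct plane b = { .refcount = 1, .fmt = 0, .width = 0 };

	copy_config(&b, &a);
	printf("%d %d\n", b.refcount, b.width); /* 1 1920: refcount survives */
	return 0;
}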
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7c2ee0526926..59c07756130d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -46,6 +46,8 @@
#include "dmub/inc/dmub_cmd.h"
+#include "sspl/dc_spl_types.h"
+
struct abm_save_restore;
/* forward declaration */
@@ -53,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.325"
+#define DC_VER "3.2.340"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -66,7 +68,11 @@ struct dmub_notification;
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
-#define MAX_HOST_ROUTERS_NUM 2
+#define MAX_SUPPORTED_FORMATS 7
+
+#define MAX_HOST_ROUTERS_NUM 3
+#define MAX_DPIA_PER_HOST_ROUTER 3
+#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER)
/* Display Core Interfaces */
struct dc_versions {
@@ -192,6 +198,34 @@ struct dpp_color_caps {
struct rom_curve_caps ogam_rom_caps;
};
+/* Below structure is to describe the HW support for mem layout, extend support
+ range to match what OS could handle in the roadmap */
+struct lut3d_caps {
+ uint32_t dma_3d_lut : 1; /*< DMA mode support for 3D LUT */
+ struct {
+ uint32_t swizzle_3d_rgb : 1;
+ uint32_t swizzle_3d_bgr : 1;
+ uint32_t linear_1d : 1;
+ } mem_layout_support;
+ struct {
+ uint32_t unorm_12msb : 1;
+ uint32_t unorm_12lsb : 1;
+ uint32_t float_fp1_5_10 : 1;
+ } mem_format_support;
+ struct {
+ uint32_t order_rgba : 1;
+ uint32_t order_bgra : 1;
+ } mem_pixel_order_support;
+ /*< size options are 9, 17, 33, 45, 65 */
+ struct {
+ uint32_t dim_9 : 1; /* 3D LUT support for 9x9x9 */
+ uint32_t dim_17 : 1; /* 3D LUT support for 17x17x17 */
+ uint32_t dim_33 : 1; /* 3D LUT support for 33x33x33 */
+ uint32_t dim_45 : 1; /* 3D LUT support for 45x45x45 */
+ uint32_t dim_65 : 1; /* 3D LUT support for 65x65x65 */
+ } lut_dim_caps;
+};
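The new bitfields let callers probe supported 3D LUT dimensions before programming. A hedged sketch of such a probe; the bitfield is reproduced locally and max_lut_dim() is an illustrative helper, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Local copy of the dimension bits from struct lut3d_caps. */
struct lut_dim_caps {
	uint32_t dim_9 : 1, dim_17 : 1, dim_33 : 1, dim_45 : 1, dim_65 : 1;
};

/* Illustrative helper: pick the largest supported LUT dimension. */
static int max_lut_dim(struct lut_dim_caps caps)
{
	if (caps.dim_65) return 65;
	if (caps.dim_45) return 45;
	if (caps.dim_33) return 33;
	if (caps.dim_17) return 17;
	if (caps.dim_9)  return 9;
	return 0;
}

int main(void)
{
	struct lut_dim_caps caps = { .dim_9 = 1, .dim_17 = 1, .dim_33 = 1 };

	printf("max 3D LUT dim: %d\n", max_lut_dim(caps)); /* prints 33 */
	return 0;
}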
+
/**
* struct mpc_color_caps - color pipeline capabilities for multiple pipe and
* plane combined blocks
@@ -203,14 +237,21 @@ struct dpp_color_caps {
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
* @ogam_rom_caps: pre-definied curve caps for regamma 1D LUT
+ * @mcm_3d_lut_caps: HW support cap for MCM LUT memory
+ * @rmcm_3d_lut_caps: HW support cap for RMCM LUT memory
+ * @preblend: whether color manager supports preblend with MPC
*/
struct mpc_color_caps {
uint16_t gamut_remap : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t num_3dluts : 3;
+ uint16_t num_rmcm_3dluts : 3;
uint16_t shared_3d_lut:1;
struct rom_curve_caps ogam_rom_caps;
+ struct lut3d_caps mcm_3d_lut_caps;
+ struct lut3d_caps rmcm_3d_lut_caps;
+ bool preblend;
};
/**
@@ -249,6 +290,7 @@ struct dc_caps {
uint32_t i2c_speed_in_khz_hdcp;
uint32_t dmdata_alloc_size;
unsigned int max_cursor_size;
+ unsigned int max_buffered_cursor_size;
unsigned int max_video_width;
/*
* max video plane width that can be safely assumed to be always
@@ -269,6 +311,7 @@ struct dc_caps {
bool dmcub_support;
bool zstate_support;
bool ips_support;
+ bool ips_v2_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
unsigned int mall_size_per_mem_channel;
@@ -282,6 +325,7 @@ struct dc_caps {
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+ bool fused_io_supported;
uint32_t max_otg_num;
uint32_t max_cab_allocation_bytes;
uint32_t cache_line_size;
@@ -303,6 +347,10 @@ struct dc_caps {
/* Conservative limit for DCC cases which require ODM4:1 to support*/
uint32_t dcc_plane_width_limit;
struct dc_scl_caps scl_caps;
+ uint8_t num_of_host_routers;
+ uint8_t num_of_dpias_per_host_router;
+ /* limit of the ODM only, could be limited by other factors (like pipe count)*/
+ uint8_t max_odm_combine_factor;
};
struct dc_bug_wa {
@@ -447,6 +495,7 @@ struct dc_config {
bool enable_windowed_mpo_odm;
bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520
uint32_t allow_edp_hotplug_detection;
+ bool skip_riommu_prefetch_wa;
bool clamp_min_dcfclk;
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
@@ -456,6 +505,7 @@ struct dc_config {
bool use_spl;
bool prefer_easf;
bool use_pipe_ctx_sync_logic;
+ int smart_mux_version;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
bool use_default_clock_table;
@@ -466,6 +516,7 @@ struct dc_config {
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
+ unsigned int disable_ips_rcg;
unsigned int disable_ips_in_vpb;
bool disable_ips_in_dpms_off;
bool usb4_bw_alloc_support;
@@ -478,6 +529,8 @@ struct dc_config {
bool set_pipe_unlock_order;
bool enable_dpia_pre_training;
bool unify_link_enc_assignment;
+ struct spl_sharpness_range dcn_sharpness_range;
+ struct spl_sharpness_range dcn_override_sharpness_range;
};
enum visual_confirm {
@@ -489,6 +542,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_SMARTMUX_DGPU = 10,
VISUAL_CONFIRM_REPLAY = 12,
VISUAL_CONFIRM_SUBVP = 14,
VISUAL_CONFIRM_MCLK_SWITCH = 16,
@@ -496,6 +550,7 @@ enum visual_confirm {
VISUAL_CONFIRM_HW_CURSOR = 20,
VISUAL_CONFIRM_VABC = 21,
VISUAL_CONFIRM_DCC = 22,
+ VISUAL_CONFIRM_EXPLICIT = 0x80000000,
};
enum dc_psr_power_opts {
@@ -766,6 +821,7 @@ enum pg_hw_resources {
PG_DCHVM,
PG_DWB,
PG_HPO,
+ PG_DCOH,
PG_HW_RESOURCES_NUM_ELEMENT
};
@@ -782,10 +838,8 @@ union dpia_debug_options {
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
- uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
- uint32_t enable_dpia_pre_training:1; /* bit 7 */
- uint32_t unify_link_enc_assignment:1; /* bit 8 */
- uint32_t reserved:24;
+ uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
+ uint32_t reserved:25;
} bits;
uint32_t raw;
};
@@ -902,12 +956,18 @@ struct dc_debug_options {
bool voltage_align_fclk;
bool disable_min_fclk;
+ bool hdcp_lc_force_fw_enable;
+ bool hdcp_lc_enable_sw_fallback;
+
bool disable_dfs_bypass;
bool disable_dpp_power_gate;
bool disable_hubp_power_gate;
bool disable_dsc_power_gate;
bool disable_optc_power_gate;
bool disable_hpo_power_gate;
+ bool disable_io_clk_power_gate;
+ bool disable_mem_power_gate;
+ bool disable_dio_power_gate;
int dsc_min_slice_height_override;
int dsc_bpp_increment_div;
bool disable_pplib_wm_range;
@@ -1144,7 +1204,7 @@ struct dc_init_data {
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
uint32_t *clk_reg_offsets;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
struct dc_callback_init {
@@ -1245,6 +1305,12 @@ union dc_3dlut_state {
};
+struct dc_rmcm_3dlut {
+ bool isInUse;
+ const struct dc_stream_state *stream;
+ uint8_t protection_bits;
+};
+
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
@@ -1382,6 +1448,8 @@ struct dc_plane_state {
int sharpness_level;
enum linear_light_scaling linear_light_scaling;
unsigned int sdr_white_level_nits;
+ struct spl_sharpness_range sharpness_range;
+ enum sharpness_range_source sharpness_source;
};
struct dc_plane_info {
@@ -1418,6 +1486,174 @@ struct dc_scratch_space {
struct dc_stream_state stream_state;
};
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+ struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ enum dc_irq_source irq_source_read_request;/* Read Request */
+
+ bool is_hpd_filter_disabled;
+ bool dp_ss_off;
+
+ /**
+ * @link_state_valid:
+ *
+ * If there is no link and local sink, this variable should be set to
+ * false. Otherwise, it should be set to true; usually, the function
+ * core_link_enable_stream sets this field to true.
+ */
+ bool link_state_valid;
+ bool aux_access_disabled;
+ bool sync_lt_in_progress;
+ bool skip_stream_reenable;
+ bool is_internal_display;
+ /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
+ bool is_dig_mapping_flexible;
+ bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a new received hpd */
+
+ /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
+ * for every link training. This is incompatible with DP LL compliance automation,
+ * which expects the same link settings to be used every retry on a link loss.
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+ bool skip_fallback_on_link_loss;
+
+ bool edp_sink_present;
+
+ struct dp_trace dp_trace;
+
+ /* caps is the same as reported_link_cap. link_training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
+ struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
+ struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+ /* DIG link encoder ID. Used as index in link encoder resource pool.
+ * For links with fixed mapping to DIG, this is not changed after dc_link
+ * object creation.
+ */
+ enum engine_id eng_id;
+ enum engine_id dpia_preferred_eng_id;
+
+ bool test_pattern_enabled;
+ /* Pending/Current test pattern are only used to perform and track
+ * FIXED_VS retimer test pattern/lane adjustment override state.
+ * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
+ * to perform specific lane adjust overrides before setting certain
+ * PHY test patterns. In cases when lane adjust and set test pattern
+ * calls are not performed atomically (i.e. performing link training),
+ * pending_test_pattern will be invalid or contain a non-PHY test pattern
+ * and current_test_pattern will contain required context for any future
+ * set pattern/set lane adjust to transition between override state(s).
+ * */
+ enum dp_test_pattern current_test_pattern;
+ enum dp_test_pattern pending_test_pattern;
+
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ enum dp_panel_mode panel_mode;
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct panel_cntl *panel_cntl;
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ /* Endpoint type distinguishes display endpoints which do not have entries
+ * in the BIOS connector table from those that do. Helps when tracking link
+ * encoder to display endpoint assignments.
+ */
+ enum display_endpoint_type ep_type;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ uint32_t dongle_max_pix_clk;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ struct hdcp_caps hdcp_caps;
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+ struct psr_settings psr_settings;
+ struct replay_settings replay_settings;
+
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
+ bool dp_skip_fs_144hz;
+ bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
+ /* Forced DPIA into TBT3 compatibility mode. */
+ bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
+ bool blank_stream_on_ocs_change;
+ bool read_dpcd204h_on_irq_hpd;
+ bool force_dp_ffe_preset;
+ bool skip_phy_ssc_reduction;
+ } wa_flags;
+ union dc_dp_ffe_preset forced_dp_ffe_preset;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+ struct dprx_states dprx_states;
+
+ struct gpio *hpd_gpio;
+ enum dc_link_fec_state fec_state;
+ bool is_dds;
+ bool is_display_mux_present;
+ bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
+
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
+ uint32_t phy_transition_bitmask;
+ // BW ALLOCATION USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
+ bool skip_implict_edp_power_control;
+ enum backlight_control_type backlight_control_type;
+};
+
struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
@@ -1431,6 +1667,7 @@ struct dc {
uint8_t link_count;
struct dc_link *links[MAX_LINKS];
+ uint8_t lowest_dpia_link_index;
struct link_service *link_srv;
struct dc_state *current_state;
@@ -1460,6 +1697,10 @@ struct dc {
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
+ /* For eDP to know the switching state of SmartMux */
+ bool is_switch_in_progress_orig;
+ bool is_switch_in_progress_dest;
+
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -1485,11 +1726,12 @@ struct dc {
struct dc_scratch_space current_state;
struct dc_scratch_space new_state;
struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ struct dc_link temp_link;
bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */
} scratch;
struct dml2_configuration_options dml2_options;
- struct dml2_configuration_options dml2_tmp;
+ struct dml2_configuration_options dml2_dc_power_options;
enum dc_acpi_cm_power_state power_state;
};
@@ -1594,19 +1836,15 @@ enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_set_generic_gpio_for_stereo(bool enable,
struct gpio_service *gpio_service);
-/*
- * fast_validate: we return after determining if we can support the new state,
- * but before we populate the programming info
- */
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
@@ -1651,170 +1889,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
const enum dc_link_encoding_format link_encoding);
/* Link Interfaces */
-/*
- * A link contains one or more sinks and their connected status.
- * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
- */
-struct dc_link {
- struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
- unsigned int sink_count;
- struct dc_sink *local_sink;
- unsigned int link_index;
- enum dc_connection_type type;
- enum signal_type connector_signal;
- enum dc_irq_source irq_source_hpd;
- enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
-
- bool is_hpd_filter_disabled;
- bool dp_ss_off;
-
- /**
- * @link_state_valid:
- *
- * If there is no link and local sink, this variable should be set to
- * false. Otherwise, it should be set to true; usually, the function
- * core_link_enable_stream sets this field to true.
- */
- bool link_state_valid;
- bool aux_access_disabled;
- bool sync_lt_in_progress;
- bool skip_stream_reenable;
- bool is_internal_display;
- /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
- bool is_dig_mapping_flexible;
- bool hpd_status; /* HPD status of link without physical HPD pin. */
- bool is_hpd_pending; /* Indicates a new received hpd */
-
- /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
- * for every link training. This is incompatible with DP LL compliance automation,
- * which expects the same link settings to be used every retry on a link loss.
- * This flag is used to skip the fallback when link loss occurs during automation.
- */
- bool skip_fallback_on_link_loss;
-
- bool edp_sink_present;
-
- struct dp_trace dp_trace;
-
- /* caps is the same as reported_link_cap. link_traing use
- * reported_link_cap. Will clean up. TODO
- */
- struct dc_link_settings reported_link_cap;
- struct dc_link_settings verified_link_cap;
- struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
- struct dc_link_settings preferred_link_setting;
- /* preferred_training_settings are override values that
- * come from DM. DM is responsible for the memory
- * management of the override pointers.
- */
- struct dc_link_training_overrides preferred_training_settings;
- struct dp_audio_test_data audio_test_data;
-
- uint8_t ddc_hw_inst;
-
- uint8_t hpd_src;
-
- uint8_t link_enc_hw_inst;
- /* DIG link encoder ID. Used as index in link encoder resource pool.
- * For links with fixed mapping to DIG, this is not changed after dc_link
- * object creation.
- */
- enum engine_id eng_id;
- enum engine_id dpia_preferred_eng_id;
-
- bool test_pattern_enabled;
- /* Pending/Current test pattern are only used to perform and track
- * FIXED_VS retimer test pattern/lane adjustment override state.
- * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
- * to perform specific lane adjust overrides before setting certain
- * PHY test patterns. In cases when lane adjust and set test pattern
- * calls are not performed atomically (i.e. performing link training),
- * pending_test_pattern will be invalid or contain a non-PHY test pattern
- * and current_test_pattern will contain required context for any future
- * set pattern/set lane adjust to transition between override state(s).
- * */
- enum dp_test_pattern current_test_pattern;
- enum dp_test_pattern pending_test_pattern;
-
- union compliance_test_state compliance_test_state;
-
- void *priv;
-
- struct ddc_service *ddc;
-
- enum dp_panel_mode panel_mode;
- bool aux_mode;
-
- /* Private to DC core */
-
- const struct dc *dc;
-
- struct dc_context *ctx;
-
- struct panel_cntl *panel_cntl;
- struct link_encoder *link_enc;
- struct graphics_object_id link_id;
- /* Endpoint type distinguishes display endpoints which do not have entries
- * in the BIOS connector table from those that do. Helps when tracking link
- * encoder to display endpoint assignments.
- */
- enum display_endpoint_type ep_type;
- union ddi_channel_mapping ddi_channel_mapping;
- struct connector_device_tag_info device_tag;
- struct dpcd_caps dpcd_caps;
- uint32_t dongle_max_pix_clk;
- unsigned short chip_caps;
- unsigned int dpcd_sink_count;
- struct hdcp_caps hdcp_caps;
- enum edp_revision edp_revision;
- union dpcd_sink_ext_caps dpcd_sink_ext_caps;
-
- struct psr_settings psr_settings;
- struct replay_settings replay_settings;
-
- /* Drive settings read from integrated info table */
- struct dc_lane_settings bios_forced_drive_settings;
-
- /* Vendor specific LTTPR workaround variables */
- uint8_t vendor_specific_lttpr_link_rate_wa;
- bool apply_vendor_specific_lttpr_link_rate_wa;
-
- /* MST record stream using this link */
- struct link_flags {
- bool dp_keep_receiver_powered;
- bool dp_skip_DID2;
- bool dp_skip_reset_segment;
- bool dp_skip_fs_144hz;
- bool dp_mot_reset_segment;
- /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
- bool dpia_mst_dsc_always_on;
- /* Forced DPIA into TBT3 compatibility mode. */
- bool dpia_forced_tbt3_mode;
- bool dongle_mode_timing_override;
- bool blank_stream_on_ocs_change;
- bool read_dpcd204h_on_irq_hpd;
- bool force_dp_ffe_preset;
- } wa_flags;
- union dc_dp_ffe_preset forced_dp_ffe_preset;
- struct link_mst_stream_allocation_table mst_stream_alloc_table;
-
- struct dc_link_status link_status;
- struct dprx_states dprx_states;
-
- struct gpio *hpd_gpio;
- enum dc_link_fec_state fec_state;
- bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
-
- struct dc_panel_config panel_config;
- struct phy_state phy_state;
- uint32_t phy_transition_bitmask;
- // BW ALLOCATON USB4 ONLY
- struct dc_dpia_bw_alloc dpia_bw_alloc_config;
- bool skip_implict_edp_power_control;
- enum backlight_control_type backlight_control_type;
-};
-
/* Return an enumerated dc_link.
* dc_link order is constant and determined at
* boot time. They cannot be created or destroyed.
@@ -2366,17 +2440,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
struct dc_link *link, int peak_bw);
/*
- * Validate the BW of all the valid DPIA links to make sure it doesn't exceed
- * available BW for each host router
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * @dc: pointer to dc struct
- * @stream: pointer to all possible streams
- * @count: number of valid DPIA streams
- *
- * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ * Return: dc_status
*/
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
- const unsigned int count);
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx);
/* Sink Interfaces - A sink corresponds to a display output device */
@@ -2586,13 +2655,23 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
+
/* DSC Interfaces */
#include "dc_dsc.h"
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color);
+
/* Disable acc mode Interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
+bool dc_is_cursor_limit_pending(struct dc *dc);
+bool dc_can_clear_cursor_limit(struct dc *dc);
+
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 614e03bfd598..f5ef1a07078e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -39,6 +39,7 @@
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
+#define GPINT_RETRY_NUM 20
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)
@@ -70,20 +71,28 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
}
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dmub_srv *dmub;
+ struct dc_context *dc_ctx;
enum dmub_status status;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_pending(dmub, 100000);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
}
+
+ return status == DMUB_STATUS_OK;
}
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
@@ -126,7 +135,49 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
}
}
-bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ struct dc_context *dc_ctx;
+ struct dmub_srv *dmub;
+ enum dmub_status status = DMUB_STATUS_OK;
+ int i;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
+ for (i = 0 ; i < count; i++) {
+ /* confirm no messages pending */
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
+ /* queue command */
+ if (status == DMUB_STATUS_OK)
+ status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);
+
+ /* check for errors */
+ if (status != DMUB_STATUS_OK) {
+ break;
+ }
+ }
+
+ if (status != DMUB_STATUS_OK) {
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
@@ -143,20 +194,25 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
for (i = 0 ; i < count; i++) {
// Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
+ dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
+ } else {
+ status = DMUB_STATUS_QUEUE_FULL;
+ }
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
}
if (status != DMUB_STATUS_OK) {
@@ -168,7 +224,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
}
}
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
if (status != DMUB_STATUS_POWER_STATE_D3) {
DC_ERROR("Error starting DMUB execution: status=%d\n", status);
@@ -180,6 +236,26 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
return true;
}
+bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ bool res = false;
+
+ if (dc_dmub_srv && dc_dmub_srv->dmub) {
+ if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
+ res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ } else {
+ res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ }
+
+ if (res)
+ res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
+ }
+
+ return res;
+}
+
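The wrapper above routes a command list to either the register-based or the frame-buffer inbox path and then refreshes the inbox status. A simplified standalone sketch of that dispatch, with stubbed executors rather than the real dmub_srv API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the two inbox flavors the wrapper selects between. */
enum inbox_type { INBOX_FB, INBOX_REG };

static bool reg_queue_execute(int count) { printf("reg x%d\n", count); return true; }
static bool fb_queue_execute(int count)  { printf("fb x%d\n", count);  return true; }

/* Mirrors the shape of dc_dmub_srv_cmd_list_queue_execute(): route by
 * inbox type; the real code then calls dmub_srv_update_inbox_status()
 * on success, modeled here as a no-op. */
static bool queue_execute(enum inbox_type type, int count)
{
	return (type == INBOX_REG) ? reg_queue_execute(count)
				   : fb_queue_execute(count);
}

int main(void)
{
	queue_execute(INBOX_REG, 2);
	queue_execute(INBOX_FB, 5);
	return 0;
}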
bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
enum dm_dmub_wait_type wait_type,
union dmub_rb_cmd *cmd_list)
@@ -202,7 +278,8 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
if (!dmub->debug.timeout_info.timeout_occured) {
dmub->debug.timeout_info.timeout_occured = true;
- dmub->debug.timeout_info.timeout_cmd = *cmd_list;
+ if (cmd_list)
+ dmub->debug.timeout_info.timeout_cmd = *cmd_list;
dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
}
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
@@ -210,8 +287,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
}
// Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+ if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
+ dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
+ }
}
return true;
@@ -224,74 +302,10 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd
bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
- struct dc_context *dc_ctx;
- struct dmub_srv *dmub;
- enum dmub_status status;
- int i;
-
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
return false;
- dc_ctx = dc_dmub_srv->ctx;
- dmub = dc_dmub_srv->dmub;
-
- for (i = 0 ; i < count; i++) {
- // Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
-
- if (status == DMUB_STATUS_QUEUE_FULL) {
- /* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
- if (status == DMUB_STATUS_POWER_STATE_D3)
- return false;
-
- status = dmub_srv_wait_for_idle(dmub, 100000);
- if (status != DMUB_STATUS_OK)
- return false;
-
- /* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
- }
-
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
- return false;
- }
- }
-
- status = dmub_srv_cmd_execute(dmub);
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
- return false;
- }
-
- // Wait for DMUB to process command
- if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
- do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
- } while (status != DMUB_STATUS_OK);
- } else
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK) {
- DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- return false;
- }
-
- // Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
- }
-
- return true;
+ return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
@@ -1243,7 +1257,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
memset(&new_signals, 0, sizeof(new_signals));
@@ -1255,12 +1269,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
@@ -1272,6 +1290,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else {
/* RCG only */
new_signals.bits.allow_pg = 0;
@@ -1279,8 +1299,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips2 = 0;
new_signals.bits.allow_z10 = 0;
}
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ }
+ // Setting RCG allow bits (IPSv2.0)
+ if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) {
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ }
+ // IPS dynamic allow bits (IPSv2 change, vpb use case)
+ if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ } else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ new_signals.bits.allow_dynamic_ips1_z8 = 1;
}
-
ips_driver->signals = new_signals;
dc_dmub_srv->driver_signals = ips_driver->signals;
}
@@ -1304,7 +1344,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv;
- uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0;
if (dc->debug.dmcub_emulation)
return;
@@ -1324,45 +1364,49 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
rcg_exit_count = ips_fw->rcg_exit_count;
ips1_exit_count = ips_fw->ips1_exit_count;
ips2_exit_count = ips_fw->ips2_exit_count;
+ ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count;
ips_driver->signals.all = 0;
dc_dmub_srv->driver_signals = ips_driver->signals;
DC_LOG_IPS(
- "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
+		"%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1z8=%u)",
__func__,
ips_driver->signals.bits.allow_ips1,
ips_driver->signals.bits.allow_ips2,
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit,
ips_fw->rcg_entry_count,
ips_fw->ips1_entry_count,
- ips_fw->ips2_entry_count);
+ ips_fw->ips2_entry_count,
+ ips_fw->ips1_z8ret_entry_count);
/* Note: register access has technically not resumed for DCN here, but we
* need to be message PMFW through our standard register interface.
*/
dc_dmub_srv->needs_idle_wake = false;
- if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
+ if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
(!dc->debug.optimize_ips_handshake ||
- ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) {
DC_LOG_IPS(
- "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
+			"wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
udelay(dc->debug.ips2_eval_delay_us);
- if (ips_fw->signals.bits.ips2_commit) {
- DC_LOG_IPS(
- "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
- ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ DC_LOG_IPS(
+ "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
- // Tell PMFW to exit low power state
- dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ // Tell PMFW to exit low power state
+ dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+
+ if (ips_fw->signals.bits.ips2_commit) {
DC_LOG_IPS(
"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
@@ -1400,35 +1444,38 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
+ dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
DC_LOG_IPS(
- "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
DC_LOG_IPS(
- "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
}
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
+ DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)",
__func__,
rcg_exit_count,
ips1_exit_count,
- ips2_exit_count);
+ ips2_exit_count,
+ ips1z8_exit_count);
}
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
@@ -1641,7 +1688,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
return result;
}
-void dc_dmub_srv_fams2_update_config(struct dc *dc,
+static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc,
struct dc_state *context,
bool enable)
{
@@ -1654,7 +1701,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* fill in generic command header */
global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ global_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
if (enable) {
/* send global configuration parameters */
@@ -1673,11 +1721,13 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* configure command header */
stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_base_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_base_cmd->header.multi_cmd_pending = 1;
stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_sub_state_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_sub_state_cmd->header.multi_cmd_pending = 1;
/* copy stream static base state */
memcpy(&stream_base_cmd->config,
@@ -1704,6 +1754,63 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr;
+ union dmub_rb_cmd cmd;
+ uint32_t i;
+
+ memset(config, 0, sizeof(*config));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;
+
+ cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr;
+ cmd.ib_fams2_config.ib_data.size = sizeof(*config);
+
+ if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
+		/* send global configuration parameters */
+		memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config,
+				sizeof(struct dmub_cmd_fams2_global_config));
+
+		/* apply static feature configuration overrides after the base copy so they are not clobbered */
+		config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
+		config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
+		config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
+
+ /* construct per-stream configs */
+ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
+ /* copy stream static base state */
+ memcpy(&config->stream_v1[i].base,
+ &context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
+ sizeof(config->stream_v1[i].base));
+
+ /* copy stream static sub-state */
+ memcpy(&config->stream_v1[i].sub_state,
+ &context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i],
+ sizeof(config->stream_v1[i].sub_state));
+ }
+ }
+
+ config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
+ config->global.features.bits.enable = enable;
+
+ dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+	if (dc->debug.fams_version.major == 2)
+		dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable);
+	else if (dc->debug.fams_version.major == 3)
+		dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable);
+}
+
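+/*
+ * Dispatch sketch: fams_version.major selects the transport. Major 2 sends
+ * ring-buffer commands (a global config plus per-stream base/sub-state pairs
+ * chained with multi_cmd_pending), while major 3 packs a single
+ * dmub_fams2_config_v2 into GART-backed indirect memory and sends one
+ * DMUB_CMD__FAMS2_IB_CONFIG command referencing it:
+ *
+ *	// hypothetical caller, shown only to make the dispatch concrete
+ *	dc_dmub_srv_fams2_update_config(dc, context, true);
+ */
+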
void dc_dmub_srv_fams2_drr_update(struct dc *dc,
uint32_t tg_inst,
uint32_t vtotal_min,
@@ -1723,7 +1830,8 @@ void dc_dmub_srv_fams2_drr_update(struct dc *dc,
cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
- cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
+ cmd.fams2_drr_update.header.payload_bytes =
+ sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -1759,7 +1867,8 @@ void dc_dmub_srv_fams2_passthrough_flip(
/* build command header */
cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
- cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);
+ cmds[num_cmds].fams2_flip.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);
/* for chaining multiple commands, all but last command should set to 1 */
cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
@@ -1827,80 +1936,267 @@ void dc_dmub_srv_fams2_passthrough_flip(
}
}
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
+
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement)
{
- bool result;
+ union dmub_rb_cmd cmd;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
+ cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
+ cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data);
+
+ // only panel_inst=0 is supported at the moment
+ cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst;
+ cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
+
+ return true;
+}
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t bytes = sizeof(struct dmub_ips_residency_info);
+
+ dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb);
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
+ cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
+ cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data);
+
+ cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.ips_query_residency_info.info_data.size = bytes;
+ cmd.ips_query_residency_info.info_data.panel_inst = panel_inst;
+ cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) ||
+ cmd.ips_query_residency_info.header.ret_status == 0)
return false;
- result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
- start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);
+ // copy the result to the output since ret_status != 0 means the command returned data
+ memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+
+ return true;
+}
+
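+/*
+ * Illustrative flow (hypothetical caller, e.g. a debugfs handler):
+ *
+ *	struct dmub_ips_residency_info info;
+ *	enum ips_residency_mode mode = ...; // any enumerator of ips_residency_mode
+ *
+ *	dc_dmub_srv_ips_residency_cntl(ctx, 0, true);  // start measurement
+ *	// ... workload runs ...
+ *	dc_dmub_srv_ips_residency_cntl(ctx, 0, false); // stop measurement
+ *	if (dc_dmub_srv_ips_query_residency_info(ctx, 0, &info, mode))
+ *		; // info now holds data copied back from scratch_mem_fb
+ */
+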
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr;
+ lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Init failed in DMUB");
return result;
}
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
+bool dmub_lsdma_send_linear_copy_packet(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count)
{
- uint32_t i;
- enum dmub_gpint_command command_code;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
- return;
+ memset(&cmd, 0, sizeof(cmd));
- switch (output->ips_mode) {
- case DMUB_IPS_MODE_IPS1_MAX:
- command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS2:
- command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_RCG:
- command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_ONO2_ON:
- command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
- break;
- default:
- command_code = DMUB_GPINT__INVALID_COMMAND;
- break;
- }
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
- if (command_code == DMUB_GPINT__INVALID_COMMAND)
+	lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects the byte count minus 1
+ lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Copy failed in DMUB");
+
+ return result;
+}
+
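+/*
+ * Worked example of the lo/hi split used by all LSDMA wrappers: 64-bit GPU
+ * addresses travel as two 32-bit fields, and the copy size is programmed as
+ * (bytes - 1). The addresses below are placeholders.
+ *
+ *	uint64_t src = 0x0000000123456789ULL; // src_lo=0x23456789 src_hi=0x1
+ *	uint64_t dst = 0x0000000223456000ULL;
+ *	dmub_lsdma_send_linear_copy_packet(dc_dmub_srv, src, dst, 4096);
+ *	// linear_copy_data.count is programmed as 4095
+ */
+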
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.tiled_copy_data.src_addr_lo = params.src_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_addr_hi = (params.src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_lo = params.dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_hi = (params.dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_x = params.src_x;
+ lsdma_data->u.tiled_copy_data.src_y = params.src_y;
+ lsdma_data->u.tiled_copy_data.dst_x = params.dst_x;
+ lsdma_data->u.tiled_copy_data.dst_y = params.dst_y;
+	lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA controller expects width minus 1
+	lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width minus 1
+ lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.rect_x = params.rect_x;
+ lsdma_data->u.tiled_copy_data.rect_y = params.rect_y;
+ lsdma_data->u.tiled_copy_data.dcc = params.dcc;
+ lsdma_data->u.tiled_copy_data.tmz = params.tmz;
+ lsdma_data->u.tiled_copy_data.read_compress = params.read_compress;
+ lsdma_data->u.tiled_copy_data.write_compress = params.write_compress;
+	lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects height minus 1
+	lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects height minus 1
+ lsdma_data->u.tiled_copy_data.data_format = params.data_format;
+ lsdma_data->u.tiled_copy_data.max_com = params.max_com;
+ lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable;
+ lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1;
+ lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.data = data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Constfill failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.reg_write_data.reg_addr = reg_addr;
+ lsdma_data->u.reg_write_data.reg_data = reg_data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Poll Reg failed in DMUB");
+
+ return result;
+}
+
+void dc_dmub_srv_release_hw(const struct dc *dc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd = {0};
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return;
- // send gpint commands and wait for ack
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
- (uint16_t)(output->ips_mode),
- &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->residency_percent = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
- (uint16_t)(output->ips_mode),
- &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->entry_counter = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[1] = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[1] = 0;
-
- // NUM_IPS_HISTOGRAM_BUCKETS = 16
- for (i = 0; i < 16; i++)
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
- DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->histogram[i] = 0;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW;
+ cmd.idle_opt_notify_idle.header.payload_bytes =
+ sizeof(cmd.idle_opt_notify_idle) -
+ sizeof(cmd.idle_opt_notify_idle.header);
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index a636f4c3f01d..8ea320f21269 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -58,7 +58,7 @@ struct dc_dmub_srv {
bool needs_idle_wake;
};
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
@@ -210,6 +210,60 @@ void dc_dmub_srv_fams2_passthrough_flip(
struct dc_surface_update *srf_updates,
int surface_count);
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dmub_lsdma_send_linear_copy_packet(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count);
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable);
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data);
+
+struct lsdma_send_tiled_to_tiled_copy_command_params {
+ uint64_t src_addr;
+ uint64_t dst_addr;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t src_width : 16;
+ uint32_t dst_width : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_height : 16;
+ uint32_t dst_height : 16;
+
+ uint32_t data_format : 6;
+ uint32_t swizzle_mode : 5;
+ uint32_t element_size : 3;
+ uint32_t dcc : 1;
+ uint32_t tmz : 1;
+ uint32_t read_compress : 2;
+ uint32_t write_compress : 2;
+ uint32_t max_com : 2;
+ uint32_t max_uncom : 1;
+ uint32_t padding : 9;
+};
+
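+/*
+ * Illustrative initialization (values are placeholders): the struct is
+ * passed by value, so a designated initializer leaves unset fields zero.
+ *
+ *	struct lsdma_send_tiled_to_tiled_copy_command_params p = {
+ *		.src_addr = src_gpu_va, // hypothetical addresses
+ *		.dst_addr = dst_gpu_va,
+ *		.src_width = 1920, .dst_width = 1920,
+ *		.src_height = 1080, .dst_height = 1080,
+ *		.rect_x = 1920, .rect_y = 1080,
+ *	};
+ *	dmub_lsdma_send_tiled_to_tiled_copy_command(dc_dmub_srv, p);
+ *
+ * Note the implementation subtracts 1 from the width/height fields before
+ * programming the controller, so callers pass natural sizes here.
+ */
+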
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params);
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data);
+
/**
* struct ips_residency_info - struct containing info from dmub_ips_residency_stats
*
@@ -223,7 +277,7 @@ void dc_dmub_srv_fams2_passthrough_flip(
* @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c
*/
struct ips_residency_info {
- enum dmub_ips_mode ips_mode;
+ enum ips_residency_mode ips_mode;
unsigned int residency_percent;
unsigned int entry_counter;
unsigned int total_active_time_us[2];
@@ -231,21 +285,16 @@ struct ips_residency_info {
unsigned int histogram[16];
};
-/**
- * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status
- *
- * @dc_dmub_srv: The DC DMUB service pointer
- * @start_measurement: Describes whether to start or stop measurement
- *
- * Return: true if GPINT was sent successfully, false otherwise
- */
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement);
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement);
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst,
+ struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode);
/**
- * bool dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info
+ * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required.
*
- * @dc_dmub_srv: The DC DMUB service pointer
- * @output: Output struct to copy the the residency info to
+ * @dc: pointer to DC object
*/
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output);
+void dc_dmub_srv_release_hw(const struct dc *dc);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 77c87ad57220..5ce1be362534 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -159,6 +159,16 @@ struct dc_link_settings {
uint8_t link_rate_set;
};
+struct dc_tunnel_settings {
+ bool should_enable_dp_tunneling;
+ bool should_use_dp_bw_allocation;
+ uint8_t cm_id;
+ uint8_t group_id;
+ uint32_t bw_granularity;
+ uint32_t estimated_bw;
+ uint32_t allocated_bw;
+};
+
union dc_dp_ffe_preset {
struct {
uint8_t level : 4;
@@ -943,10 +953,30 @@ union dpia_info {
uint8_t raw;
};
+/* DPCD[0xE0020] USB4_DRIVER_BW_CAPABILITY register. */
+union usb4_driver_bw_cap {
+ struct {
+ uint8_t rsvd :7;
+ uint8_t driver_bw_alloc_support :1;
+ } bits;
+ uint8_t raw;
+};
+
+/* DPCD[0xE0021] DP_IN_ADAPTER_TUNNEL_INFORMATION register. */
+union dpia_tunnel_info {
+ struct {
+ uint8_t group_id :3;
+ uint8_t rsvd :5;
+ } bits;
+ uint8_t raw;
+};
+
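+/*
+ * Decode example (illustrative): DPCD reads land in .raw and are unpacked
+ * via the bitfields, e.g. a DP_IN_ADAPTER_TUNNEL_INFORMATION byte of 0x05
+ * yields group_id == 5 from the low three bits.
+ *
+ *	union dpia_tunnel_info info;
+ *	info.raw = dpcd_byte; // hypothetical result of a DPCD read
+ *	// info.bits.group_id == (0x05 & 0x7) == 5
+ */
+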
/* DP Tunneling over USB4 */
struct dpcd_usb4_dp_tunneling_info {
union dp_tun_cap_support dp_tun_cap;
union dpia_info dpia_info;
+ union usb4_driver_bw_cap driver_bw_cap;
+ union dpia_tunnel_info dpia_tunnel_info;
uint8_t usb4_driver_id;
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
@@ -1157,8 +1187,8 @@ struct dc_lttpr_caps {
union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
union dp_alpm_lttpr_cap alpm;
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
- uint8_t lttpr_ieee_oui[3];
- uint8_t lttpr_device_id[6];
+ uint8_t lttpr_ieee_oui[3]; // Always read from closest LTTPR to host
+ uint8_t lttpr_device_id[6]; // Always read from closest LTTPR to host
};
struct dc_dongle_dfp_cap_ext {
@@ -1486,5 +1516,11 @@ struct dp_trace {
# ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED
# define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3)
# endif
+# ifndef DPTX_BW_ALLOC_UNMASK_IRQ
+# define DPTX_BW_ALLOC_UNMASK_IRQ (1 << 6)
+# endif
+# ifndef DPTX_BW_ALLOC_MODE_ENABLE
+# define DPTX_BW_ALLOC_MODE_ENABLE (1 << 7)
+# endif
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.c b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
new file mode 100644
index 000000000000..fee69642fb93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dc_fused_io.h"
+
+#include "dm_helpers.h"
+#include "gpio.h"
+
+static bool op_i2c_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_i2c *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line,
+ bool over_aux
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_i2c *loc = &req->u.i2c;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = false;
+ loc->ddc_line = ddc_line;
+ loc->over_aux = over_aux;
+ loc->address = op->address;
+ loc->offset = op->offset;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool op_aux_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_aux *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_aux *loc = &req->u.aux;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = true;
+ loc->ddc_line = ddc_line;
+ loc->address = op->address;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool atomic_write_poll_read(
+ struct dc_link *link,
+ union dmub_rb_cmd commands[3],
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ const uint8_t count = 3;
+ const uint32_t timeout_per_request_us = 10000;
+ const uint32_t timeout_per_aux_transaction_us = 10000;
+ uint64_t timeout_us = 0;
+
+ commands[1].fused_io.request.poll_mask_msb = poll_mask_msb;
+ commands[1].fused_io.request.timeout_us = poll_timeout_us;
+
+ for (uint8_t i = 0; i < count; i++) {
+ struct dmub_rb_cmd_fused_io *io = &commands[i].fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_EXECUTE;
+ io->header.multi_cmd_pending = i != count - 1;
+ io->header.payload_bytes = sizeof(commands[i].fused_io) - sizeof(io->header);
+
+ timeout_us += timeout_per_request_us + io->request.timeout_us;
+ if (!io->request.timeout_us && io->request.u.aux.is_aux)
+ timeout_us += timeout_per_aux_transaction_us * (io->request.u.aux.length / 16);
+ }
+
+ if (!dm_helpers_execute_fused_io(link->ctx, link, commands, count, timeout_us))
+ return false;
+
+ return commands[0].fused_io.request.status == FUSED_REQUEST_STATUS_SUCCESS;
+}
+
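+/*
+ * Worked example of the timeout budget above: each request adds a 10000 us
+ * base, the poll adds its own poll_timeout_us, and an AUX request with no
+ * per-request timeout adds 10000 us per 16-byte transaction. For a 4-byte
+ * AUX write, a poll with poll_timeout_us = 5000, and a 32-byte AUX read:
+ *
+ *	timeout_us = (3 * 10000) + 5000 + (32 / 16) * 10000 = 55000 us
+ */
+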
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const bool over_aux = false;
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_i2c_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line, over_aux)
+ && op_i2c_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line, over_aux)
+ && op_i2c_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line, over_aux);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_aux_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line)
+ && op_aux_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line)
+ && op_aux_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.h b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
new file mode 100644
index 000000000000..c74917240985
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __DC_FUSED_IO_H__
+#define __DC_FUSED_IO_H__
+
+#include "dc.h"
+#include "mod_hdcp.h"
+
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+#endif // __DC_FUSED_IO_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 8f077e15b4f0..7217de258851 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -682,13 +682,19 @@ void reg_sequence_wait_done(const struct dc_context *ctx)
if (offload &&
ctx->dc->debug.dmub_offload_enabled &&
!ctx->dc->debug.dmcub_emulation) {
- dc_dmub_srv_wait_idle(ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
}
}
char *dce_version_to_string(const int version)
{
switch (version) {
+ case DCE_VERSION_6_0:
+ return "DCE 6.0";
+ case DCE_VERSION_6_1:
+ return "DCE 6.1";
+ case DCE_VERSION_6_4:
+ return "DCE 6.4";
case DCE_VERSION_8_0:
return "DCE 8.0";
case DCE_VERSION_8_1:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index d562ddeca512..667852517246 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -68,7 +68,7 @@ enum dc_plane_addr_type {
struct dc_plane_address {
enum dc_plane_addr_type type;
- bool tmz_surface;
+ uint8_t tmz_surface;
union {
struct{
PHYSICAL_ADDRESS_LOC addr;
@@ -974,6 +974,7 @@ struct dc_crtc_timing {
uint32_t pix_clk_100hz;
uint32_t min_refresh_in_uhz;
+ uint32_t max_refresh_in_uhz;
uint32_t vic;
uint32_t hdmi_vic;
@@ -1103,7 +1104,8 @@ enum mpcc_gamut_remap_mode_select {
enum mpcc_gamut_remap_id {
MPCC_OGAM_GAMUT_REMAP,
MPCC_MCM_FIRST_GAMUT_REMAP,
- MPCC_MCM_SECOND_GAMUT_REMAP
+ MPCC_MCM_SECOND_GAMUT_REMAP,
+ MPCC_RMCM_GAMUT_REMAP,
};
enum cursor_matrix_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index e9413685ed4f..14feb843e694 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -28,13 +28,24 @@
#include "dc_hw_types.h"
+union dc_plane_status_update_flags {
+ struct {
+ uint32_t address : 1;
+ } bits;
+ uint32_t raw;
+};
+
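+/*
+ * Illustrative usage: callers opt in to an address refresh through the
+ * flags union instead of a bare bool, leaving room for more bits later.
+ *
+ *	union dc_plane_status_update_flags flags = { .bits.address = 1 };
+ *	const struct dc_plane_status *status =
+ *		dc_plane_get_status(plane_state, flags); // hypothetical caller
+ */
+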
struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state);
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags);
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
bool clear_tiling);
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src);
+
#endif /* _DC_PLANE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index e3a8283b4098..7f57661433eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -156,15 +156,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->adaptive_sharpness.enable = true;
spl_in->adaptive_sharpness.sharpness_level = 0;
} else if (sharpness_setting == SHARPNESS_CUSTOM) {
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500;
+		/* SAT: read sharpness_range from dc_plane_state */
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = plane_state->sharpness_range.sdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = plane_state->sharpness_range.sdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = plane_state->sharpness_range.sdr_rgb_mid;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = plane_state->sharpness_range.sdr_yuv_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = plane_state->sharpness_range.sdr_yuv_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = plane_state->sharpness_range.sdr_yuv_mid;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = plane_state->sharpness_range.hdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = plane_state->sharpness_range.hdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = plane_state->sharpness_range.hdr_rgb_mid;
if (force_sharpness_level > 0) {
if (force_sharpness_level > 10)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
index 1a12ef579ff4..1d9bae56ff6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -105,4 +105,24 @@ bool dc_state_is_fams2_in_use(
const struct dc *dc,
const struct dc_state *state);
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_is_subvp_in_use(struct dc_state *state);
+
#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e0bfddaa23e3..5fc6fea211de 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -44,6 +44,8 @@ struct mall_stream_config {
*/
enum mall_stream_type type;
struct dc_stream_state *paired_stream; // master / slave stream
+	bool subvp_limit_cursor_size; /* stream has or is using subvp, which limits hw cursor size */
+	bool cursor_size_limit_subvp; /* stream is using a hw cursor config that prevents subvp */
};
struct dc_stream_status {
@@ -503,6 +505,11 @@ void program_cursor_position(
struct dc *dc,
struct dc_stream_state *stream);
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
+ const struct dc_cursor_attributes *attributes);
+
bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes);
@@ -572,6 +579,17 @@ bool dc_stream_set_gamut_remap(struct dc *dc,
bool dc_stream_program_csc_matrix(struct dc *dc,
struct dc_stream_state *stream);
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one);
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream);
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc);
+
struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream);
void dc_dmub_update_dirty_rect(struct dc *dc,
@@ -579,4 +597,8 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates,
struct dc_state *context);
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream);
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream);
+
#endif /* DC_STREAM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 83ffaae9f439..375ca2f13b7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -175,6 +175,7 @@ struct dc_panel_patch {
unsigned int embedded_tiled_slave;
unsigned int disable_fams;
unsigned int skip_avmute;
+ unsigned int skip_audio_sab_check;
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
unsigned int disable_colorimetry;
@@ -210,6 +211,7 @@ struct dc_edid_caps {
bool edid_hdmi;
bool hdr_supported;
+ bool rr_capable;
struct dc_panel_patch panel_patch;
};
@@ -262,6 +264,7 @@ enum dc_timing_source {
TIMING_SOURCE_EDID_4BYTE,
TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB,
TIMING_SOURCE_EDID_CEA_RID,
+ TIMING_SOURCE_EDID_DISPLAYID_TYPE5,
TIMING_SOURCE_VBIOS,
TIMING_SOURCE_CV,
TIMING_SOURCE_TV,
@@ -1089,7 +1092,8 @@ union replay_low_refresh_rate_enable_options {
struct {
//BIT[0-3]: Replay Low Hz Support control
unsigned int ENABLE_LOW_RR_SUPPORT :1;
- unsigned int RESERVED_1_3 :3;
+ unsigned int SKIP_ASIC_CHECK :1;
+ unsigned int RESERVED_2_3 :2;
//BIT[4-15]: Replay Low Hz Enable Scenarios
unsigned int ENABLE_STATIC_SCREEN :1;
unsigned int ENABLE_FULL_SCREEN_VIDEO :1;
@@ -1129,6 +1133,10 @@ struct replay_config {
union replay_low_refresh_rate_enable_options low_rr_enable_options;
/* Replay coasting vtotal is within low refresh rate range. */
bool low_rr_activated;
+	/* Replay low refresh rate supported */
+ bool low_rr_supported;
+ /* Replay Video Conferencing Optimization Enabled */
+ bool replay_video_conferencing_optimization_enabled;
};
/* Replay feature flags*/
@@ -1270,7 +1278,7 @@ struct dc_cm2_gpu_mem_format_parameters {
enum dc_cm2_gpu_mem_size {
DC_CM2_GPU_MEM_SIZE_171717,
- DC_CM2_GPU_MEM_SIZE_TRANSFORMED
+ DC_CM2_GPU_MEM_SIZE_TRANSFORMED,
};
struct dc_cm2_gpu_mem_parameters {
@@ -1279,6 +1287,7 @@ struct dc_cm2_gpu_mem_parameters {
struct dc_cm2_gpu_mem_format_parameters format_params;
enum dc_cm2_gpu_mem_pixel_component_order component_order;
enum dc_cm2_gpu_mem_size size;
+ uint16_t bit_depth;
};
enum dc_cm2_transfer_func_source {
@@ -1302,6 +1311,11 @@ struct dc_cm2_func_luts {
const struct dc_3dlut *lut3d_func;
struct dc_cm2_gpu_mem_parameters gpu_mem_params;
};
+ bool rmcm_3dlut_shaper_select;
+ bool mpc_3dlut_enable;
+ bool rmcm_3dlut_enable;
+ bool mpc_mcm_post_blend;
+ uint8_t rmcm_tmz;
} lut3d_data;
const struct dc_transfer_func *lut1d_func;
};
@@ -1359,4 +1373,19 @@ struct set_backlight_level_params {
uint8_t aux_inst;
};
+enum dc_validate_mode {
+ /* validate the mode and program HW */
+ DC_VALIDATE_MODE_AND_PROGRAMMING = 0,
+ /* only validate the mode */
+ DC_VALIDATE_MODE_ONLY = 1,
+ /* validate the mode and get the max state (voltage level) */
+ DC_VALIDATE_MODE_AND_STATE_INDEX = 2,
+};
+
+struct dc_validation_dpia_set {
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *tunnel_settings;
+ uint32_t required_bw;
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index b363f5360818..58c84f555c0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -391,6 +391,7 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg,
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
return;
@@ -411,6 +412,8 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg,
BREAK_TO_DEBUGGER();
break;
}
+ //DC_LOG_DEBUG("%s: inst(%d) DPPCLK rcg_disable: %d\n", __func__, inst, enable ? 0 : 1);
+
}
static void dccg35_set_dpstreamclk_rcg(
@@ -1035,6 +1038,7 @@ static void dccg35_enable_dpp_clk_new(
DPPCLK0_DTO_MODULO, 0xFF);
}
+
static void dccg35_disable_dpp_clk_new(
struct dccg *dccg,
int inst)
@@ -1112,30 +1116,24 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
switch (dpp_inst) {
case 0:
REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
break;
case 1:
REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
break;
case 2:
REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
break;
case 3:
REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
break;
default:
break;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
}
@@ -1163,14 +1161,18 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
ASSERT(false);
phase = 0xff;
}
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, phase,
DPPCLK0_DTO_MODULO, modulo);
dcn35_set_dppclk_enable(dccg, dpp_inst, true);
- } else
+ } else {
dcn35_set_dppclk_enable(dccg, dpp_inst, false);
+ /*we have this in hwss: disable_plane*/
+ //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ }
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
@@ -1182,6 +1184,7 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
return;
+
switch (dpp_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
@@ -1198,6 +1201,8 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
default:
break;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) rcg: %d\n", __func__, dpp_inst, enable);
+
}
static void dccg35_get_pixel_rate_div(
@@ -1521,28 +1526,30 @@ static void dccg35_set_physymclk_root_clock_gating(
switch (phy_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 1:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 2:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 3:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 4:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
+	//DC_LOG_DEBUG("%s: phy_inst(%d) PHYSYMCLK_ROOT_GATE_DISABLE: %d\n", __func__, phy_inst, enable ? 0 : 1);
+
}
static void dccg35_set_physymclk(
@@ -1643,6 +1650,8 @@ static void dccg35_dpp_root_clock_control(
return;
if (clock_on) {
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
+
/* turn off the DTO and leave phase/modulo at max */
dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
@@ -1654,6 +1663,8 @@ static void dccg35_dpp_root_clock_control(
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
+ /*we have this in hwss: disable_plane*/
+ //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
@@ -1771,36 +1782,40 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
//Disable DTO
switch (inst) {
case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
break;
case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
break;
case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
break;
case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -1813,9 +1828,6 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- return;
-
switch (inst) {
case 0:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index ffd172231fdf..668ee2d405fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg)
}
}
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
index 55e8718aad22..5947a35363aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
@@ -209,7 +209,7 @@ void dccg401_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst);
void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst);
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h);
void dccg401_set_ref_dscclk(struct dccg *dccg,
uint32_t dsc_inst);
void dccg401_set_src_sel(
@@ -230,7 +230,6 @@ void dccg401_set_dp_dto(
const struct dp_dto_params *params);
void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
void dccg401_set_dtbclk_p_src(
struct dccg *dccg,
enum streamclk_source src,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 077337698e0a..b4f5b4a6331a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,11 +976,12 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- // Apply ssed(spread spectrum) dpref clock for edp only.
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
- && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
- && encoding == DP_8b_10b_ENCODING)
+ // Apply SSED (spread spectrum) dpref clock for eDP and DP
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 &&
+ dc_is_dp_signal(pix_clk_params->signal_type) &&
+ encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
+
// For these signal types the driver programs DP_DTO without calling the VBIOS command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
if (e) {
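
dcn31_program_pix_clk now applies the spread-spectrum DP DTO reference clock for any DP signal using 8b/10b encoding, not only eDP. A compilable sketch of the widened predicate follows; the enum values are illustrative stand-ins for the DC types, not the real definitions:

    #include <stdbool.h>
    #include <stdint.h>

    enum signal_type { SIGNAL_TYPE_EDP, SIGNAL_TYPE_DISPLAY_PORT, SIGNAL_TYPE_HDMI };
    enum dp_encoding { DP_8b_10b_ENCODING, DP_128b_132b_ENCODING };

    static bool dc_is_dp_signal(enum signal_type s)
    {
        return s == SIGNAL_TYPE_EDP || s == SIGNAL_TYPE_DISPLAY_PORT;
    }

    /* Before: only SIGNAL_TYPE_EDP qualified; now any DP signal does. */
    static bool use_ss_dp_dto_ref(uint32_t dto_src_khz, enum signal_type sig,
                                  enum dp_encoding enc)
    {
        return dto_src_khz != 0 && dc_is_dp_signal(sig) &&
               enc == DP_8b_10b_ENCODING;
    }
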
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 0721ae895ae9..94128f7a18b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -257,7 +257,7 @@ bool dce110_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
struct dc_bios *bios,
- enum clock_source_id,
+ enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index d28826c3ae5f..0421b267a0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -292,9 +292,35 @@ static void set_speed(
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
+static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ switch (arbitrate) {
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW:
+ return true;
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW:
+ return false;
+ case DC_I2C_STATUS__DC_I2C_STATUS_IDLE:
+ default:
+ break;
+ }
+
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return false;
+
+ return true;
+}
+
static bool setup_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
+ // Deassert soft reset to unblock I2C engine registers
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false);
+
uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
uint32_t reset_length = 0;
@@ -309,8 +335,8 @@ static bool setup_engine(
REG_UPDATE_N(SETUP, 1,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1);
- /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
- REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
+ if (!acquire_engine(dce_i2c_hw))
+ return false;
/* set SW-requested I2C speed to default; if the API specifies a speed it will be overridden later */
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);
@@ -319,9 +345,8 @@ static bool setup_engine(
i2c_setup_limit = dce_i2c_hw->setup_limit;
/* Program pin select */
- REG_UPDATE_6(DC_I2C_CONTROL,
+ REG_UPDATE_5(DC_I2C_CONTROL,
DC_I2C_GO, 0,
- DC_I2C_SOFT_RESET, 0,
DC_I2C_SEND_RESET, 0,
DC_I2C_SW_STATUS_RESET, 1,
DC_I2C_TRANSACTION_COUNT, 0,
@@ -351,6 +376,32 @@ static bool setup_engine(
return true;
}
+/**
+ * cntl_stuck_hw_workaround() - Workaround for I2C engine stuck state
+ * @dce_i2c_hw: Pointer to dce_i2c_hw structure
+ *
+ * If we boot without an HDMI display, the I2C engine does not get initialized
+ * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after
+ * acquire. After setting SW_DONE_USING_I2C on release, the engine gets
+ * immediately reacquired by SW, preventing DMUB from using it.
+ *
+ * This function checks the I2C arbitration status and applies a release
+ * workaround if necessary.
+ */
+static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return;
+
+ // Still acquired after release; release again as a workaround
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW);
+}
+
static void release_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
@@ -378,9 +429,9 @@ static void release_engine(
/*for HW HDCP Ri polling failure w/a test*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp);
- /* Release I2C after reset, so HW or DMCU could use it */
- REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1,
- DC_I2C_SW_USE_I2C_REG_REQ, 0);
+ // Release the I2C engine so HW or DMCU can use it; this automatically clears SW_USE_I2C
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ cntl_stuck_hw_workaround(dce_i2c_hw);
if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) {
if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL)
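
Taken together, acquire_engine() and the release path above implement a request-then-verify arbitration handshake on DC_I2C_ARBITRATION. A self-contained sketch of that handshake, modeling the status register as a plain variable, is below; in this toy model the stuck branch can never fire, whereas on the real hardware it can, which is exactly what cntl_stuck_hw_workaround() covers:

    #include <stdbool.h>
    #include <stdio.h>

    enum i2c_status { I2C_IDLE, I2C_USED_BY_SW, I2C_USED_BY_HW };

    static enum i2c_status cntl_status = I2C_IDLE; /* stands in for the register */

    static bool acquire(void)
    {
        if (cntl_status == I2C_USED_BY_SW)
            return true;  /* already ours */
        if (cntl_status == I2C_USED_BY_HW)
            return false; /* HW/DMCU owns it; back off */

        cntl_status = I2C_USED_BY_SW;         /* SW_USE_I2C_REG_REQ = 1 */
        return cntl_status == I2C_USED_BY_SW; /* confirm the grant */
    }

    static void release(void)
    {
        cntl_status = I2C_IDLE;            /* SW_DONE_USING_I2C_REG = 1 */
        if (cntl_status == I2C_USED_BY_SW) /* stuck-engine workaround path */
            cntl_status = I2C_IDLE;        /* release a second time */
    }

    int main(void)
    {
        if (acquire()) {
            puts("engine acquired");
            release();
        }
        return 0;
    }
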
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index ccc154b0281c..3b9011ef9b68 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -28,6 +28,8 @@
#include "dc.h"
#include "core_types.h"
#include "dmub_cmd.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 0d7e7f3b81a1..a641ae04450c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -240,7 +240,8 @@ bool dmub_abm_save_restore(
cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
- cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+ cmd.abm_save_restore.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_abm_save_restore) - sizeof(struct dmub_cmd_header);
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
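
The payload_bytes fixes here and in the dmub_replay.c hunks below all follow one rule: a DMUB ring-buffer command is a header followed by a payload, and payload_bytes must count only the payload, not the header. A small standalone check of the arithmetic, with an illustrative stand-in for the struct layout:

    #include <stdint.h>
    #include <stdio.h>

    struct dmub_cmd_header { uint32_t type, sub_type, payload_bytes; };
    struct dmub_rb_cmd_abm_save_restore {
        struct dmub_cmd_header header;
        uint8_t payload[64]; /* placeholder for abm_init_config_data etc. */
    };

    int main(void)
    {
        struct dmub_rb_cmd_abm_save_restore cmd = {0};

        /* Old (wrong): counted the header as part of the payload. */
        unsigned wrong = (unsigned)sizeof(cmd);
        /* New: payload only. */
        cmd.header.payload_bytes =
            (uint32_t)(sizeof(cmd) - sizeof(struct dmub_cmd_header));

        printf("wrong=%u right=%u\n", wrong, cmd.header.payload_bytes);
        return 0;
    }
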
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index c31e4f26a305..e7a318e26d38 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -4,6 +4,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
+#include "dc_dp_types.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
#include "dmub_replay.h"
@@ -43,21 +44,45 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s
/*
* Enable/Disable Replay.
*/
-static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst,
+ struct dc_link *link)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
uint32_t retry_count;
enum replay_state state = REPLAY_STATE_0;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ uint8_t i;
memset(&cmd, 0, sizeof(cmd));
cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
cmd.replay_enable.data.panel_inst = panel_inst;
cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
- if (enable)
+ if (enable) {
cmd.replay_enable.data.enable = REPLAY_ENABLE;
- else
+ // hpo stream/link encoder assignments are not static; update them every time replay is enabled
+ if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ // TODO: refactor for multi-eDP support
+ break;
+ }
+ }
+
+ if (!pipe_ctx)
+ return;
+
+ cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
+ }
+ } else
cmd.replay_enable.data.enable = REPLAY_DISABLE;
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
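
A pared-down sketch of the pipe scan added above: walk the pipe contexts and return the first one whose stream drives this link's eDP connector (the TODO notes it stops at the first match). The types below are reduced stand-ins for the DC structures, not their real layouts:

    #include <stddef.h>

    #define MAX_PIPES 6

    struct dc_link { int connector_is_edp; };
    struct dc_stream { struct dc_link *link; };
    struct pipe_ctx { struct dc_stream *stream; };

    static struct pipe_ctx *find_edp_pipe(struct pipe_ctx pipes[MAX_PIPES],
                                          struct dc_link *link)
    {
        for (int i = 0; i < MAX_PIPES; i++) {
            struct dc_stream *s = pipes[i].stream;

            if (s && s->link == link && s->link->connector_is_edp)
                return &pipes[i]; /* first match; multi-eDP is a TODO */
        }
        return NULL; /* caller bails out, as the hunk does */
    }
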
@@ -149,6 +174,17 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->digbe_inst = replay_context->digbe_inst;
copy_settings_data->digfe_inst = replay_context->digfe_inst;
+ if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc)
+ copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ else
+ copy_settings_data->hpo_stream_enc_inst = 0;
+ if (pipe_ctx->link_res.hpo_dp_link_enc)
+ copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
+ else
+ copy_settings_data->hpo_link_enc_inst = 0;
+ }
+
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -211,6 +247,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
+ pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
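
coasting_vtotal is a 32-bit value carried in two 16-bit command fields; a quick standalone check that the split used above round-trips:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t coasting_vtotal = 0x0004ABCD;
        uint16_t lo = coasting_vtotal & 0xFFFF;
        uint16_t hi = (coasting_vtotal & 0xFFFF0000) >> 16;

        assert(((uint32_t)hi << 16 | lo) == coasting_vtotal);
        return 0;
    }
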
@@ -280,7 +317,9 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
memset(&cmd, 0, sizeof(cmd));
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
- pCmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ pCmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal) -
+ sizeof(struct dmub_cmd_header);
pCmd->replay_set_power_opt_data.power_opt = power_opt;
pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
@@ -319,7 +358,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_timing_sync.header.sub_type =
DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
cmd.replay_set_timing_sync.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+ sizeof(struct dmub_rb_cmd_replay_set_timing_sync) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
cmd_element->sync_data.panel_inst;
@@ -331,7 +371,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_frameupdate_timer.header.sub_type =
DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
cmd.replay_set_frameupdate_timer.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+ sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_frameupdate_timer.data.panel_inst =
cmd_element->panel_inst;
@@ -345,7 +386,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_pseudo_vtotal.header.sub_type =
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
cmd.replay_set_pseudo_vtotal.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal);
+ sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_pseudo_vtotal.data.panel_inst =
cmd_element->pseudo_vtotal_data.panel_inst;
@@ -357,7 +399,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp);
+ sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst =
cmd_element->disabled_adaptive_sync_sdp_data.panel_inst;
@@ -369,7 +412,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_general_cmd.header.sub_type =
DMUB_CMD__REPLAY_SET_GENERAL_CMD;
cmd.replay_set_general_cmd.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_general_cmd);
+ sizeof(struct dmub_rb_cmd_replay_set_general_cmd) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_general_cmd.data.panel_inst =
cmd_element->set_general_cmd_data.panel_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index e6346c0ffc0e..ccbe385e132c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -19,7 +19,7 @@ struct dmub_replay_funcs {
void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
uint8_t panel_inst);
void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
- uint8_t panel_inst);
+ uint8_t panel_inst, struct dc_link *link);
bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
struct replay_context *replay_context, uint8_t panel_inst);
void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index eede83ad91fa..824f73eb3326 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -25,8 +25,7 @@
CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
-DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
- dce60_resource.o
+DCE60 = dce60_timing_generator.o
AMD_DAL_DCE60 = $(addprefix $(AMDDALPATH)/dc/dce60/,$(DCE60))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 003a9330c286..88e7a1fc9a30 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -105,7 +105,7 @@ static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz)
dm_write_reg(tg->ctx, addr, value);
}
-static void program_timing(struct timing_generator *tg,
+static void dce80_timing_generator_program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
int vready_offset,
int vstartup_start,
@@ -185,7 +185,7 @@ static void dce80_timing_generator_enable_advanced_request(
static const struct timing_generator_funcs dce80_tg_funcs = {
.validate_timing = dce110_tg_validate_timing,
- .program_timing = program_timing,
+ .program_timing = dce80_timing_generator_program_timing,
.enable_crtc = dce110_timing_generator_enable_crtc,
.disable_crtc = dce110_timing_generator_disable_crtc,
.is_counter_moving = dce110_timing_generator_is_counter_moving,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 5efddd48d5c5..9d160b39e8c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -153,6 +153,14 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index e1d500633dfa..b357683b4255 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -114,9 +114,6 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags)
-
ifdef CONFIG_DRM_AMD_DC_FP
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
@@ -137,7 +134,6 @@ DML += dcn303/dcn303_fpu.o
DML += dcn314/dcn314_fpu.o
DML += dcn35/dcn35_fpu.o
DML += dcn351/dcn351_fpu.o
-DML += dcn401/dcn401_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index f1235bf9a596..74962791302f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -748,7 +748,7 @@ static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic)
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
/*
* we want a breakdown of the various stages of validation, which the
@@ -1119,7 +1119,7 @@ bool dcn_validate_bandwidth(
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
+ if (v->voltage_level != number_of_states_plus_one && validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
@@ -1286,7 +1286,7 @@ bool dcn_validate_bandwidth(
}
} else if (v->voltage_level == number_of_states_plus_one) {
BW_VAL_TRACE_SKIP(fail);
- } else if (fast_validate) {
+ } else if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
}
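
The bool fast_validate parameter is replaced throughout this series by enum dc_validate_mode. Only DC_VALIDATE_MODE_AND_PROGRAMMING is named in these hunks; the sketch below assumes a second enumerator (DC_VALIDATE_MODE_ONLY is a placeholder name, not from the source) to show how the old boolean maps onto the new comparisons:

    enum dc_validate_mode {
        DC_VALIDATE_MODE_AND_PROGRAMMING, /* full validation; old fast_validate == false */
        DC_VALIDATE_MODE_ONLY,            /* assumed placeholder; old fast_validate == true */
    };

    /* Old call sites map as:
     *   fast_validate == false -> DC_VALIDATE_MODE_AND_PROGRAMMING
     *   fast_validate == true  -> any mode != DC_VALIDATE_MODE_AND_PROGRAMMING
     * which is why the hunks compare against the full mode instead of
     * testing a boolean. */
    static int is_full_validation(enum dc_validate_mode mode)
    {
        return mode == DC_VALIDATE_MODE_AND_PROGRAMMING;
    }
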
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index e9fea9c2162e..2a2eaf6adf26 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1315,7 +1315,7 @@ static void swizzle_to_dml_params(
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
@@ -1733,7 +1733,7 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
@@ -1780,10 +1780,10 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2027,7 +2027,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
}
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2040,7 +2040,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_COUNT();
- out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2050,12 +2050,12 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
@@ -2077,7 +2077,7 @@ validate_out:
}
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
@@ -2095,12 +2095,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- if (fast_validate) {
- return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
- }
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return dcn20_validate_bandwidth_internal(dc, context, validate_mode, pipes);
// Best case, we support full UCLK switch latency
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
@@ -2113,7 +2112,7 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
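
The logic above is a two-pass validation: try the real DRAM-clock-change latency first, and only if p-state support fails, revalidate with the dummy latency. A compact sketch of that control flow, where validate() is a hypothetical stand-in for dcn20_validate_bandwidth_internal():

    #include <stdbool.h>
    #include <stdio.h>

    struct bw_result { bool voltage_ok; bool pstate_ok; };

    /* hypothetical stand-in for dcn20_validate_bandwidth_internal() */
    static struct bw_result validate(double dram_change_latency_us)
    {
        struct bw_result r = { true, dram_change_latency_us < 10.0 }; /* toy model */
        return r;
    }

    static bool validate_with_dummy_pstate(double real_us, double dummy_us)
    {
        struct bw_result full = validate(real_us);

        if (full.voltage_ok && full.pstate_ok)
            return true; /* best case: the real latency works */

        struct bw_result dummy = validate(dummy_us);
        return dummy.voltage_ok && dummy.pstate_ok; /* accept dummy p-state */
    }

    int main(void)
    {
        printf("%d\n", validate_with_dummy_pstate(50.0, 9.0));
        return 0;
    }
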
@@ -2156,14 +2155,14 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0; i < pipe_cnt; i++) {
@@ -2239,7 +2238,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel_req,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
int vlevel, vlevel_max;
@@ -2281,10 +2280,10 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2319,7 +2318,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
}
bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2337,7 +2336,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2347,12 +2346,12 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
index b6c34198ddc8..aed00039ca62 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
@@ -44,14 +44,14 @@ void dcn20_calculate_dlg_params(struct dc *dc,
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_calculate_wm(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks);
void dcn20_update_bounding_box(struct dc *dc,
@@ -62,7 +62,7 @@ void dcn20_update_bounding_box(struct dc *dc,
void dcn20_patch_bounding_box(struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb);
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes);
void dcn20_fpu_set_wm_ranges(int i,
struct pp_smu_wm_range_sets *ranges,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
@@ -75,9 +75,9 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
-bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
- fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode);
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
+	enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes);
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index 88789987bdbc..e5f5c0663750 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -339,7 +339,8 @@ void dcn30_fpu_calculate_wm_and_dlg(
* newly found dummy_latency_index
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
@@ -630,7 +631,8 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
dm_allow_self_refresh_and_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index f1fe49401bc0..8d24763938ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1002,6 +1002,7 @@ static bool CalculatePrefetchSchedule(
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // clamp to the U6.2 register limit for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
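
DST_Y_PREFETCH is a U6.2 fixed-point register field: 6 integer bits and 2 fractional bits, so the largest encodable value is 63 + 3/4 = 63.75, which is exactly the new clamp. A standalone check of the clamp plus the quarter-line rounding the surrounding code applies next:

    #include <stdio.h>

    int main(void)
    {
        /* U6.2 max: (2^8 - 1) / 2^2 = 255/4 = 63.75 */
        double u6_2_max = (double)((1 << 8) - 1) / (1 << 2);
        double dst_y_prefetch_equ = 70.3; /* example input above the limit */

        if (dst_y_prefetch_equ > u6_2_max)
            dst_y_prefetch_equ = u6_2_max;

        /* quarter-line rounding used by the DML code: floor(4*(x+0.125))/4 */
        double rounded = (double)(long)(4.0 * (dst_y_prefetch_equ + 0.125)) / 4.0;
        printf("clamped=%.2f rounded=%.2f\n", dst_y_prefetch_equ, rounded);
        return 0;
    }
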
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index d2ae43a82ba5..dfcc5d50071e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -55,5 +55,5 @@ int dcn_get_approx_det_segs_required_for_pstate(
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif /* __DCN31_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index f567a9023682..ed59c77bc6f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1105,6 +1105,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // clamp to the U6.2 register limit for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 5ed117e11aa2..df9d50b9b57c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -306,7 +306,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -316,7 +316,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
dc_assert_fp_enabled();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
index d32c5bb99f4c..362ac79184ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -35,6 +35,6 @@
void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 5865e8fa2d8e..9f3938a50240 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -1123,6 +1123,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // clamp to the U6.2 register limit for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 56dda686e299..6160952245b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -290,7 +290,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
/* for subvp + DRR case, if subvp pipes are still present we support pstate */
if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
@@ -627,6 +627,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
*/
if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
!pipe->stream->hw_cursor_req &&
+ !dc_state_get_stream_cursor_subvp_limit(pipe->stream, context) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
@@ -1478,7 +1479,7 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
@@ -1516,7 +1517,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx);
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
// Populate dppclk to trigger a recalculate in dml_get_voltage_level
// so the phantom pipe DLG params can be assigned correctly.
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
@@ -1559,7 +1561,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc_state_remove_phantom_streams_and_planes(dc, context);
dc_state_release_phantom_streams_and_planes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
@@ -2137,7 +2140,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
bool repopulate_pipes = false;
@@ -2161,7 +2164,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
for (i = 0; i < context->stream_count; i++)
resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!pipe_cnt) {
out = true;
@@ -2171,13 +2174,13 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
- if (!fast_validate) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge,
&pipe_cnt, &repopulate_pipes))
goto validate_fail;
}
- if (fast_validate ||
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING ||
(dc->debug.dml_disallow_alternate_prefetch_modes &&
(vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
@@ -2194,7 +2197,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_none;
- context->bw_ctx.dml.validate_max_state = fast_validate;
+ context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.validate_max_state = false;
@@ -2246,7 +2249,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int flag_vlevel = vlevel;
int i;
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!dc->config.enable_windowed_mpo_odm)
dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
@@ -2342,7 +2345,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
}
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
if (is_subvp_p_drr) {
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2388,7 +2391,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
}
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
if (vlevel_temp < vlevel) {
vlevel = vlevel_temp;
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
@@ -2409,7 +2413,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
stream_status->fpo_in_use = false;
}
context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 276e90e4e0ce..273d2bd79d85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -49,7 +49,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index d9159ca55412..5d73efa2f0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 1,
+ .do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
@@ -437,7 +437,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -446,7 +446,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
const unsigned int max_allowed_vblank_nom = 1023;
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
index 067480fc3691..d121c5afce71 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
@@ -37,7 +37,7 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index 17d0b4923b0c..6f516af82956 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -470,7 +470,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -479,7 +479,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
const unsigned int max_allowed_vblank_nom = 1023;
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
index f93efab9a668..f71d9d8d0759 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
@@ -12,7 +12,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
deleted file mode 100644
index 4fbecb5ff349..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dcn401_fpu.h"
-#include "dcn401/dcn401_resource.h"
-// We need this includes for WATERMARKS_* defines
-#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h"
-#include "link.h"
-
-#define DC_LOGGER_INIT(logger)
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
-{
- /* defaults */
- double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us;
- double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us;
- double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us;
- double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
- uint16_t setb_min_uclk_mhz = min_uclk_mhz;
- uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;
-
- dc_assert_fp_enabled();
-
- /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
- if (dcfclk_mhz_for_the_second_state)
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
- else
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz)
- setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
- clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
- clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
- clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
- clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
- }
- /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */
- /* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest value dummy P state latency */
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
-}
-
-/*
- * dcn401_update_bw_bounding_box
- *
- * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from
- * spreadsheet with actual values as per dGPU SKU:
- * - with passed few options from dc->config
- * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
- * need to get it from PM FW)
- * - with passed latency values (passed in ns units) in dc-> bb override for
- * debugging purposes
- * - with passed latencies from VBIOS (in 100_ns units) if available for
- * certain dGPU SKU
- * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU
- * of the same ASIC)
- * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM
- * FW for different clocks (which might differ for certain dGPU SKU of the
- * same ASIC)
- */
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
-{
- dc_assert_fp_enabled();
-
- /* Override from passed dc->bb_overrides if available*/
- if (dc->bb_overrides.sr_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- dc->bb_overrides.sr_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.urgent_latency_ns)
- dc->dml2_options.bbox_overrides.urgent_latency_us =
- dc->bb_overrides.urgent_latency_ns / 1000.0;
-
- if (dc->bb_overrides.dram_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
-
- if (dc->bb_overrides.fclk_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.fclk_change_latency_us =
- dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
-
- /* Override from VBIOS if VBIOS bb_info available */
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
- struct bp_soc_bb_info bb_info = {0};
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- bb_info.dram_clock_change_latency_100ns * 10;
-
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- bb_info.dram_sr_enter_exit_latency_100ns * 10;
-
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- bb_info.dram_sr_exit_latency_100ns * 10;
- }
- }
-
- /* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans) {
- dc->dml2_options.bbox_overrides.dram_num_chan =
- dc->ctx->dc_bios->vram_info.num_chans;
-
- }
-
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
- dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
- dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
- dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
- dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;
-
- if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
- unsigned int i = 0;
-
- dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- }
- }
- }
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
deleted file mode 100644
index 329f1788843c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DCN401_FPU_H__
-#define __DCN401_FPU_H__
-
-#include "clk_mgr.h"
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr);
-
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 21fd466dba26..4c21ce42054c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -81,10 +81,11 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
@@ -94,18 +95,16 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflag
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
-
-
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
@@ -117,12 +116,11 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_floa
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags)
DML21 := src/dml2_top/dml2_top_interfaces.o
DML21 += src/dml2_top/dml2_top_soc15.o
-DML21 += src/inc/dml2_debug.o
DML21 += src/dml2_core/dml2_core_dcn4.o
+DML21 += src/dml2_core/dml2_core_utils.o
DML21 += src/dml2_core/dml2_core_factory.o
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 7ae9c0ba0c9e..715f9019a33e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -10189,7 +10189,7 @@ dml_uint_t dml_mode_support_ex(struct dml_mode_support_ex_params_st *in_out_para
result = mode_support_pwr_states(&in_out_params->out_lowest_state_idx,
in_out_params->mode_lib,
in_out_params->in_display_cfg,
- 0,
+ in_out_params->in_start_state_idx,
in_out_params->mode_lib->states.num_states - 1);
if (result)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
index 0670e4dc4fd9..dbeb08466092 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
@@ -1917,6 +1917,7 @@ struct display_mode_lib_st {
struct dml_mode_support_ex_params_st {
struct display_mode_lib_st *mode_lib;
const struct dml_display_cfg_st *in_display_cfg;
+ dml_uint_t in_start_state_idx;
dml_uint_t out_lowest_state_idx;
struct dml_mode_support_info_st *out_evaluation_info;
};
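
The new in_start_state_idx field gives callers a lower bound for the power-state sweep instead of always starting at state 0; dml_mode_support_ex() now forwards it to mode_support_pwr_states() as shown above. A minimal caller sketch, assuming an already-populated mode_lib and display_cfg and a hypothetical use_state() consumer:

    struct dml_mode_support_ex_params_st params = {
        .mode_lib = mode_lib,
        .in_display_cfg = &display_cfg,
        .in_start_state_idx = 2, /* skip states 0 and 1 */
    };

    if (dml_mode_support_ex(&params))
        use_state(params.out_lowest_state_idx); /* lowest supported state, >= 2 */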
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 0c8ec30ea672..a06217a9eef6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml21_wrapper.h"
#include "dml2_core_dcn4_calcs.h"
#include "dml2_internal_shared_types.h"
@@ -11,277 +10,263 @@
#include "dml21_translation_helper.h"
#include "bounding_boxes/dcn4_soc_bb.h"
-static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- const struct dml2_soc_bb *soc_bb;
- const struct dml2_soc_qos_parameters *qos_params;
-
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_4_01:
- default:
- if (config->bb_from_dmub)
- soc_bb = config->bb_from_dmub;
- else
- soc_bb = &dml2_socbb_dcn401;
-
- qos_params = &dml_dcn4_variant_a_soc_qos_params;
- }
-
- /* patch soc bb */
- memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb));
-
- /* patch qos params */
- memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters));
-}
-
-static void dml21_external_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config)
-{
- memcpy(&dml_init->soc_bb, &config->external_socbb_ip_params->soc_bb, sizeof(struct dml2_soc_bb));
-}
-
-static void dml21_external_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
+ const struct dc *in_dc,
const struct dml2_configuration_options *config)
{
- memcpy(&dml_init->ip_caps, &config->external_socbb_ip_params->ip_params, sizeof(struct dml2_ip_capabilities));
+ bool disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
+
+ /* ODM options */
+ pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
+ pmo_options->disable_dyn_odm_for_multi_stream = true;
+ pmo_options->disable_dyn_odm_for_stream_with_svp = true;
+
+ pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
+
+	/* NOTE: DRR and SubVP require FAMS2 */
+ pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
+ in_dc->debug.force_disable_subvp ||
+ disable_fams2;
+ pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
+ disable_fams2;
+ pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
+ disable_fams2;
+ pmo_options->disable_fams2 = disable_fams2;
+
+ pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
+ in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
+ pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
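
The shift-and-mask tests above treat dml21_disable_pstate_method_mask as a per-method bitfield: bit 1 disables VBLANK, bit 2 SubVP, bit 3 clamped DRR, and bit 4 variable DRR. A sketch of that layout with hypothetical macro names (only the bit positions are taken from the shifts in this function):

    #define DML21_PSTATE_DISABLE_VBLANK      (1 << 1)
    #define DML21_PSTATE_DISABLE_SVP         (1 << 2)
    #define DML21_PSTATE_DISABLE_DRR_CLAMPED (1 << 3)
    #define DML21_PSTATE_DISABLE_DRR_VAR     (1 << 4)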
-static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+/*
+ * Populate dml_init based on default static values in soc bb. The default
+ * values are for reference and support at least minimal operation of current
+ * SoC and DCN hardware. The values could be modified by subsequent override
+ * functions to reflect our true hardware capability.
+ */
+static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- const struct dml2_ip_capabilities *ip_caps;
-
switch (in_dc->ctx->dce_version) {
case DCN_VERSION_4_01:
+ dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
+ dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
+ dml_init->soc_bb = dml2_socbb_dcn401;
+ dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params;
+ dml_init->ip_caps = dml2_dcn401_max_ip_caps;
+ break;
default:
- ip_caps = &dml2_dcn401_max_ip_caps;
+ memset(dml_init, 0, sizeof(*dml_init));
+ DC_ERR("unsupported dcn version for DML21!");
+ return;
}
-
- memcpy(&dml_init->ip_caps, ip_caps, sizeof(struct dml2_ip_capabilities));
}
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init,
+static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- if (config->use_native_soc_bb_construction)
- dml21_init_socbb_params(dml_init, config, in_dc);
- else
- dml21_external_socbb_params(dml_init, config);
+ dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
+ dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
}
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+/*
+ * SMU stands for System Management Unit. It is a power management processor.
+ * It owns the initialization of dc's clock table and programming of clock values
+ * based on dc's requests.
+ * Our clock values in the base soc bb are dummy placeholders. The real clock
+ * values are retrieved from SMU firmware into dc's clock table at runtime.
+ * This function overrides the dummy placeholder values with the real values
+ * in dc's clock table.
+ */
+static void override_dml_init_with_values_from_smu(
+ struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- if (config->use_native_soc_bb_construction)
- dml21_init_ip_params(dml_init, config, in_dc);
- else
- dml21_external_ip_params(dml_init, config);
-}
-
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config, const struct dc *in_dc)
-{
int i;
-
const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table;
- struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
- struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table;
-
- /* override clocks if smu is present */
- if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) {
- /* dcfclk */
- if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
- dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dcfclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
- dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
- dml_clk_table->dcfclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- dml_clk_table->dcfclk.num_clk_values = i;
- }
+ struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
+
+ if (!in_dc->clk_mgr->funcs->is_smu_present ||
+ !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr))
+ /* skip if smu is not present */
+ return;
+
+ /* dcfclk */
+ if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dcfclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
+ dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.num_clk_values = i;
}
} else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
}
}
+ }
- /* fclk */
- if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
- dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->fclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
- dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
- dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
- dml_clk_table->fclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- dml_clk_table->fclk.num_clk_values = i;
- }
+ /* fclk */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->fclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
+ dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
+ dml_clk_table->fclk.num_clk_values = i + 1;
} else {
- dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.num_clk_values = i;
}
} else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
}
}
+ }
- /* uclk */
- if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
- dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->uclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
- dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
- dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
- dml_clk_table->uclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- dml_clk_table->uclk.num_clk_values = i;
- }
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
+ dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
+ dml_clk_table->uclk.num_clk_values = i + 1;
} else {
- dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.num_clk_values = i;
}
} else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
}
}
+ }
- /* dispclk */
- if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
- dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dispclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
- dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
- dml_clk_table->dispclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- dml_clk_table->dispclk.num_clk_values = i;
- }
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
+ dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
+ dml_clk_table->dispclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.num_clk_values = i;
}
} else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
}
}
+ }
- /* dppclk */
- if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
- dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dppclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
- dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
- dml_clk_table->dppclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- dml_clk_table->dppclk.num_clk_values = i;
- }
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
+ dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
+ dml_clk_table->dppclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.num_clk_values = i;
}
} else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
}
}
+ }
- /* dtbclk */
- if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
- dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dtbclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
- dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
- dml_clk_table->dtbclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- dml_clk_table->dtbclk.num_clk_values = i;
- }
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
+ dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.num_clk_values = i;
}
} else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
}
}
+ }
- /* socclk */
- if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
- dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->socclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
- dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
- dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
- dml_clk_table->socclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- dml_clk_table->socclk.num_clk_values = i;
- }
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
+ dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
+ dml_clk_table->socclk.num_clk_values = i + 1;
} else {
- dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.num_clk_values = i;
}
} else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
}
}
-
- /* do not override phyclks for now */
- /* phyclk */
- // dml_clk_table->phyclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_mhz * 1000;
- // }
-
- /* phyclk_d18 */
- // dml_clk_table->phyclk_d18.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d18_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d18.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d18_mhz * 1000;
- // }
-
- /* phyclk_d32 */
- // dml_clk_table->phyclk_d32.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d32_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d32.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d32_mhz * 1000;
- // }
}
+}
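
Each per-clock loop above applies the same rule: copy the SMU-reported levels into kHz, and when DC-mode limits apply, pin the first over-limit level to the limit and truncate the table there. A hedged sketch of that shared shape as a hypothetical helper, not part of this patch; passing dc_mode_limit_mhz as 0 stands in for use_clock_dc_limits being false:

    static void clamp_clk_values(unsigned int *clk_values_khz,
                                 unsigned int *num_clk_values,
                                 const unsigned int *dc_mhz, /* one clock column of the dc table */
                                 unsigned int num_dc_levels,
                                 unsigned int dc_mode_limit_mhz, /* 0 = no limit */
                                 unsigned int table_size)
    {
        unsigned int i;

        if (!num_dc_levels)
            return;
        *num_clk_values = num_dc_levels;
        for (i = 0; i < table_size; i++) {
            if (i >= *num_clk_values) {
                clk_values_khz[i] = 0;
            } else if (dc_mode_limit_mhz && dc_mhz[i] > dc_mode_limit_mhz) {
                if (i == 0 || dc_mhz[i - 1] < dc_mode_limit_mhz) {
                    /* first level over the limit: pin it to the limit */
                    clk_values_khz[i] = dc_mode_limit_mhz * 1000;
                    *num_clk_values = i + 1;
                } else {
                    /* limit already emitted: truncate the table here */
                    clk_values_khz[i] = 0;
                    *num_clk_values = i;
                }
            } else {
                clk_values_khz[i] = dc_mhz[i] * 1000;
            }
        }
    }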
- dml_soc_bb->dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
- dml_soc_bb->dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
- dml_soc_bb->xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
- dml_soc_bb->dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+static void override_dml_init_with_values_from_vbios(
+ struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
+ struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
+ struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
- /* override bounding box paramters from VBIOS */
if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0)
dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
(in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10;
@@ -308,32 +293,120 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
}
- /* override bounding box paramters from DC config */
- if (in_dc->bb_overrides.sr_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+}
+
+static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ /*
+	 * TODO - There seem to be overlaps between the values overridden from
+	 * DMUB and VBIOS. Investigate and identify the values that DMUB needs
+ * to own.
+ */
+// const struct dmub_soc_bb_params *dmub_bb_params =
+// (const struct dmub_soc_bb_params *)config->bb_from_dmub;
+
+// if (dmub_bb_params == NULL)
+// return;
+
+// if (dmub_bb_params->dram_clk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
+// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->dram_clk_change_read_only_ns > 0)
+// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us =
+// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0;
+// if (dmub_bb_params->dram_clk_change_write_only_ns > 0)
+// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us =
+// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0;
+// if (dmub_bb_params->fclk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
+// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->g7_ppt_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us =
+// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0;
+// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
+// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->stutter_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
+// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
+// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us =
+// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->z8_min_idle_time_ns > 0)
+// dml_init->soc_bb.power_management_parameters.z8_min_idle_time =
+// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0;
+// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE
+// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us =
+// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us =
+// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
+// #else
+// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us =
+// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us =
+// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
+// #endif
+// if (dmub_bb_params->vmin_limit_dispclk_khz > 0)
+// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz;
+// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0)
+// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz;
+// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us =
+// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0;
+}
+
+static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ if (!config->use_native_soc_bb_construction) {
+ dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
+ dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
}
- if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ if (in_dc->bb_overrides.sr_exit_time_ns)
+ dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
+ in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns)
+ dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
- }
- if (in_dc->bb_overrides.dram_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ if (in_dc->bb_overrides.dram_clock_change_latency_ns)
+ dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- }
- if (in_dc->bb_overrides.fclk_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.fclk_change_blackout_us =
+ if (in_dc->bb_overrides.fclk_clock_change_latency_ns)
+ dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
- }
+}
- //TODO
- // if (in_dc->bb_overrides.dummy_clock_change_latency_ns) {
- // dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- // in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- // }
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ populate_default_dml_init_params(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_smu(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_vbios(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_dmub(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_software_policy(dml_init, config, in_dc);
}
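
The call order above encodes precedence: every stage may overwrite what an earlier stage wrote, so software policy beats DMUB, which beats VBIOS, SMU, and the static defaults. A tiny illustration with invented numbers for one field:

    /* Sketch only; the values are made up. Last writer wins. */
    double stutter_exit_us;

    stutter_exit_us = 5.0;            /* static soc bb default */
    stutter_exit_us = 128 * 0.1;      /* VBIOS bb_info: 128 units of 100ns -> 12.8 us */
    stutter_exit_us = 15000 / 1000.0; /* sw policy bb_overrides: 15000 ns -> 15.0 us */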
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
@@ -526,7 +599,8 @@ static void populate_dml21_output_config_from_stream_state(struct dml2_link_outp
static void populate_dml21_stream_overrides_from_stream_state(
struct dml2_stream_parameters *stream_desc,
- struct dc_stream_state *stream)
+ struct dc_stream_state *stream,
+ struct dc_stream_status *stream_status)
{
switch (stream->debug.force_odm_combine_segments) {
case 0:
@@ -551,7 +625,9 @@ static void populate_dml21_stream_overrides_from_stream_state(
if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy ||
stream->debug.force_odm_combine_segments > 0)
stream_desc->overrides.disable_dynamic_odm = true;
- stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req;
+ stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp ||
+ stream->hw_cursor_req ||
+ stream_status->mall_stream_config.cursor_size_limit_subvp;
}
static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode)
@@ -723,7 +799,6 @@ static void populate_dml21_surface_config_from_plane_state(
switch (plane_state->tiling_info.gfxversion) {
case DcGfxVersion7:
case DcGfxVersion8:
- // Placeholder for programming the array_mode
break;
case DcGfxVersion9:
case DcGfxVersion10:
@@ -785,6 +860,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->pixel_format = dml2_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
plane->pixel_format = dml2_444_64;
@@ -886,6 +962,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ default:
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
}
@@ -910,7 +987,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
}
//TODO : Could be possibly moved to a common helper layer.
-static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id)
+static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id)
{
int i, j;
@@ -918,10 +995,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str
return false;
for (i = 0; i < context->stream_count; i++) {
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- if (context->stream_status[i].plane_states[j] == plane) {
- *plane_id = (i << 16) | j;
- return true;
+ if (context->streams[i]->stream_id == stream_id) {
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ if (context->stream_status[i].plane_states[j] == plane) {
+ *plane_id = (i << 16) | j;
+ return true;
+ }
}
}
}
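
The plane_id built here packs the stream index into the upper 16 bits and the plane index into the lower 16, which is why the lookup must now match stream_id before comparing plane pointers. A hypothetical decode helper, for illustration only:

    static inline void decode_plane_id(unsigned int plane_id,
                                       unsigned int *stream_idx,
                                       unsigned int *plane_idx)
    {
        *stream_idx = plane_id >> 16;   /* index into context->streams[] */
        *plane_idx = plane_id & 0xffff; /* index into that stream's plane_states[] */
    }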
@@ -944,14 +1023,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d
return location;
}
-static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx,
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
const struct dc_plane_state *plane, const struct dc_state *context)
{
unsigned int plane_id;
int i = 0;
int location = -1;
- if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) {
+ if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) {
ASSERT(false);
return -1;
}
@@ -1021,7 +1100,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
- populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]);
+ populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]);
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.fclk_pstate = dml2_twait_budgeting_setting_if_needed;
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.uclk_pstate = dml2_twait_budgeting_setting_if_needed;
@@ -1037,7 +1116,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
} else {
for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) {
- disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context);
+ disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context);
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
@@ -1048,7 +1127,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
- if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
+ if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true;
/* apply forced pstate policy */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 069b939c672a..9880d3e0398e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -11,18 +11,18 @@ struct dc_state;
struct dcn_watermarks;
union dcn_watermark_set;
struct pipe_ctx;
+struct dc_plane_state;
struct dml2_context;
struct dml2_configuration_options;
struct dml2_initialize_instance_in_out;
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index 930e86cdb88a..ee721606b883 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -384,6 +384,7 @@ void dml21_build_fams2_programming(const struct dc *dc,
/* reset fams2 data */
memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2, 0, sizeof(union dmub_fams2_stream_static_sub_state_v2) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
if (dml_ctx->v21.mode_programming.programming->fams2_required) {
@@ -414,9 +415,16 @@ void dml21_build_fams2_programming(const struct dc *dc,
memcpy(static_base_state,
&dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params,
sizeof(union dmub_cmd_fams2_config));
- memcpy(static_sub_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
- sizeof(union dmub_cmd_fams2_config));
+
+ if (dc->debug.fams_version.major == 3) {
+ memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams],
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params_v2,
+ sizeof(union dmub_fams2_stream_static_sub_state_v2));
+ } else {
+ memcpy(static_sub_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
+ sizeof(union dmub_cmd_fams2_config));
+ }
switch (dc->debug.fams_version.minor) {
case 1:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 5d16f36ec95c..03de3cf06ae5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -2,8 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include <linux/vmalloc.h>
-
#include "dml2_internal_types.h"
#include "dml_top.h"
#include "dml2_core_dcn4_calcs.h"
@@ -12,6 +10,8 @@
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
+#define INVALID -1
+
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
*dml_ctx = vzalloc(sizeof(struct dml2_context));
@@ -35,15 +35,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
return true;
}
-static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_populate_configuration_options(const struct dc *in_dc,
+ struct dml2_context *dml_ctx,
+ const struct dml2_configuration_options *config)
{
- bool disable_fams2;
- struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options;
-
- /* ODM options */
- pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
- pmo_options->disable_dyn_odm_for_multi_stream = true;
- pmo_options->disable_dyn_odm_for_stream_with_svp = true;
+ dml_ctx->config = *config;
/* UCLK P-State options */
if (in_dc->debug.dml21_force_pstate_method) {
@@ -53,52 +49,20 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex
} else {
dml_ctx->config.pmo.force_pstate_method_enable = false;
}
-
- pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
-
- /* NOTE: DRR and SubVP Require FAMS2 */
- disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
- pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
- in_dc->debug.force_disable_subvp ||
- disable_fams2;
- pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
- disable_fams2;
- pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
- disable_fams2;
- pmo_options->disable_fams2 = disable_fams2;
-
- pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
- in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
- pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_4_01:
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
- break;
- default:
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid;
- }
- (*dml_ctx)->architecture = dml2_architecture_21;
+ dml_ctx->architecture = dml2_architecture_21;
- /* Store configuration options */
- (*dml_ctx)->config = *config;
+ dml21_populate_configuration_options(in_dc, dml_ctx, config);
DC_FP_START();
- /*Initialize SOCBB and DCNIP params */
- dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc);
-
- /* apply debug overrides */
- dml21_apply_debug_options(in_dc, *dml_ctx, config);
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc);
- /*Initialize DML21 instance */
- dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
+ dml2_initialize_instance(&dml_ctx->v21.dml_init);
DC_FP_END();
}
@@ -109,7 +73,7 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
if (!dml21_allocate_memory(dml_ctx))
return false;
- dml21_init(in_dc, dml_ctx, config);
+ dml21_init(in_dc, *dml_ctx, config);
return true;
}
@@ -208,10 +172,40 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
}
}
+static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params)
+{
+ int dc_plane_idx = 0;
+ int dml_prog_idx, stream_idx, plane_idx;
+ struct dml2_per_plane_programming *pln_prog = NULL;
+
+ for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) {
+ for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) {
+ dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context);
+ if (dml_prog_idx == INVALID) {
+ continue;
+ }
+ pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
+ mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid;
+ mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0;
+ mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1;
+ mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache;
+ mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1;
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane0,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane1,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ dc_plane_idx++;
+ }
+ }
+}
+
static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
bool result = false;
struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming;
+ struct dc_mcache_params mcache_params[MAX_PLANES] = {0};
memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
@@ -234,7 +228,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
if (!result)
return false;
+ DC_FP_START();
result = dml2_build_mode_programming(mode_programming);
+ DC_FP_END();
if (!result)
return false;
@@ -244,6 +240,14 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state);
/* if subvp phantoms are present, expand them into dc context */
dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx);
+
+ if (in_dc->res_pool->funcs->program_mcache_pipe_config) {
+ //Prepare mcache params for each plane based on mcache output from DML
+ dml21_prepare_mcache_params(dml_ctx, context, mcache_params);
+
+ //populate mcache regs to each pipe
+ dml_ctx->config.callbacks.allocate_mcache(context, mcache_params);
+ }
}
/* Copy DML CLK, WM and REG outputs to bandwidth context */
@@ -277,27 +281,26 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
mode_support->dml2_instance = dml_init->dml2_instance;
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
+ DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
+ DC_FP_END();
if (!is_supported)
return false;
return true;
}
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate)
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
- DC_FP_START();
-
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate)
+ /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
- DC_FP_END();
-
return out;
}
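
A sketch of the two call shapes under the new enum (the caller is hypothetical; the enum values are the ones used in this patch, with DC_VALIDATE_MODE_AND_STATE_INDEX taking the same check-only path as DC_VALIDATE_MODE_ONLY):

    if (!dml21_validate(dc, context, dml_ctx, DC_VALIDATE_MODE_ONLY))
        return false; /* support check only; res_ctx is not populated */

    if (!dml21_validate(dc, context, dml_ctx, DC_VALIDATE_MODE_AND_PROGRAMMING))
        return false; /* full check plus generated HW programming */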
@@ -456,7 +459,7 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
return true;
}
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
dml21_init(in_dc, dml_ctx, config);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
index b2075b8c363b..15f92029d2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
@@ -8,11 +8,13 @@
#include "os_types.h"
#include "dml_top_soc_parameter_types.h"
+#include "dml_top_display_cfg_types.h"
struct dc;
struct dc_state;
struct dml2_configuration_options;
struct dml2_context;
+enum dc_validate_mode;
/**
* dml2_create - Creates dml21_context.
@@ -32,22 +34,23 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
struct dml2_context *src_dml_ctx);
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
struct dml2_context *src_dml_ctx);
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config);
/**
* dml21_validate - Determines if a display configuration is supported or not.
* @in_dc: dc.
* @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
+ * will not populate context.res_ctx.
*
 * Based on the validate_mode option, this internally calls:
*
- * -dml21_mode_check_and_programming - for non fast_validate option
+ * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option
* Calculates if dc_state can be supported on the input display
* configuration. If supported, generates the necessary HW
* programming for the new dc_state.
*
- * -dml21_check_mode_support - for fast_validate option
+ * -dml21_check_mode_support - for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX options
* Calculates if dc_state can be supported for the input display
* config.
@@ -55,7 +58,8 @@ void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
* separate dc_states for validation.
* Return: True if mode is supported, false otherwise.
*/
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate);
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode);
/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
@@ -65,4 +69,67 @@ struct socbb_ip_params_external {
struct dml2_ip_capabilities ip_params;
struct dml2_soc_bb soc_bb;
};
+
+/* mcache parameters decided by DML */
+struct dc_mcache_params {
+ bool valid;
+ /*
+ * For iMALL, dedicated mall mcaches are required (sharing of last
+ * slice possible); for legacy phantom or phantom without return,
+ * only the mall mcaches need to be valid.
+ */
+ bool requires_dedicated_mall_mcache;
+ unsigned int num_mcaches_plane0;
+ unsigned int num_mcaches_plane1;
+ /*
+ * Generally, plane0/1 slices must use a disjoint set of caches
+ * but in some cases the final segment of the two planes can
+ * use the same cache. If plane0_plane1 is set, then this is
+ * allowed.
+ *
+ * Similarly, the caches allocated to the MALL prefetcher are generally
+ * disjoint, but if mall_prefetch is set, then the final segment
+ * between the main and the mall pixel requestor can use the same
+ * cache.
+ *
+ * Note that both bits may be set at the same time.
+ */
+ struct {
+ bool mall_comb_mcache_p0;
+ bool mall_comb_mcache_p1;
+ bool plane0_plane1;
+ } last_slice_sharing;
+ /*
+ * A plane is divided into vertical slices of mcaches,
+ * which wrap on the surface width.
+ *
+ * For example, if the surface width is 7680 and is split into
+ * three slices of equal width, the boundary array would contain
+ * [2560, 5120, 7680]
+ *
+ * The assignments are
+ * 0 = [0 .. 2559]
+ * 1 = [2560 .. 5119]
+ * 2 = [5120 .. 7679]
+ * 0 = [7680 .. INF]
+ * The final element is implicitly the same as the first, and at
+ * first seems invalid since it is never referenced (it lies
+ * outside the surface). However, it's useful when shifting
+ * (see below).
+ *
+ * For any given valid mcache assignment, a shifted version, wrapped
+ * on the surface width boundary is also assumed to be valid.
+ *
+ * For example, shifting [2560, 5120, 7680] by -50 results in
+ * [2510, 5170, 7630].
+ *
+ * The assignments are now:
+ * 0 = [0 .. 2509]
+ * 1 = [2510 .. 5169]
+ * 2 = [5170 .. 7629]
+ * 0 = [7630 .. INF]
+ */
+ int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1];
+ int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1];
+};
#endif
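The boundary-array semantics documented in dc_mcache_params above are easiest to see executed. Below is a self-contained sketch (illustrative only; this helper is hypothetical and not part of the patch) that maps a horizontal position to its mcache slice using a wrapped offsets array like the ones in the comment:

	#include <stdio.h>

	/*
	 * Hypothetical lookup, for illustration only: return the mcache
	 * slice index owning pixel column x, given 'num' ascending slice
	 * boundaries that wrap on the surface width (e.g. {2560, 5120,
	 * 7680} for three equal slices of a 7680-wide surface).
	 */
	static int mcache_index_for_x(const int *bounds, int num, int x, int surface_width)
	{
		int i;

		x %= surface_width;		/* wrap onto the surface */
		if (x < 0)
			x += surface_width;
		for (i = 0; i < num; i++)
			if (x < bounds[i])
				return i;
		return 0;			/* past the last boundary: wraps to slice 0 */
	}

	int main(void)
	{
		int bounds[]  = { 2560, 5120, 7680 };	/* unshifted example */
		int shifted[] = { 2510, 5170, 7630 };	/* same, shifted by -50 */

		printf("x=2559 -> slice %d\n", mcache_index_for_x(bounds, 3, 2559, 7680));  /* 0 */
		printf("x=2560 -> slice %d\n", mcache_index_for_x(bounds, 3, 2560, 7680));  /* 1 */
		printf("x=7630 -> slice %d\n", mcache_index_for_x(shifted, 3, 7630, 7680)); /* 0 */
		return 0;
	}

The x = 7630 case under the -50 shift lands in the implicit final interval and resolves back to slice 0, which is exactly why the seemingly redundant last boundary entry is kept.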
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index 25b607e7b726..b05030926ce8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -46,6 +46,7 @@ struct dml2_display_dlg_regs {
uint32_t dst_y_delta_drq_limit;
uint32_t refcyc_per_vm_dmdata;
uint32_t dmdata_dl_delta;
+ uint32_t dst_y_svp_drq_limit;
// MRQ
uint32_t refcyc_per_meta_chunk_vblank_l;
@@ -156,6 +157,8 @@ struct dml2_dchub_watermark_regs {
uint32_t urgent;
uint32_t sr_enter;
uint32_t sr_exit;
+ uint32_t sr_enter_z8;
+ uint32_t sr_exit_z8;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index 5e1ab6d97640..e8dc6471c0be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -166,7 +166,7 @@ struct dml2_surface_cfg {
enum dml2_swizzle_mode tiling;
struct {
- unsigned long pitch;
+ unsigned long pitch; // In elements, two pixels per element in 422 packed format
unsigned long width;
unsigned long height;
} plane0;
@@ -222,6 +222,7 @@ struct dml2_composition_cfg {
struct {
bool enabled;
+ bool upsp_enabled;
struct {
double h_ratio;
double v_ratio;
@@ -385,6 +386,7 @@ struct dml2_plane_parameters {
long reserved_vblank_time_ns;
unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay
unsigned int gpuvm_min_page_size_kbytes;
+ unsigned int hostvm_min_page_size_kbytes;
enum dml2_svp_mode_override legacy_svp_config; //TODO remove in favor of svp_config
@@ -425,6 +427,7 @@ struct dml2_stream_parameters {
struct dml2_display_cfg {
bool gpuvm_enable;
+ bool ffbm_enable;
bool hostvm_enable;
// Allocate DET proportionally between streams based on pixel rate
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index 5f0bc42d1d2f..8c9f414aa6bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -93,12 +93,15 @@ struct dml2_soc_power_management_parameters {
double dram_clk_change_write_only_us;
double fclk_change_blackout_us;
double g7_ppt_blackout_us;
+ double g7_temperature_read_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE];
+ double type_b_dram_clk_change_blackout_us;
+ double type_b_ppt_blackout_us;
};
struct dml2_clk_table {
@@ -130,6 +133,7 @@ struct dml2_soc_state_table {
struct dml2_soc_vmin_clock_limits {
unsigned long dispclk_khz;
+ unsigned long dcfclk_khz;
};
struct dml2_soc_bb {
@@ -138,6 +142,7 @@ struct dml2_soc_bb {
struct dml2_soc_power_management_parameters power_management_parameters;
struct dml2_soc_vmin_clock_limits vmin_limit;
+ double lower_bound_bandwidth_dchub;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index 0dbf886d8926..98c0234e2f47 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -53,7 +53,9 @@ enum dml2_output_type_and_rate__rate {
dml2_output_rate_hdmi_rate_6x4 = 9,
dml2_output_rate_hdmi_rate_8x4 = 10,
dml2_output_rate_hdmi_rate_10x4 = 11,
- dml2_output_rate_hdmi_rate_12x4 = 12
+ dml2_output_rate_hdmi_rate_12x4 = 12,
+ dml2_output_rate_hdmi_rate_16x4 = 13,
+ dml2_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_pmo_options {
@@ -279,7 +281,10 @@ struct dml2_per_stream_programming {
} phantom_stream;
union dmub_cmd_fams2_config fams2_base_params;
- union dmub_cmd_fams2_config fams2_sub_params;
+ union {
+ union dmub_cmd_fams2_config fams2_sub_params;
+ union dmub_fams2_stream_static_sub_state_v2 fams2_sub_params_v2;
+ };
};
//-----------------
@@ -674,9 +679,14 @@ struct dml2_display_cfg_programming {
// unlimited # of mcache
struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES];
+ bool failed_prefetch;
+ bool failed_uclk_pstate;
bool failed_mcache_validation;
bool failed_dpmm;
bool failed_mode_programming;
+ bool failed_mode_programming_dcfclk;
+ bool failed_mode_programming_prefetch;
+ bool failed_mode_programming_flip;
bool failed_map_watermarks;
} informative;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index bb863c8c6b39..6ee37386f672 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -456,10 +456,10 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
in_out->mode_support_result.global.active.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.average_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
for (i = 0; i < l->svp_expanded_display_cfg.num_planes; i++) {
in_out->mode_support_result.per_plane[i].dppclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.RequiredDPPCLK[i] * 1000);
@@ -509,7 +509,7 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
stream_index = l->svp_expanded_display_cfg.plane_descriptors[i].stream_index;
in_out->mode_support_result.per_stream[stream_index].dscclk_khz = (unsigned int)core->clean_me_up.mode_lib.ms.required_dscclk_freq_mhz[i] * 1000;
- dml2_printf("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
+ DML_LOG_VERBOSE("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
if (!((stream_bitmask >> stream_index) & 0x1)) {
in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index].odms_used = odm_count;
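The dml2_printf() to DML_LOG_VERBOSE() conversion that starts here continues through the rest of the DML core files below; besides the rename, it allows the #ifdef __DML_VBA_DEBUG__ guards around call sites to be dropped, since the gating moves into the macro itself (the DML2_ASSERT() to DML_ASSERT() renames in the same hunks look like part of the same debug-header consolidation). A hypothetical sketch of such a level-gated macro, which may differ from the in-tree definition:

	/*
	 * Hypothetical sketch only -- the real DML_LOG_VERBOSE lives in
	 * the DML debug header and may differ. The idea: verbose logging
	 * compiles to a no-op unless the configured level is high enough,
	 * so call sites need no #ifdef guards of their own.
	 */
	#define DML_LOG_LEVEL_VERBOSE	5
	#ifndef DML_LOG_LEVEL
	#define DML_LOG_LEVEL		3	/* default: verbose off */
	#endif

	#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
	#define DML_LOG_VERBOSE(fmt, ...)	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
	#else
	#define DML_LOG_VERBOSE(fmt, ...)	do { } while (0)
	#endif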
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 4c504cb0e1c5..b9cff2198511 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -54,104 +54,104 @@ static double dml2_core_div_rem(double dividend, unsigned int divisor, unsigned
static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg)
@@ -179,11 +179,9 @@ static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg
} else {
out_bpp[k] = 0;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
}
}
@@ -212,9 +210,7 @@ static unsigned int dml_get_num_active_pipes(int unsigned num_planes, const stru
num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
return num_active_pipes;
}
@@ -251,7 +247,7 @@ static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg,
unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[plane_idx]);
- dml2_printf("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
+ DML_LOG_VERBOSE("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
return is_phantom;
}
@@ -415,19 +411,17 @@ static void CalculateMaxDETAndMinCompressedBufferSize(
*nomDETInKByte = (unsigned int)(math_floor2((double)*MaxTotalDETInKByte / (double)MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte));
*MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
- dml2_printf("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
if (nomDETInKByteOverrideEnable) {
*nomDETInKByte = nomDETInKByteOverrideValue;
- dml2_printf("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
}
}
@@ -502,7 +496,7 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -535,7 +529,7 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
}
}
@@ -570,8 +564,8 @@ static int unsigned dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x) {
version = 11;
} else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -645,21 +639,19 @@ static void CalculateBytePerPixelAndBlockSizes(
*BytePerPixelY = 2;
*BytePerPixelC = 4;
} else {
- dml2_printf("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
+ DML_ASSERT(0);
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
- dml2_printf("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
- dml2_printf("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
- dml2_printf("DML::%s: pitch_y = %u\n", __func__, pitch_y);
- dml2_printf("DML::%s: pitch_c = %u\n", __func__, pitch_c);
- dml2_printf("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
- dml2_printf("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
-#endif
+ DML_LOG_VERBOSE("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: pitch_y = %u\n", __func__, pitch_y);
+ DML_LOG_VERBOSE("DML::%s: pitch_c = %u\n", __func__, pitch_c);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
*surf_linear128_l = 0;
@@ -703,12 +695,10 @@ static void CalculateBytePerPixelAndBlockSizes(
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
- dml2_printf("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
- dml2_printf("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
- dml2_printf("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
if (SurfaceTiling == dml2_gfx11_sw_linear) {
@@ -752,8 +742,8 @@ static void CalculateBytePerPixelAndBlockSizes(
} else if (SurfaceTiling == dml2_sw_256kb_2d) {
macro_tile_scale = 32;
} else {
- dml2_printf("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
+ DML_ASSERT(0);
}
*MacroTileHeightY = macro_tile_scale * *BlockHeight256BytesY;
@@ -766,12 +756,10 @@ static void CalculateBytePerPixelAndBlockSizes(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
- dml2_printf("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
- dml2_printf("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
- dml2_printf("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
}
static void CalculateSinglePipeDPPCLKAndSCLThroughput(
@@ -860,10 +848,8 @@ static void CalculateSwathWidth(
unsigned int surface_width_ub_c;
unsigned int surface_height_ub_c;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -872,11 +858,9 @@ static void CalculateSwathWidth(
SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u ViewportWidth=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
MainSurfaceODMMode = ODMMode[k];
@@ -899,13 +883,11 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u HActive=%u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
- dml2_printf("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
- dml2_printf("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
- dml2_printf("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u HActive=%lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format)) {
SwathWidthC[k] = SwathWidthY[k] / 2;
@@ -934,22 +916,20 @@ static void CalculateSwathWidth(
surface_width_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.width, req_width_horz_c);
surface_height_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.height, Read256BytesBlockHeightC[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
- dml2_printf("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
- dml2_printf("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
- dml2_printf("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
req_per_swath_ub_l[k] = 0;
req_per_swath_ub_c[k] = 0;
@@ -995,15 +975,12 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
-#endif
-
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
}
}
@@ -1018,13 +995,11 @@ static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsi
if (unb_req_force_en) {
unb_req_en = unb_req_force_val && unb_req_ok;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
- dml2_printf("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
- dml2_printf("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
- dml2_printf("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
-#endif
- return (unb_req_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
+ DML_LOG_VERBOSE("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
+ DML_LOG_VERBOSE("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
+ return unb_req_en;
}
static void CalculateDETBufferSize(
@@ -1054,16 +1029,14 @@ static void CalculateDETBufferSize(
bool NextPotentialSurfaceToAssignDETPieceFound;
bool MinimizeReallocationSuccess = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
- dml2_printf("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
// Note: Will use default det size if that fits 2 swaths
if (UnboundedRequestEnabled) {
@@ -1092,19 +1065,15 @@ static void CalculateDETBufferSize(
l->minDET = l->minDET + ConfigReturnBufferSegmentSizeInkByte;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
- dml2_printf("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
- dml2_printf("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
if (l->minDET_pipe == 0) {
l->minDET_pipe = (unsigned int)(math_max2(128, math_ceil2(((double)full_swath_bytes_l[k] + (double)full_swath_bytes_c[k]) / 1024.0, ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
}
if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])) {
@@ -1117,12 +1086,10 @@ static void CalculateDETBufferSize(
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - (ForceSingleDPP ? 1 : DPPPerSurface[k]) * l->minDET_pipe;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
}
if (display_cfg->minimize_det_reallocation) {
@@ -1194,14 +1161,12 @@ static void CalculateDETBufferSize(
l->TotalBandwidth = l->TotalBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
}
- dml2_printf("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
-#endif
- dml2_printf("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
+ DML_LOG_VERBOSE("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
l->BandwidthOfSurfacesNotAssignedDETPiece = l->TotalBandwidth;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
@@ -1213,10 +1178,8 @@ static void CalculateDETBufferSize(
} else {
DETPieceAssignedToThisSurfaceAlready[k] = false;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
}
for (unsigned int j = 0; j < NumberOfActiveSurfaces; ++j) {
@@ -1224,22 +1187,18 @@ static void CalculateDETBufferSize(
l->NextSurfaceToAssignDETPiece = 0;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
if (!DETPieceAssignedToThisSurfaceAlready[k] && (!NextPotentialSurfaceToAssignDETPieceFound ||
ReadBandwidthLuma[k] + ReadBandwidthChroma[k] < ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece] + ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece])) {
l->NextSurfaceToAssignDETPiece = k;
NextPotentialSurfaceToAssignDETPieceFound = true;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
}
if (NextPotentialSurfaceToAssignDETPieceFound) {
@@ -1249,20 +1208,16 @@ static void CalculateDETBufferSize(
* (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte,
math_floor2((double)l->DETBufferSizePoolInKByte, (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
- dml2_printf("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
- dml2_printf("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
- dml2_printf("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
- dml2_printf("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] = DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] + l->NextDETBufferPieceInKByte / (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - l->NextDETBufferPieceInKByte;
DETPieceAssignedToThisSurfaceAlready[l->NextSurfaceToAssignDETPiece] = true;
@@ -1274,13 +1229,11 @@ static void CalculateDETBufferSize(
}
*CompressedBufferSizeInkByte = *CompressedBufferSizeInkByte * CompressedBufferSegmentSizeInkByte / ConfigReturnBufferSegmentSizeInkByte;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- After bandwidth adjustment ---\n", __func__);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: --- After bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
}
-#endif
}
static double CalculateRequiredDispclk(
@@ -1510,15 +1463,13 @@ static unsigned int dscceComputeDelay(
//pixel delay is group_delay (converted to pixels) + pipeline, however, first group is a special case since it is processed as soon as it arrives (i.e., in 3 cycles regardless of pixel format)
pixels = (group_delay - 1) * cycles_per_group + 3 + pipeline_delay;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: bpc: %u\n", __func__, bpc);
- dml2_printf("DML::%s: BPP: %f\n", __func__, BPP);
- dml2_printf("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
- dml2_printf("DML::%s: numSlices: %u\n", __func__, numSlices);
- dml2_printf("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Output: %u\n", __func__, Output);
- dml2_printf("DML::%s: pixels: %u\n", __func__, pixels);
-#endif
+ DML_LOG_VERBOSE("DML::%s: bpc: %u\n", __func__, bpc);
+ DML_LOG_VERBOSE("DML::%s: BPP: %f\n", __func__, BPP);
+ DML_LOG_VERBOSE("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
+ DML_LOG_VERBOSE("DML::%s: numSlices: %u\n", __func__, numSlices);
+ DML_LOG_VERBOSE("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Output: %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: pixels: %u\n", __func__, pixels);
return pixels;
}
@@ -1593,10 +1544,8 @@ static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, e
// sft
Delay = Delay + 1;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Delay = %u\n", __func__, Delay);
-#endif
+ DML_LOG_VERBOSE("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Delay = %u\n", __func__, Delay);
return Delay;
}
@@ -1667,10 +1616,8 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
}
meta_surface_bytes = (unsigned int)(p->DCCMetaPitch * vp_height_meta_ub * p->BytePerPixel / 256.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
- dml2_printf("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
+ DML_LOG_VERBOSE("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
if (p->GPUVMEnable == true) {
double meta_vmpg_bytes = 4.0 * 1024.0;
*p->meta_pte_bytes_per_frame_ub = (unsigned int)((math_ceil2((double) (meta_surface_bytes - meta_vmpg_bytes) / (8 * meta_vmpg_bytes), 1) + 1) * 64);
@@ -1724,25 +1671,23 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
vm_bytes = *p->meta_pte_bytes_per_frame_ub + extra_mpde_bytes + *p->dpde0_bytes_per_frame_ub + extra_dpde_bytes;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
- dml2_printf("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
- dml2_printf("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
- dml2_printf("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
- dml2_printf("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
- dml2_printf("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
- dml2_printf("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
- dml2_printf("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
- dml2_printf("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
- dml2_printf("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
- dml2_printf("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
- dml2_printf("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
- dml2_printf("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
+ DML_LOG_VERBOSE("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
+ DML_LOG_VERBOSE("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
+ DML_LOG_VERBOSE("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
if (p->SurfaceTiling == dml2_sw_linear) {
*p->PixelPTEReqHeight = 1;
@@ -1778,22 +1723,20 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
if (p->GPUVMEnable == true) {
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
__func__, p->GPUVMMinPageSizeKBytes, p->SurfaceTiling, dml_get_tile_block_size_bytes(p->SurfaceTiling));
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
- dml2_printf("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
- dml2_printf("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
- dml2_printf("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
- dml2_printf("DML::%s: Pitch = %u\n", __func__, p->Pitch);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
-#endif
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
+ DML_LOG_VERBOSE("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
+ DML_LOG_VERBOSE("DML::%s: Pitch = %u\n", __func__, p->Pitch);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
*p->dpte_row_height_one_row_per_frame = vp_height_dpte_ub;
*p->dpte_row_width_ub_one_row_per_frame = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height_one_row_per_frame / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * (double)*p->PixelPTEReqWidth);
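The row-width expression above follows the standard round-up-to-request-granularity pattern with one extra request of padding; a standalone sketch under hypothetical plane parameters:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical stand-ins for Pitch, dpte_row_height_one_row_per_frame,
	 * PixelPTEReqHeight and PixelPTEReqWidth. */
	double pitch = 3840.0, dpte_row_height = 16.0;
	double pte_req_height = 16.0, pte_req_width = 8.0;

	/* Round the PTE span up to whole requests, then pad by one request. */
	unsigned int row_width_ub = (unsigned int)
		((ceil((pitch * dpte_row_height / pte_req_height - 1) / pte_req_width) + 1) *
		 pte_req_width);

	printf("dpte_row_width_ub_one_row_per_frame = %u\n", row_width_ub); /* 3848 */
	return 0;
}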
@@ -1811,7 +1754,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_height_linear = 128;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
#endif
} else if (!dml_is_vertical_rotation(p->RotationAngle)) {
@@ -1825,7 +1768,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqWidth, 1) + 1.0) * *p->PixelPTEReqWidth);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
#endif
*p->PixelPTEBytesPerRow = *p->dpte_row_width_ub / *p->PixelPTEReqWidth * *p->PTERequestSize;
@@ -1840,7 +1783,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqHeight * *p->PTERequestSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
#endif
}
@@ -1852,18 +1795,18 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRowStorage = *p->PixelPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
- dml2_printf("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
- dml2_printf("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
- dml2_printf("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
- dml2_printf("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
- dml2_printf("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
- dml2_printf("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
- dml2_printf("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
#endif
return vm_bytes;
@@ -1894,12 +1837,12 @@ static unsigned int CalculatePrefetchSourceLines(
double numLines = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VTaps = %u\n", __func__, VTaps);
- dml2_printf("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
- dml2_printf("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
- dml2_printf("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
- dml2_printf("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VTaps = %u\n", __func__, VTaps);
+ DML_LOG_VERBOSE("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
+ DML_LOG_VERBOSE("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
#endif
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1) / 2.0, 1));
@@ -1934,11 +1877,11 @@ static unsigned int CalculatePrefetchSourceLines(
numLines = *MaxNumSwath * SwathHeight + MaxPartialSwath;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
- dml2_printf("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
- dml2_printf("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
- dml2_printf("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
- dml2_printf("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
+ DML_LOG_VERBOSE("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
+ DML_LOG_VERBOSE("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
+ DML_LOG_VERBOSE("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
#endif
return (unsigned int)(numLines);
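Numerically, the two pieces of this function visible in the hunk compose as below; MaxNumSwath and MaxPartialSwath are taken as given since their derivation is elided here:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical scaler setup. */
	double v_ratio = 1.5;
	unsigned int v_taps = 4;

	/* Interlace path shown above: floor((VRatio + VTaps + 1) / 2). */
	unsigned int v_init_prefill = (unsigned int)floor((v_ratio + v_taps + 1) / 2.0);

	/* Assume the swath bookkeeping (elided from this hunk) produced these. */
	unsigned int max_num_swath = 3, swath_height = 8, max_partial_swath = 5;
	double num_lines = max_num_swath * swath_height + max_partial_swath;

	printf("VInitPreFill = %u, prefetch source lines = %.2f\n",
	       v_init_prefill, num_lines); /* 3, 29.00 */
	return 0;
}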
@@ -2007,8 +1950,8 @@ static void CalculateMALLUseForStaticScreen(
if (is_using_mall_for_ss[k])
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
- dml2_printf("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
#endif
}
@@ -2022,7 +1965,7 @@ static void CalculateMALLUseForStaticScreen(
(!CanAddAnotherSurfaceToMALL || SurfaceSizeInMALL[k] < SurfaceSizeInMALL[SurfaceToAddToMALL])) {
CanAddAnotherSurfaceToMALL = true;
SurfaceToAddToMALL = k;
- dml2_printf("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
}
}
if (CanAddAnotherSurfaceToMALL) {
@@ -2030,8 +1973,8 @@ static void CalculateMALLUseForStaticScreen(
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[SurfaceToAddToMALL];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
#endif
}
}
@@ -2203,15 +2146,15 @@ static void CalculateDCCConfiguration(
segment_order_vert_contiguous_chroma = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
- dml2_printf("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
- dml2_printf("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
- dml2_printf("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
- dml2_printf("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
- dml2_printf("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
- dml2_printf("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
+ DML_LOG_VERBOSE("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
#endif
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
@@ -2301,12 +2244,12 @@ static void CalculateDCCConfiguration(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
- dml2_printf("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
- dml2_printf("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
- dml2_printf("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
- dml2_printf("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
- dml2_printf("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
#endif
}
@@ -2326,26 +2269,26 @@ static void calculate_mcache_row_bytes(
unsigned int mvmpg_per_mcache;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_chans = %u\n", __func__, p->num_chans);
- dml2_printf("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
- dml2_printf("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
- dml2_printf("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
- dml2_printf("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
- dml2_printf("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
- dml2_printf("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
- dml2_printf("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
- dml2_printf("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
- dml2_printf("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
- dml2_printf("DML::%s: blk_width = %u\n", __func__, p->blk_width);
- dml2_printf("DML::%s: blk_height = %u\n", __func__, p->blk_height);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
- dml2_printf("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
-#endif
- DML2_ASSERT(p->mcache_line_size_bytes != 0);
- DML2_ASSERT(p->mcache_size_bytes != 0);
+ DML_LOG_VERBOSE("DML::%s: num_chans = %u\n", __func__, p->num_chans);
+ DML_LOG_VERBOSE("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
+ DML_LOG_VERBOSE("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
+ DML_LOG_VERBOSE("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
+ DML_LOG_VERBOSE("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
+ DML_LOG_VERBOSE("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
+ DML_LOG_VERBOSE("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
+ DML_LOG_VERBOSE("DML::%s: blk_width = %u\n", __func__, p->blk_width);
+ DML_LOG_VERBOSE("DML::%s: blk_height = %u\n", __func__, p->blk_height);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
+#endif
+ DML_ASSERT(p->mcache_line_size_bytes != 0);
+ DML_ASSERT(p->mcache_size_bytes != 0);
*p->mvmpg_width = 0;
*p->mvmpg_height = 0;
@@ -2370,8 +2313,8 @@ static void calculate_mcache_row_bytes(
*p->mvmpg_width = p->vmpg_width;
*p->mvmpg_height = p->vmpg_height;
} else if (!((blk_bytes == 65536) && (vmpg_bytes == 4096))) {
- dml2_printf("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
+ DML_ASSERT(0);
}
}
@@ -2439,25 +2382,25 @@ static void calculate_mcache_row_bytes(
*p->mvmpg_per_mcache_lb = (unsigned int)math_floor2(mvmpg_per_mcache, 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
- dml2_printf("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
- dml2_printf("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
- dml2_printf("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
- dml2_printf("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
- dml2_printf("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
- dml2_printf("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
+ DML_LOG_VERBOSE("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
- dml2_printf("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
- dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
#endif
- DML2_ASSERT(*p->num_mcaches > 0);
+ DML_ASSERT(*p->num_mcaches > 0);
}
static void calculate_mcache_setting(
@@ -2523,7 +2466,7 @@ static void calculate_mcache_setting(
l->l_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_l;
calculate_mcache_row_bytes(scratch, &l->l_p);
- DML2_ASSERT(*p->num_mcaches_l > 0);
+ DML_ASSERT(*p->num_mcaches_l > 0);
if (l->is_dual_plane) {
l->c_p.num_chans = p->num_chans;
@@ -2559,7 +2502,7 @@ static void calculate_mcache_setting(
l->c_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_c;
calculate_mcache_row_bytes(scratch, &l->c_p);
- DML2_ASSERT(*p->num_mcaches_c > 0);
+ DML_ASSERT(*p->num_mcaches_c > 0);
}
// Sharing for iMALL access
@@ -2598,28 +2541,28 @@ static void calculate_mcache_setting(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
- dml2_printf("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
- dml2_printf("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
- dml2_printf("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
- dml2_printf("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
- dml2_printf("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
- dml2_printf("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
- dml2_printf("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
- dml2_printf("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
- dml2_printf("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
+ DML_LOG_VERBOSE("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
+ DML_LOG_VERBOSE("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
+ DML_LOG_VERBOSE("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
if (l->is_dual_plane) {
- dml2_printf("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
- dml2_printf("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
- dml2_printf("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
- dml2_printf("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
- dml2_printf("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
- dml2_printf("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
- dml2_printf("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
- dml2_printf("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
- dml2_printf("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
- dml2_printf("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
+ DML_LOG_VERBOSE("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
}
#endif
// calculate split_coordinate
@@ -2639,11 +2582,11 @@ static void calculate_mcache_setting(
}
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
if (l->is_dual_plane) {
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
}
#endif
@@ -2660,10 +2603,10 @@ static void calculate_mcache_setting(
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
#endif
}
@@ -2694,8 +2637,8 @@ static void calculate_mall_bw_overhead_factor(
mall_prefetch_dram_overhead_factor[k] = 2.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
- dml2_printf("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
#endif
}
}
@@ -2772,22 +2715,20 @@ static double dml_get_return_bandwidth_available(
else // dml2_core_internal_bw_dram
return_bw_mbps = derate_dram_bandwidth;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
- dml2_printf("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
- dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
- dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
- dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
- dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
- dml2_printf("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
- dml2_printf("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
- dml2_printf("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
- dml2_printf("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
- dml2_printf("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
+ DML_LOG_VERBOSE("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
+ DML_LOG_VERBOSE("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
+ DML_LOG_VERBOSE("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
return return_bw_mbps;
}
@@ -2807,9 +2748,9 @@ static noinline_for_stack void calculate_bandwidth_available(
{
unsigned int n, m;
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
// Calculate all the bandwidth available
for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
@@ -2828,8 +2769,8 @@ static noinline_for_stack void calculate_bandwidth_available(
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
#endif
// urg_bandwidth_available_vm_only is indexed by soc_state
@@ -2843,9 +2784,9 @@ static noinline_for_stack void calculate_bandwidth_available(
urg_bandwidth_available_min[m] = math_min2(urg_bandwidth_available[m][dml2_core_internal_bw_dram], urg_bandwidth_available[m][dml2_core_internal_bw_sdp]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
#endif
}
}
@@ -2879,13 +2820,13 @@ static void calculate_avg_bandwidth_required(
// SysActive and SVP Prefetch AVG bandwidth Check
for (k = 0; k < num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: plane %0d\n", __func__, k);
- dml2_printf("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
- dml2_printf("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: plane %0d\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
#endif
sdp_overhead_factor = mall_prefetch_sdp_overhead_factor[k];
@@ -2902,10 +2843,10 @@ static void calculate_avg_bandwidth_required(
avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
#endif
}
}
@@ -3080,10 +3021,10 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->MaxNumSwathY[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
- dml2_printf("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
#endif
p->vm_bytes[k] = (s->vm_bytes_l + s->vm_bytes_c) * (1 + 8 * s->HostVMDynamicLevels);
p->meta_row_bytes[k] = s->meta_row_bytes_per_row_ub_l[k] + s->meta_row_bytes_per_row_ub_c[k];
@@ -3091,8 +3032,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->meta_row_bytes_per_row_ub_c[k] = s->meta_row_bytes_per_row_ub_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
#endif
if (s->PixelPTEBytesPerRowStorageY[k] <= 64 * s->PTEBufferSizeInRequestsForLuma[k] && s->PixelPTEBytesPerRowStorageC[k] <= 64 * s->PTEBufferSizeInRequestsForChroma[k]) {
p->PTEBufferSizeNotExceeded[k] = true;
@@ -3104,18 +3045,18 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
s->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForChroma[k]);
#ifdef __DML_VBA_DEBUG__
if (p->PTEBufferSizeNotExceeded[k] == 0 || s->one_row_per_frame_fits_in_buffer[k] == 0) {
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
}
#endif
}
@@ -3146,8 +3087,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
p->DCCMetaBufferSizeNotExceeded[k] = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
#endif
p->use_one_row_for_frame[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) ||
(dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle));
@@ -3170,9 +3111,9 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->DCCMetaBufferSizeNotExceeded[k] = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
#endif
}
@@ -3209,20 +3150,20 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->dpte_row_bw[k],
&p->meta_row_bw[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
- dml2_printf("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
- dml2_printf("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
- dml2_printf("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
#endif
}
}
@@ -3257,19 +3198,19 @@ static double CalculateUrgentLatency(
}
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
} else {
- dml2_printf("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
- dml2_printf("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
- dml2_printf("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
}
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
#endif
return urgent_latency;
}
@@ -3296,18 +3237,18 @@ static double CalculateTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
- dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
- dml2_printf("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
- dml2_printf("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
#endif
@@ -3334,14 +3275,14 @@ static double CalculateMetaTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
- dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
#endif
@@ -3358,7 +3299,6 @@ static void calculate_cursor_req_attributes(
unsigned int *cursor_bytes_per_chunk,
unsigned int *cursor_bytes)
{
- unsigned int cursor_pitch = 0;
unsigned int cursor_bytes_per_req = 0;
unsigned int cursor_width_bytes = 0;
unsigned int cursor_height = 0;
@@ -3366,10 +3306,6 @@ static void calculate_cursor_req_attributes(
//SW determines the cursor pitch to support the maximum cursor_width that will be used, but the following restrictions apply.
//- For 2bpp, cursor_pitch = 256 pixels due to min cursor request size of 64B
//- For 32 or 64 bpp, cursor_pitch = 64, 128 or 256 pixels depending on the cursor width
- if (cursor_bpp == 2)
- cursor_pitch = 256;
- else
- cursor_pitch = (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1);
//The cursor requestor uses a cursor request size of 64B, 128B, or 256B depending on the cursor_width and cursor_bpp as follows.
@@ -3409,8 +3345,8 @@ static void calculate_cursor_req_attributes(
*cursor_lines_per_chunk = 1;
} else {
if (cursor_width > 0) {
- dml2_printf("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_ASSERT(0);
}
}
@@ -3421,15 +3357,15 @@ static void calculate_cursor_req_attributes(
cursor_height = cursor_width;
*cursor_bytes = *cursor_bytes_per_line * cursor_height;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_printf("DML::%s: cursor_width = %d\n", __func__, cursor_width);
- dml2_printf("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
- dml2_printf("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
- dml2_printf("DML::%s: cursor_pitch = %d\n", __func__, cursor_pitch);
+ DML_LOG_VERBOSE("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_LOG_VERBOSE("DML::%s: cursor_width = %d\n", __func__, cursor_width);
+ DML_LOG_VERBOSE("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 256 : (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1));
#endif
}
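
Two things happen in this hunk beyond the mechanical rename: the cursor_pitch local is deleted because its only consumer here was the debug print, and that print now recomputes the pitch inline. With verbose logging compiled out, the variable would otherwise be set but never read. A minimal sketch of how a compile-away logging macro makes such locals dead (the real DML_LOG_VERBOSE is defined in the DML debug headers; this shape is an assumption for illustration):

    #include <stdio.h>

    /* hypothetical compile-away logger, illustration only */
    #ifdef __DML_VBA_DEBUG__
    #define DML_LOG_VERBOSE(fmt, ...) printf(fmt, ##__VA_ARGS__)
    #else
    #define DML_LOG_VERBOSE(fmt, ...) do { } while (0) /* args never evaluated */
    #endif

When the #else branch is taken, any variable computed solely for the log becomes unused, which is why folding the computation into the call site is the tidier conversion.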
@@ -3460,13 +3396,13 @@ static void calculate_cursor_urgent_burst_factor(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
- dml2_printf("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
- dml2_printf("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
- dml2_printf("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
}
@@ -3501,15 +3437,15 @@ static void CalculateUrgentBurstFactor(
*UrgentBurstFactorChroma = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VRatioC = %f\n", __func__, VRatioC);
- dml2_printf("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
- dml2_printf("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
- dml2_printf("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
- dml2_printf("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VRatioC = %f\n", __func__, VRatioC);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
- DML2_ASSERT(VRatio > 0);
+ DML_ASSERT(VRatio > 0);
LinesInDETLuma = (dml_is_phantom_pipe(plane_cfg) ? 1024 * 1024 : DETBufferSizeY) / BytePerPixelInDETY / swath_width_luma_ub;
@@ -3534,12 +3470,12 @@ static void CalculateUrgentBurstFactor(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
- dml2_printf("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
- dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
}
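
DML2_ASSERT becomes DML_ASSERT throughout the file. The actual macro lives in the DML debug header; as an assumed sketch only, a driver-side assert of this kind might log the failed expression and continue rather than halt:

    #include <stdio.h>

    /* hypothetical shape of DML_ASSERT, illustration only */
    #define DML_ASSERT(expr) \
        do { \
            if (!(expr)) \
                printf("DML assert failed: %s (%s:%d)\n", #expr, __FILE__, __LINE__); \
        } while (0)

The do/while(0) wrapper keeps the macro safe to use as a single statement, for example inside an unbraced if/else.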
@@ -3600,10 +3536,10 @@ static void CalculateDCFCLKDeepSleepTdlut(
if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && tdlut_bytes_to_deliver[k] > 0) {
double tdlut_required_deepsleep_dcfclk = (double) tdlut_bytes_to_deliver[k] / 64.0 / prefetch_swath_time_us[k];
- dml2_printf("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
- dml2_printf("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
- dml2_printf("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
- dml2_printf("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
// increase the deepsleep dcfclk to match the original dispclk throughput rate
if (tdlut_required_deepsleep_dcfclk > DCFClkDeepSleepPerSurface[k]) {
@@ -3613,8 +3549,8 @@ static void CalculateDCFCLKDeepSleepTdlut(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
- dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
#endif
}
@@ -3625,17 +3561,17 @@ static void CalculateDCFCLKDeepSleepTdlut(
*DCFClkDeepSleep = math_max2(8.0, __DML2_CALCS_DCFCLK_FACTOR__ * ReadBandwidth / (double)ReturnBusWidth);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
- dml2_printf("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
- dml2_printf("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
*DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
}
- dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
}
static noinline_for_stack void CalculateDCFCLKDeepSleep(
@@ -3731,12 +3667,12 @@ static unsigned int CalculateMaxVStartup(
else
max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VBlankNom = %u\n", __func__, timing->vblank_nom);
- dml2_printf("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
- dml2_printf("DML::%s: line_time_us = %f\n", __func__, line_time_us);
- dml2_printf("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
- dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
- dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
+ DML_LOG_VERBOSE("DML::%s: VBlankNom = %lu\n", __func__, timing->vblank_nom);
+ DML_LOG_VERBOSE("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
+ DML_LOG_VERBOSE("DML::%s: line_time_us = %f\n", __func__, line_time_us);
+ DML_LOG_VERBOSE("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
+ DML_LOG_VERBOSE("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
+ DML_LOG_VERBOSE("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
#endif
max_vstartup_lines = (unsigned int)math_min2(max_vstartup_lines, DML_MAX_VSTARTUP_START);
return max_vstartup_lines;
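
Note the one non-mechanical change above: the VBlankNom print switches %u to %lu, which implies timing->vblank_nom is an unsigned long and the old specifier was mismatched. A standalone example of the class of bug this fixes (names illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned long vblank_nom = 4;
        printf("VBlankNom = %lu\n", vblank_nom); /* with %u this is undefined behavior on LP64 */
        return 0;
    }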
@@ -3761,9 +3697,9 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
const long MAXIMUMCOMPRESSION = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
}
#endif
CalculateSwathWidth(
@@ -3797,15 +3733,15 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->full_swath_bytes_l[k] = (unsigned int)(p->swath_width_luma_ub[k] * p->BytePerPixDETY[k] * MaximumSwathHeightY[k]);
p->full_swath_bytes_c[k] = (unsigned int)(p->swath_width_chroma_ub[k] * p->BytePerPixDETC[k] * MaximumSwathHeightC[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
#endif
if (p->display_cfg->plane_descriptors[k].pixel_format == dml2_420_10) {
p->full_swath_bytes_l[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_l[k], 256));
@@ -3848,11 +3784,11 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
#endif
*p->ViewportSizeSupport = true;
@@ -3860,7 +3796,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
DETBufferSizeInKByteForSwathCalculation = (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 1024 : p->DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
#endif
if (p->display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) {
p->SwathHeightY[k] = MaximumSwathHeightY[k];
@@ -3917,13 +3853,13 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if ((p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] / 2 > DETBufferSizeInKByteForSwathCalculation * 1024 / 2) ||
p->SwathWidth[k] > p->MaximumSwathWidthLuma[k] || (p->SwathHeightC[k] > 0 && p->SwathWidthChroma[k] > p->MaximumSwathWidthChroma[k])) {
*p->ViewportSizeSupport = false;
- dml2_printf("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
- dml2_printf("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
p->ViewportSizeSupportPerSurface[k] = false;
} else {
p->ViewportSizeSupportPerSurface[k] = true;
@@ -3931,35 +3867,35 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if (p->SwathHeightC[k] == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024;
p->DETBufferSizeC[k] = 0;
} else if (RoundedUpSwathSizeBytesY[k] <= 1.5 * RoundedUpSwathSizeBytesC[k]) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
} else {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = (unsigned int)(math_floor2(p->DETBufferSizeInKByte[k] * 1024 * 2 / 3, 1024));
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 - p->DETBufferSizeY[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
- dml2_printf("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
#endif
}
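
The DET split logic shown above picks one of three policies per surface. A worked sketch with an illustrative 1536 KiB DET: the 2/3 branch floors the luma share to 1 KiB granularity, yielding 1024 KiB for plane0 and the 512 KiB remainder for plane1 (the names below are placeholders, not the driver's):

    /* sketch of the three DET split branches, illustrative values */
    static void det_split(unsigned int det_kbytes, unsigned int swath_height_c,
                          double rounded_swath_y, double rounded_swath_c,
                          unsigned int *det_y, unsigned int *det_c)
    {
        if (swath_height_c == 0) {            /* all DET to plane0 */
            *det_y = det_kbytes * 1024;
            *det_c = 0;
        } else if (rounded_swath_y <= 1.5 * rounded_swath_c) {
            *det_y = det_kbytes * 1024 / 2;   /* half and half */
            *det_c = *det_y;
        } else {                              /* 2/3 vs 1/3, floored to 1 KiB */
            *det_y = det_kbytes * 1024 * 2 / 3 / 1024 * 1024;
            *det_c = det_kbytes * 1024 - *det_y; /* 1536 KiB -> 1048576 + 524288 bytes */
        }
    }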
@@ -3969,12 +3905,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
*p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b,
(double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / (p->mrq_present ? MAXIMUMCOMPRESSION : 1) / 64)), 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
#endif
*p->hw_debug5 = false;
@@ -3989,12 +3925,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
+ *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k])))
*p->hw_debug5 = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
- dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
- dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
+ DML_LOG_VERBOSE("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
+ DML_LOG_VERBOSE("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
+ DML_LOG_VERBOSE("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
#endif
}
#endif
@@ -4192,15 +4128,15 @@ static noinline_for_stack void CalculateODMMode(
SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMUse = %d\n", __func__, ODMUse);
- dml2_printf("DML::%s: Output = %d\n", __func__, Output);
- dml2_printf("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
- dml2_printf("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
- dml2_printf("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
+ DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
+ DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
+ DML_LOG_VERBOSE("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
#endif
if (ODMUse == dml2_odm_mode_auto)
DecidedODMMode = DecideODMMode(HActive,
@@ -4245,10 +4181,10 @@ static noinline_for_stack void CalculateODMMode(
*NumberOfDPP = NumberOfDPPRequired;
*RequiredDISPCLKPerSurface = success ? DISPCLKRequired : 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
- dml2_printf("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
- dml2_printf("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
- dml2_printf("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
+ DML_LOG_VERBOSE("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
#endif
}
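
ODM combine spreads one stream's horizontal active across 2, 3, or 4 pipes, so the per-pipe DISPCLK requirement scales down with the combine factor. A simplified sketch of the relationship; the real CalculateRequiredDispclk also folds in DSC and clock-margin factors, so treat this as an approximation:

    /* approximate per-pipe dispclk for an N:1 ODM combine */
    static double required_dispclk_approx(double pixel_clock_mhz, int odm_factor)
    {
        return pixel_clock_mhz / (double)odm_factor; /* each pipe drives 1/N of the line */
    }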
@@ -4292,17 +4228,17 @@ static noinline_for_stack void CalculateOutputLink(
*OutputRate = dml2_core_internal_output_rate_unknown;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
- dml2_printf("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
- dml2_printf("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
- dml2_printf("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
- dml2_printf("DML::%s: Output (encoder) = %u\n", __func__, Output);
- dml2_printf("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
+ DML_LOG_VERBOSE("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
+ DML_LOG_VERBOSE("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
+ DML_LOG_VERBOSE("DML::%s: Output (encoder) = %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
#endif
{
if (Output == dml2_hdmi) {
@@ -4487,9 +4423,9 @@ static noinline_for_stack void CalculateOutputLink(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
- dml2_printf("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
- dml2_printf("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
+ DML_LOG_VERBOSE("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
+ DML_LOG_VERBOSE("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
+ DML_LOG_VERBOSE("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
#endif
}
@@ -4571,17 +4507,17 @@ static unsigned int DSCDelayRequirement(
DSCDelayRequirement_val = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, ODMMode);
- dml2_printf("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
- dml2_printf("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
- dml2_printf("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
- dml2_printf("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
+ DML_LOG_VERBOSE("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, ODMMode);
+ DML_LOG_VERBOSE("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
+ DML_LOG_VERBOSE("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
+ DML_LOG_VERBOSE("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
#endif
return DSCDelayRequirement_val;
@@ -4654,10 +4590,10 @@ static void CalculateSurfaceSizeInMall(
(TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
- dml2_printf("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
+ DML_LOG_VERBOSE("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
+ DML_LOG_VERBOSE("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
#endif
}
@@ -4674,7 +4610,6 @@ static void calculate_tdlut_setting(
unsigned int tdlut_vmpg_per_frame;
unsigned int tdlut_pte_req_per_frame;
unsigned int tdlut_bytes_per_line;
- unsigned int tdlut_delivery_cycles;
double tdlut_drain_rate;
unsigned int tdlut_mpc_width;
unsigned int tdlut_bytes_per_group_simple;
@@ -4737,44 +4672,47 @@ static void calculate_tdlut_setting(
*p->tdlut_bytes_per_frame = tdlut_bytes_per_line * tdlut_mpc_width * tdlut_mpc_width;
*p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
//the delivery cycle count is DispClk cycles per line * number of lines * number of slices
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
} else {
//tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
*p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
*p->tdlut_bytes_per_group = tdlut_bytes_per_group_simple;
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
tdlut_drain_rate = 2 * tdlut_bpe * p->dispclk_mhz;
}
//the tdlut is fetched during the 2 row times of prefetch.
if (p->setup_for_tdlut) {
*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
- *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024)
+ *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ else
+ *p->tdlut_opt_time = 0;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
*p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
- dml2_printf("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
- dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
- dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
- dml2_printf("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
- dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
- dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
- dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
- dml2_printf("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
- dml2_printf("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
- dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
- dml2_printf("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
- dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
+ DML_LOG_VERBOSE("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
+ DML_LOG_VERBOSE("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
+ DML_LOG_VERBOSE("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
+ DML_LOG_VERBOSE("DML::%s: tdlut_delivery_cycles = %u\n", __func__, p->tdlut_addressing_mode == dml2_tdlut_sw_linear ? (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width : (unsigned int)math_ceil2(tdlut_width/2.0, 1));
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
+ DML_LOG_VERBOSE("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
#endif
}
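
The one functional change in calculate_tdlut_setting is the guard on tdlut_opt_time: tdlut_bytes_per_frame is unsigned, so when the cursor buffer is larger than the frame, the old subtraction wrapped around to a huge positive value instead of going negative. A standalone reproduction of the failure mode and the fix (numbers illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int frame_bytes = 1024, cursor_bytes = 4096;
        double drain_rate = 2.0; /* bytes per us, illustrative */

        /* old: unsigned subtraction wraps to ~4.29e9 before the divide */
        double bad = (frame_bytes - cursor_bytes) / drain_rate;
        /* new: clamp to zero when the cursor buffer covers the whole frame */
        double good = frame_bytes > cursor_bytes ?
                      (frame_bytes - cursor_bytes) / drain_rate : 0.0;

        printf("old=%f new=%f\n", bad, good);
        return 0;
    }

The commented-out tdlut_delivery_cycles assignments follow the same pattern as cursor_pitch earlier: the value only fed a debug print, which now recomputes it inline.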
@@ -4820,10 +4758,10 @@ static void CalculateTarb(
*Tarb = extra_bytes / ReturnBW;
*Tarb_prefetch = extra_bytes_prefetch / ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
- dml2_printf("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
- dml2_printf("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
- dml2_printf("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
#endif
}
@@ -4838,10 +4776,10 @@ static double CalculateTWait(
TWait = math_max2(reserved_vblank_time_ns/1000.0, g6_temp_read_blackout_us) + t_urg_trip;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: reserved_vblank_time_ns = %d\n", __func__, reserved_vblank_time_ns);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, Ttrip);
- dml2_printf("DML::%s: TWait = %f\n", __func__, TWait);
+ DML_LOG_VERBOSE("DML::%s: reserved_vblank_time_ns = %ld\n", __func__, reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, Ttrip);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, TWait);
#endif
return TWait;
}
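
CalculateTWait mixes units: reserved_vblank_time_ns arrives in nanoseconds while the blackout and trip terms are in microseconds, hence the /1000.0; the matching print gains %ld since the field is evidently a signed long. Worked numbers, purely illustrative:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        long reserved_vblank_time_ns = 600000;   /* 600 us */
        double g6_temp_read_blackout_us = 400.0;
        double t_urg_trip = 10.0;
        double twait = fmax(reserved_vblank_time_ns / 1000.0,
                            g6_temp_read_blackout_us) + t_urg_trip;
        printf("TWait = %f us\n", twait);        /* 610.0 */
        return 0;
    }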
@@ -4887,20 +4825,20 @@ static void CalculateVUpdateAndDynamicMetadataParameters(
*Tdmsks = *Tdmsks / 2;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
- dml2_printf("DML::%s: VBlank = %u\n", __func__, VBlank);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, Dppclk);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
- dml2_printf("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
- dml2_printf("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
+ DML_LOG_VERBOSE("DML::%s: VBlank = %u\n", __func__, VBlank);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
+ DML_LOG_VERBOSE("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
- dml2_printf("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
- dml2_printf("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
- dml2_printf("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
+ DML_LOG_VERBOSE("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
- dml2_printf("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
#endif
}
@@ -4923,7 +4861,7 @@ static double get_urgent_bandwidth_required(
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
- double PrefetchBandwidthOto[],
+ double PrefetchBandwidthMax[],
double excess_vactive_fill_bw_l[],
double excess_vactive_fill_bw_c[],
double cursor_bw[],
@@ -4962,11 +4900,11 @@ static double get_urgent_bandwidth_required(
l->adj_factor_cur_pre = UrgentBurstFactorCursorPre[k];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]);
- bool exclude_this_plane = 0;
+ bool exclude_this_plane = false;
// Exclude phantom pipe in bw calculation for non svp prefetch state
if (state_type != dml2_core_internal_soc_state_svp_prefetch && is_phantom)
- exclude_this_plane = 1;
+ exclude_this_plane = true;
// The qualified row bandwidth, qual_row_bw, accounts for the regular non-flip row bandwidth when there is no possible immediate flip or HostVM invalidation flip.
// The qual_row_bw is zero if HostVM is possible and only non-zero and equal to row_bw(i) if immediate flip is not allowed for that pipe.
@@ -4987,20 +4925,20 @@ static double get_urgent_bandwidth_required(
l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k];
l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur;
l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
- l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
+ l->flip_and_prefetch_bw_max = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthMax[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k];
- surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto);
+ surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_max);
/* export peak required bandwidth for the surface */
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
- dml2_printf("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
- dml2_printf("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
- dml2_printf("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
- dml2_printf("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
- dml2_printf("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
#endif
} else {
surface_required_bw[k] = 0.0;
@@ -5009,34 +4947,34 @@ static double get_urgent_bandwidth_required(
l->required_bandwidth_mbps += surface_required_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
- dml2_printf("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
- dml2_printf("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
- dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
- dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
- dml2_printf("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
- dml2_printf("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
- dml2_printf("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
- dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
- dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
- dml2_printf("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
#endif
}
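
The PrefetchBandwidthOto parameter becomes PrefetchBandwidthMax, matching the flip_and_prefetch_bw_max term it feeds; the rename presumably reflects that the exported value is meant to upper-bound whichever prefetch schedule is chosen (see the comment about the ms/mp schedule mismatch later in the patch). surface_required_bw is then the five-way max of the candidate bandwidths; an assumed sketch of what a helper like math_max5 computes, for illustration:

    /* assumed shape of a five-way max over doubles */
    static double math_max5_sketch(double a, double b, double c, double d, double e)
    {
        double m = a;
        if (b > m) m = b;
        if (c > m) m = c;
        if (d > m) m = d;
        if (e > m) m = e;
        return m;
    }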
@@ -5120,19 +5058,19 @@ static void CalculateExtraLatency(
*ExtraLatency_sr = *ExtraLatency_sr + Tarb;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
- dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
- dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
- dml2_printf("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
- dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
- dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
- dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
- dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
- dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
- dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
- dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
- dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
- dml2_printf("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: qos_type=%u\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
+ DML_LOG_VERBOSE("DML::%s: Tex_trips=%f\n", __func__, Tex_trips);
+ DML_LOG_VERBOSE("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
+ DML_LOG_VERBOSE("DML::%s: FabricClock=%f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
+ DML_LOG_VERBOSE("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
+ DML_LOG_VERBOSE("DML::%s: Tarb=%f\n", __func__, Tarb);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
#endif
}
@@ -5187,7 +5125,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Tsw_est3 = 0.0;
s->cursor_prefetch_bytes = 0;
*p->prefetch_cursor_bw = 0;
- *p->RequiredPrefetchBWOTO = 0.0;
+ *p->RequiredPrefetchBWMax = 0.0;
dcc_mrq_enable = (p->dcc_enable && p->mrq_present);
@@ -5199,20 +5137,20 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->HostVMDynamicLevelsTrips = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
- dml2_printf("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
- dml2_printf("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
- dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
+ DML_LOG_VERBOSE("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
+ DML_LOG_VERBOSE("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: VStartup = %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
#endif
CalculateVUpdateAndDynamicMetadataParameters(
p->MaxInterDCNTileRepeaters,
@@ -5258,11 +5196,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (p->DynamicMetadataEnable == true) {
if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) {
*p->NotEnoughTimeForDynamicMetadata = true;
- dml2_printf("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
} else {
*p->NotEnoughTimeForDynamicMetadata = false;
}
@@ -5288,21 +5226,21 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
((p->myPipe->ODMMode == dml2_odm_mode_mso_1to4) ? (double)p->myPipe->HActive * 3.0 / 4.0 : 0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
- dml2_printf("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
- dml2_printf("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
- dml2_printf("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
- dml2_printf("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
- dml2_printf("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
+ DML_LOG_VERBOSE("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
+ DML_LOG_VERBOSE("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
+ DML_LOG_VERBOSE("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
#endif
if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
@@ -5314,17 +5252,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->DSTYAfterScaler = (unsigned int)(math_floor2(s->DSTTotalPixelsAfterScaler / p->myPipe->HTotal, 1));
*p->DSTXAfterScaler = (unsigned int)(s->DSTTotalPixelsAfterScaler - ((double)(*p->DSTYAfterScaler * p->myPipe->HTotal)));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
#endif
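The two assignments above decompose the total post-scaler delay into whole lines plus a pixel remainder. A minimal sketch with hypothetical values:

	double total_px = 5500.0;        /* s->DSTTotalPixelsAfterScaler, hypothetical */
	unsigned int htotal = 2200;      /* p->myPipe->HTotal, hypothetical */
	unsigned int dst_y = (unsigned int)(total_px / htotal);          /* 2 lines */
	unsigned int dst_x = (unsigned int)(total_px - dst_y * htotal);  /* 1100 px */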
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
#endif
if (p->display_cfg->gpuvm_enable) {
s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
@@ -5402,7 +5340,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
}
	/* oto prefetch bw should always be less than total vactive bw */
- //DML2_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
+ //DML_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
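The clamp above (and the commented-out assert) encode the invariant that the oto prefetch bandwidth covers at least the per-pipe vactive bandwidth before the MALL sdp overhead factor is applied. In isolation, with hypothetical numbers:

	double vactive_bw = 1200.0;  /* s->per_pipe_vactive_sw_bw, MB/s, hypothetical */
	double oto_bw     = 950.0;   /* s->prefetch_bw_oto before the clamp */
	double overhead   = 1.1;     /* p->mall_prefetch_sdp_overhead_factor */

	oto_bw = (oto_bw > vactive_bw ? oto_bw : vactive_bw) * overhead;  /* 1320.0 */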
@@ -5418,12 +5356,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
* mp will fail if ms decides to use equ schedule and mp decides to use oto schedule
* and the required bandwidth increases when going from ms to mp
*/
- *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto;
+ *p->RequiredPrefetchBWMax = s->prefetch_bw_oto;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
- dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
- dml2_printf("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
#endif
if (p->display_cfg->gpuvm_enable == true) {
@@ -5433,9 +5371,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
s->Tvm_oto = s->Tvm_trips_rounded;
@@ -5447,9 +5385,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
} else
s->Tr0_oto = s->LineTime / 4.0;
@@ -5459,11 +5397,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
#ifdef DML_GLOBAL_PREFETCH_CHECK
- dml2_printf("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
+ DML_LOG_VERBOSE("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
if (p->impacted_dst_y_pre > 0) {
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
s->dst_y_prefetch_oto = math_max2(s->dst_y_prefetch_oto, p->impacted_dst_y_pre);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
}
#endif
*p->Tpre_oto = s->dst_y_prefetch_oto * s->LineTime;
@@ -5492,72 +5430,71 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
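DST_Y_PREFETCH is a U6.2 fixed-point register field: 6 integer bits and 2 fractional bits, so it counts quarter-lines up to 255/4 = 63.75, which is exactly the cap applied above. A sketch of the encoding (the helper name is hypothetical):

	static unsigned int encode_u6_2(double lines)
	{
		double clamped = lines < 63.75 ? lines : 63.75;

		return (unsigned int)(clamped * 4.0);  /* e.g. 12.5 lines -> 50 */
	}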
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
- dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
- dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
- dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
- dml2_printf("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
- dml2_printf("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
- dml2_printf("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
- dml2_printf("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
- dml2_printf("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
- dml2_printf("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
- dml2_printf("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
-#endif
- double Tpre = s->dst_y_prefetch_equ * s->LineTime;
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
+ DML_LOG_VERBOSE("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
+ DML_LOG_VERBOSE("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
+#endif
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
*p->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
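The expression above snaps dst_y_prefetch_equ to the nearest quarter line: adding 0.125 (half a 0.25 step) before flooring in quarter-line units rounds to nearest. A standalone sketch:

	#include <math.h>

	static double round_to_quarter(double x)
	{
		return floor(4.0 * (x + 0.125)) / 4.0;  /* 10.30 -> 10.25, 10.40 -> 10.50 */
	}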
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
- dml2_printf("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
- dml2_printf("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
- dml2_printf("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
- dml2_printf("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
- dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime: %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: VStartup: %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
+ DML_LOG_VERBOSE("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
+ DML_LOG_VERBOSE("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, (s->dst_y_prefetch_equ * s->LineTime), *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
#endif
*p->dst_y_per_vm_vblank = 0;
@@ -5596,19 +5533,19 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw1 = 0;
- dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
(*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
- dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
- dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
- dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
+ DML_LOG_VERBOSE("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
+ DML_LOG_VERBOSE("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
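The refit above resizes prefetch_bw1 so that the vm plus two row transfers fit exactly into the time left after reserving the minimum Lsw lines, a 0.75-line margin, and Tno_bw. A worked instance with hypothetical numbers:

	/* bytes  = vm*HVMIneff + 2*(pte_row*HVMIneff + meta + tdlut) = 98304 B
	 * budget = Tpre_rounded - min_Lsw_equ*LineTime - 0.75*LineTime - Tno_bw
	 *        = 40us - 8*2us - 0.75*2us - 0.5us = 22us
	 * prefetch_bw1 = 98304 B / 22us ~= 4468 MB/s
	 */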
@@ -5620,10 +5557,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw2 = 0;
- dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
- dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
}
// prefetch_bw3: 2*R0 + SW
@@ -5634,10 +5571,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw3 = 0;
- dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
- dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
// prefetch_bw4: SW
@@ -5647,17 +5584,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw4 = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
- dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
- dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
- dml2_printf("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
- dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
- dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
- dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
- dml2_printf("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
- dml2_printf("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, s->dst_y_prefetch_equ * s->LineTime, *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
+ DML_LOG_VERBOSE("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
#endif
{
bool Case1OK = false;
@@ -5676,14 +5613,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
double total_row_bytes = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
- dml2_printf("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
- dml2_printf("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
if (s->prefetch_bw1 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1;
double row_transfer_time = total_row_bytes / s->prefetch_bw1;
- dml2_printf("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case1OK = true;
}
@@ -5696,8 +5633,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw2 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2;
double row_transfer_time = total_row_bytes / s->prefetch_bw2;
- dml2_printf("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time < s->Tr0_trips_rounded) {
Case2OK = true;
}
@@ -5709,8 +5646,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw3 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3;
double row_transfer_time = total_row_bytes / s->prefetch_bw3;
- dml2_printf("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time < s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case3OK = true;
}
@@ -5730,10 +5667,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK);
- dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK);
- dml2_printf("DML::%s: Case3OK: %u\n", __func__, Case3OK);
- dml2_printf("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: Case1OK: %u\n", __func__, Case1OK);
+ DML_LOG_VERBOSE("DML::%s: Case2OK: %u\n", __func__, Case2OK);
+ DML_LOG_VERBOSE("DML::%s: Case3OK: %u\n", __func__, Case3OK);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
#endif
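Case1OK/Case2OK/Case3OK classify each candidate bandwidth by whether the vm and row transfer times it implies meet the rounded trip times. A hypothetical helper mirroring the three tests shown in this hunk:

	enum bw_case { CASE_BOTH_MEET, CASE_ROW_SHORT, CASE_VM_SHORT, CASE_NEITHER };

	static enum bw_case classify_bw(double vm_time, double row_time,
					double tvm_rounded, double tr0_rounded)
	{
		if (vm_time >= tvm_rounded)
			return row_time >= tr0_rounded ? CASE_BOTH_MEET : CASE_ROW_SHORT;

		return row_time >= tr0_rounded ? CASE_VM_SHORT : CASE_NEITHER;
	}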
if (s->prefetch_bw_equ > 0) {
@@ -5753,12 +5690,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
s->Tvm_equ = 0;
s->Tr0_equ = 0;
- dml2_printf("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
- dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
+ DML_LOG_VERBOSE("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
+ DML_LOG_VERBOSE("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
@@ -5769,7 +5706,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
} else {
@@ -5781,11 +5718,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+
+	/* equ bw should be propagated so a ceiling on the equ bw is accounted for prior to mode programming.
+	 * Overall bandwidth may be lower when going from mode support to mode programming, but the final pixel data
+	 * bandwidth may end up higher than what was calculated in mode support.
+	 */
+ *p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, *p->RequiredPrefetchBWMax);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
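Taken together with the assignment from prefetch_bw_oto earlier in the function, the math_max2() added here means RequiredPrefetchBWMax always reports the larger of the two schedules' bandwidths, so mode programming cannot end up assuming less prefetch bandwidth than mode support did. The net effect as a sketch (names hypothetical):

	double required_prefetch_bw_max = prefetch_bw_oto;  /* recorded earlier */

	if (use_equ_schedule)                               /* equ branch above */
		required_prefetch_bw_max = prefetch_bw_equ > required_prefetch_bw_max
						   ? prefetch_bw_equ
						   : required_prefetch_bw_max;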
@@ -5797,31 +5740,31 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->prefetch_swath_time_us = (s->LinesToRequestPrefetchPixelData * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
- dml2_printf("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
- dml2_printf("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
+ DML_LOG_VERBOSE("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
#endif
- DML2_ASSERT(*p->dst_y_prefetch < 64);
+ DML_ASSERT(*p->dst_y_prefetch < 64);
unsigned int min_lsw_required = (unsigned int)math_max2(2, p->tdlut_drain_time / s->LineTime);
if (s->LinesToRequestPrefetchPixelData >= min_lsw_required && s->prefetch_bw_equ > 0) {
*p->VRatioPrefetchY = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData;
*p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
- dml2_printf("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
#endif
if ((p->SwathHeightY > 4) && (p->VInitPreFillY > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillY - 3.0) / 2.0) {
@@ -5829,13 +5772,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
*p->VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
#endif
}
@@ -5843,22 +5786,22 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
- dml2_printf("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
#endif
if ((p->SwathHeightC > 4) && (p->VInitPreFillC > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillC - 3.0) / 2.0) {
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
*p->VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
#endif
}
@@ -5866,36 +5809,34 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->RequiredPrefetchPixelDataBWChroma = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelC * p->swath_width_chroma_ub / s->LineTime;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
#endif
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
- dml2_printf("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
*p->VRatioPrefetchY = 0;
*p->VRatioPrefetchC = 0;
*p->RequiredPrefetchPixelDataBWLuma = 0;
*p->RequiredPrefetchPixelDataBWChroma = 0;
}
- dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
- dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
- dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
- dml2_printf("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
- dml2_printf("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
- dml2_printf("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
- dml2_printf("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
- dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
+ DML_LOG_VERBOSE("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
+ DML_LOG_VERBOSE("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
+ DML_LOG_VERBOSE("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
+ DML_LOG_VERBOSE("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
} else {
- dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
__func__, min_Lsw_equ_ok, *p->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded+Tvm_trips_rounded+2.0*Tr0_trips_rounded+min_Tsw_equ (%f) should be > \n",
- __func__, tpre_gt_req_latency, (s->min_Lsw_equ*s->LineTime + s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded), p->Turg, s->trip_to_mem, p->ExtraLatencyPrefetch);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5916,18 +5857,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_vm_bw = 0;
} else if (*p->dst_y_per_vm_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
}
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
@@ -5936,14 +5877,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
#endif
} else {
prefetch_row_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
}
*p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw);
@@ -5963,12 +5904,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->prefetch_vmrow_bw = 0;
}
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
- dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
return s->NoTimeToPrefetch;
}
@@ -6005,7 +5946,7 @@ static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned
}
}
if (max_idx <= 0) {
- DML2_ASSERT(max_idx >= 0);
+ DML_ASSERT(max_idx >= 0);
max_idx = this_plane_idx;
}
@@ -6037,12 +5978,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
	// worst case if the rob and cdb are fully hogged
s->max_Trpd_dcfclk_cycles = (unsigned int) math_ceil2((p->rob_buffer_size_kbytes*1024 + p->compressed_buffer_size_kbytes*DML_MAX_COMPRESSION_RATIO*1024)/64.0, 1.0);
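The worst-case return-path delay above assumes the ROB plus the fully compressed CDB must drain at 64 bytes per dcfclk cycle. A worked instance (sizes hypothetical, DML_MAX_COMPRESSION_RATIO assumed to be 4):

	/* rob = 128 KiB, cdb = 64 KiB:
	 * cycles = ceil((128*1024 + 64*4*1024) / 64.0) = 393216 / 64 = 6144
	 */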
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
- dml2_printf("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
- dml2_printf("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
- dml2_printf("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
- dml2_printf("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
+ DML_LOG_VERBOSE("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
#endif
// calculate the return impact from each plane, request is 256B per dcfclk
@@ -6063,12 +6004,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_l[i] / p->swath_height_l[i], 1) * s->src_swath_bytes_l[i]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
- dml2_printf("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
- dml2_printf("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
#endif
if (s->src_swath_bytes_c[i] > 0) { // dual_plane
@@ -6079,10 +6020,10 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
- dml2_printf("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
#endif
}
@@ -6090,9 +6031,9 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
s->accumulated_return_path_dcfclk_cycles[i] = (unsigned int) math_ceil2(((DML_MAX_COMPRESSION_RATIO-1) * 64 * p->estimated_dcfclk_mhz) * s->time_to_fill_det_us / 64.0, 1.0); //for 64B per DCFClk
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
- dml2_printf("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
- dml2_printf("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
+ DML_LOG_VERBOSE("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
#endif
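With DML_MAX_COMPRESSION_RATIO assumed to be 4, the conversion above reduces to 3 * dcfclk_mhz * fill-time, since the factors of 64 cancel. A worked instance with hypothetical values:

	/* estimated_dcfclk = 1000 MHz, time_to_fill_det = 10us:
	 * cycles = ceil(((4 - 1) * 64 * 1000) * 10 / 64.0) = 3 * 1000 * 10 = 30000
	 */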
	// clamping to the worst-case delay, which is the one that occupies the full rob+cdb
if (s->accumulated_return_path_dcfclk_cycles[i] > s->max_Trpd_dcfclk_cycles)
@@ -6109,7 +6050,7 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
p->impacted_dst_y_pre[i] = math_ceil2(p->impacted_dst_y_pre[i] / p->line_time[i], 0.25);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
+ DML_LOG_VERBOSE("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
#endif
}
@@ -6120,8 +6061,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
*p->recalc_prefetch_schedule = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
- dml2_printf("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
#endif
}
} else {
@@ -6131,8 +6072,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
- dml2_printf("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
+ DML_LOG_VERBOSE("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
+ DML_LOG_VERBOSE("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
#endif
return s->prefetch_global_check_passed;
@@ -6150,8 +6091,8 @@ static void calculate_peak_bandwidth_required(
memset(l, 0, sizeof(struct dml2_core_shared_calculate_peak_bandwidth_required_locals));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
#endif
for (unsigned int k = 0; k < p->num_active_planes; ++k) {
@@ -6180,7 +6121,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
- l->zero_array, //PrefetchBWOTO
+ l->zero_array, //PrefetchBWMax
l->zero_array,
l->zero_array,
l->zero_array,
@@ -6217,7 +6158,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
- l->zero_array, //PrefetchBWOTO
+ l->zero_array, //PrefetchBWMax
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6254,7 +6195,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6291,7 +6232,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatch where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6328,7 +6269,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6347,12 +6288,12 @@ static void calculate_peak_bandwidth_required(
p->surface_peak_required_bw[m][n]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
#endif
- DML2_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
+ DML_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
}
}
}
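
For readers following the hunks above: calculate_peak_bandwidth_required() folds vactive, prefetch, cursor, and flip demand into per-(soc-state, bw-type) totals, and the converted DML_ASSERT enforces that the urgent requirement can never fall below the non-urgent one. Below is a minimal standalone sketch of that invariant; the SOC_STATES/BW_TYPES sizes, the peak_bw() helper, and the 1.1 burst factor are illustrative stand-ins, not the driver's values.

#include <assert.h>
#include <stdio.h>

#define SOC_STATES 2 /* e.g. sys_active, svp_prefetch (illustrative) */
#define BW_TYPES   2 /* e.g. sdp, dram (illustrative) */

/* Peak demand is the larger of vactive-only and prefetch-time demand;
 * passing the max prefetch bw here is what keeps ms and mp consistent. */
static double peak_bw(double vactive_bw, double prefetch_bw)
{
	return vactive_bw > prefetch_bw ? vactive_bw : prefetch_bw;
}

int main(void)
{
	double urg[SOC_STATES][BW_TYPES], non_urg[SOC_STATES][BW_TYPES];

	for (int m = 0; m < SOC_STATES; m++) {
		for (int n = 0; n < BW_TYPES; n++) {
			/* toy numbers standing in for the summed surface bw */
			non_urg[m][n] = peak_bw(1000.0, 1200.0);
			urg[m][n] = non_urg[m][n] * 1.1; /* urgent adds burst factors */
			/* same invariant the driver checks with DML_ASSERT() */
			assert(urg[m][n] >= non_urg[m][n]);
			printf("state %d type %d: urg=%.1f non_urg=%.1f\n",
			       m, n, urg[m][n], non_urg[m][n]);
		}
	}
	return 0;
}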
@@ -6414,18 +6355,18 @@ static void check_urgent_bandwidth_support(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
- dml2_printf("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
+ DML_LOG_VERBOSE("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required[m][n]) ? "<" : ">=", urg_bandwidth_required[m][n]);
}
@@ -6446,14 +6387,14 @@ static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal
flip_bw_available_mbps = flip_bw_available_sdp_mbps < flip_bw_available_dram_mbps ? flip_bw_available_sdp_mbps : flip_bw_available_dram_mbps;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
- dml2_printf("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
- dml2_printf("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
#endif
return flip_bw_available_mbps;
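
The helper above derives the immediate-flip headroom of each return path as urgent-available minus urgent-required, then takes the minimum of the SDP and DRAM results. A freestanding sketch of the same arithmetic, with toy MB/s figures in place of the computed tables:

#include <stdio.h>

/* headroom left for immediate flips on one return path */
static double flip_headroom(double urg_available, double urg_required)
{
	return urg_available - urg_required;
}

int main(void)
{
	/* toy SDP/DRAM numbers in MB/s */
	double sdp = flip_headroom(12000.0, 9000.0);  /* 3000 */
	double dram = flip_headroom(10000.0, 8500.0); /* 1500 */
	/* flip bw is limited by the tighter of the two paths */
	double flip_bw = sdp < dram ? sdp : dram;

	printf("flip_bw_available_mbps = %f\n", flip_bw);
	return 0;
}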
@@ -6478,28 +6419,28 @@ static void calculate_immediate_flip_bandwidth_support(
*flip_bandwidth_support_ok &= urg_bandwidth_available[eval_state][n] >= urg_bandwidth_required_flip[eval_state][n];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
- dml2_printf("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
#endif
- DML2_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
}
*frac_urg_bandwidth_flip = (frac_urg_bw_flip_sdp > frac_urg_bw_flip_dram) ? frac_urg_bw_flip_sdp : frac_urg_bw_flip_dram;
*flip_bandwidth_support_ok &= (*frac_urg_bandwidth_flip <= 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
- dml2_printf("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required_flip[m][n]) ? "<" : ">=", urg_bandwidth_required_flip[m][n]);
}
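
The support decision above reduces to one scalar: the worst (largest) required-over-available fraction across SDP and DRAM, which must stay at or below 1.0. A compilable sketch of that reduction, with the two fractions as toy inputs:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* required-with-flip as a fraction of what each path can supply */
	double frac_sdp = 10500.0 / 12000.0;
	double frac_dram = 9800.0 / 10000.0;
	/* the worst fraction decides support and must stay <= 1.0 */
	double frac_flip = frac_sdp > frac_dram ? frac_sdp : frac_dram;
	bool support_ok = frac_flip <= 1.0;

	printf("frac_urg_bandwidth_flip=%f support=%d\n", frac_flip, support_ok);
	return 0;
}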
@@ -6549,27 +6490,27 @@ static void CalculateFlipSchedule(
l->dpte_row_bytes = DPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
- dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
- dml2_printf("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
- dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
- dml2_printf("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
- dml2_printf("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
- dml2_printf("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
- dml2_printf("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
- dml2_printf("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
- dml2_printf("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
- dml2_printf("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
#endif
if (TotImmediateFlipBytes > 0 && (GPUVMEnable || dcc_mrq_enable)) {
@@ -6596,9 +6537,9 @@ static void CalculateFlipSchedule(
l->min_row_time = l->min_row_height * LineTime / VRatio;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
#endif
- DML2_ASSERT(l->min_row_time > 0);
+ DML_ASSERT(l->min_row_time > 0);
if (use_lb_flip_bw) {
// For mode check, calculate the flip bw requirement with worst case flip time
@@ -6619,20 +6560,20 @@ static void CalculateFlipSchedule(
l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded),
l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
- dml2_printf("DML::%s: total row bytes (%d row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
- dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
- dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
- dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
- dml2_printf("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (%f row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
if (l->lb_flip_bw > 0) {
- dml2_printf("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
- dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
- dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
- dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
}
#endif
l->lb_flip_bw = math_max3(l->lb_flip_bw,
@@ -6640,8 +6581,8 @@ static void CalculateFlipSchedule(
(l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
- dml2_printf("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#endif
}
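
In the mode-support branch above, the lower-bound flip bandwidth is the largest of three ratios: (vm+row bytes) over the budget minus Tno_bw, vm bytes over the budget minus Tno_bw and two row trips, and row bytes over the budget minus the vm trip; it is then floored by the register-limit terms. A compilable sketch of that max3 selection, with all inputs as toy values:

#include <stdio.h>

static double max3(double a, double b, double c)
{
	double m = a > b ? a : b;
	return m > c ? m : c;
}

int main(void)
{
	/* toy inputs: times in us, bytes already hvm-inefficiency scaled */
	double max_flip_time = 50.0, tno_bw = 2.0;
	double tvm_trips_rounded = 6.0, tr0_trips_rounded = 4.0;
	double vm_bytes = 32768.0, row_bytes = 65536.0;

	double lb_flip_bw = max3(
		(vm_bytes + row_bytes) / (max_flip_time - tno_bw),
		vm_bytes / (max_flip_time - tno_bw - 2.0 * tr0_trips_rounded),
		row_bytes / (max_flip_time - tvm_trips_rounded));

	printf("lb_flip_bw = %f bytes/us\n", lb_flip_bw);
	return 0;
}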
@@ -6653,13 +6594,12 @@ static void CalculateFlipSchedule(
} else {
if (iflip_enable) {
l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i)
- double portion = (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
- dml2_printf("DML::%s: portion of flip bw = %f\n", __func__, portion);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
+ DML_LOG_VERBOSE("DML::%s: portion of flip bw = %f\n", __func__, (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes);
#endif
if (l->ImmediateFlipBW == 0) {
l->Tvm_flip = 0;
@@ -6674,11 +6614,11 @@ static void CalculateFlipSchedule(
LineTime / 4.0);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
- dml2_printf("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
#endif
*dst_y_per_vm_flip = math_ceil2(4.0 * (l->Tvm_flip / LineTime), 1.0) / 4.0;
*dst_y_per_row_flip = math_ceil2(4.0 * (l->Tr0_flip / LineTime), 1.0) / 4.0;
@@ -6711,14 +6651,14 @@ static void CalculateFlipSchedule(
#ifdef __DML_VBA_DEBUG__
if (!use_lb_flip_bw) {
- dml2_printf("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
- dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
- dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
- dml2_printf("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
}
- dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
- dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
+ DML_LOG_VERBOSE("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
#endif
}
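
dst_y_per_vm_flip and dst_y_per_row_flip above are programmed in quarter-line units, hence the math_ceil2(4.0 * (T / LineTime), 1.0) / 4.0 rounding. A standalone sketch of that quantization, where plain ceil() stands in for math_ceil2 with granularity 1 and the line time is a made-up value:

#include <math.h>
#include <stdio.h>

/* round a time, expressed in lines, up to the next quarter line */
static double quantize_quarter_lines(double t_us, double line_time_us)
{
	return ceil(4.0 * (t_us / line_time_us)) / 4.0;
}

int main(void)
{
	double line_time = 7.4; /* us, toy value */

	printf("dst_y_per_vm_flip = %f\n", quantize_quarter_lines(20.0, line_time));
	printf("dst_y_per_row_flip = %f\n", quantize_quarter_lines(9.3, line_time));
	return 0;
}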
@@ -6736,7 +6676,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->UrgentWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
#endif
p->Watermark->USRRetrainingWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency + p->mmSOCParameters.USRRetrainingLatency + p->mmSOCParameters.SMNLatency;
@@ -6755,20 +6695,20 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->temp_read_or_ppt_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
- dml2_printf("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
- dml2_printf("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
- dml2_printf("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
- dml2_printf("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
- dml2_printf("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
- dml2_printf("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
- dml2_printf("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
- dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
#endif
s->TotalActiveWriteback = 0;
@@ -6801,11 +6741,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->WritebackFCLKChangeWatermark = p->Watermark->WritebackFCLKChangeWatermark + p->mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
- dml2_printf("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
- dml2_printf("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
- dml2_printf("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
+ DML_LOG_VERBOSE("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
#endif
s->TotalPixelBW = 0.0;
@@ -6836,11 +6776,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
- dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
- dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel);
- dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
- dml2_printf("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBBitPerPixel = %f\n", __func__, k, LBBitPerPixel);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
#endif
s->EffectiveLBLatencyHidingY = s->LBLatencyHidingSourceLinesY[k] / v_ratio * (h_total / pixel_clock_mhz);
@@ -6943,16 +6883,16 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->sub_vp_lines_l = s->src_y_pstate_l + s->src_y_ahead_l + p->meta_row_height_l[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
- dml2_printf("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
- dml2_printf("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
- dml2_printf("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
- dml2_printf("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
#endif
p->SubViewportLinesNeededInMALL[k] = s->sub_vp_lines_l;
@@ -6967,10 +6907,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
- dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
- dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
#endif
}
}
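
Per the debug prints above, the subviewport line budget per plane is the p-state source line position plus the look-ahead lines plus a meta row, taken as the max across luma and chroma. A toy sketch of that composition; the helper names and figures are invented for illustration:

#include <stdio.h>

static unsigned int max2u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* lines a plane needs resident in MALL for subviewport p-state switching */
static unsigned int sub_vp_lines(unsigned int src_y_pstate,
				 unsigned int src_y_ahead,
				 unsigned int meta_row_height)
{
	return src_y_pstate + src_y_ahead + meta_row_height;
}

int main(void)
{
	unsigned int luma = sub_vp_lines(120, 64, 8);
	unsigned int chroma = sub_vp_lines(60, 32, 8);

	printf("SubViewportLinesNeededInMALL = %u\n", max2u(luma, chroma));
	return 0;
}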
@@ -6992,10 +6932,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
- dml2_printf("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
- dml2_printf("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
- dml2_printf("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
+ DML_LOG_VERBOSE("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
#endif
}
@@ -7141,7 +7081,7 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -7153,32 +7093,30 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
break;
}
}
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
-#endif
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
return index;
}
static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
if (!clk_entry_found)
- DML2_ASSERT(clk_entry_found);
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
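
get_active_min_uclk_dpm_index() above is a linear exact-match search over the uclk table that asserts if the frequency is absent. A minimal standalone version of the same logic, with plain assert() standing in for DML_ASSERT() and a made-up clock table:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* find the DPM index whose uclk matches exactly; assert if none does */
static unsigned int min_uclk_dpm_index(unsigned long uclk_khz,
				       const unsigned long *values,
				       unsigned int num_values)
{
	unsigned int i;
	bool found = false;

	for (i = 0; i < num_values; i++) {
		if (uclk_khz == values[i]) {
			found = true;
			break;
		}
	}
	assert(found); /* the driver uses DML_ASSERT() here */
	return i;
}

int main(void)
{
	unsigned long table[] = { 97000, 435000, 625000, 1125000 };

	printf("index = %u\n", min_uclk_dpm_index(625000, table, 4));
	return 0;
}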
@@ -7218,10 +7156,10 @@ static void calculate_hostvm_inefficiency_factor(
if ((*HostVMInefficiencyFactorPrefetch < 4) && (remote_iommu_outstanding_translations < max_outstanding_reqs))
*HostVMInefficiencyFactorPrefetch = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
- dml2_printf("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
- dml2_printf("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
#endif
}
}
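
The hunk above only shows the clamp: the prefetch inefficiency factor is floored at 4 when the remote IOMMU still has outstanding-translation headroom. A toy sketch of that flow, assuming (per the two bandwidths logged in the debug output, not shown being divided in this hunk) that the base factor is the ratio of the pixel-and-vm to vm-only urgent bandwidth:

#include <stdio.h>

int main(void)
{
	/* toy urgent-bandwidth terms matching the debug prints above */
	double pixel_and_vm_bw = 9000.0;
	double vm_only_bw = 6000.0;
	unsigned int remote_iommu_outstanding = 32, max_outstanding = 512;

	/* assumed: factor is the ratio of the two logged bandwidths */
	double factor = pixel_and_vm_bw / vm_only_bw;
	double factor_prefetch = factor;

	/* the clamp shown in the hunk: prefetch factor floors at 4 */
	if (factor_prefetch < 4 && remote_iommu_outstanding < max_outstanding)
		factor_prefetch = 4;

	printf("HostVMInefficiencyFactor = %f\n", factor);
	printf("HostVMInefficiencyFactorPrefetch = %f\n", factor_prefetch);
	return 0;
}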
@@ -7335,30 +7273,660 @@ static void calculate_pstate_keepout_dst_lines(
}
}
+static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_internal_display_mode_lib *mode_lib,
+ const struct dml2_display_cfg *display_cfg)
+{
+ struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
+ struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
+ struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
+#endif
+ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
+
+ double min_return_bw_for_latency;
+ unsigned int k;
+
+ mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
+
+ calculate_hostvm_inefficiency_factor(
+ &s->HostVMInefficiencyFactor,
+ &s->HostVMInefficiencyFactorPrefetch,
+
+ display_cfg->gpuvm_enable,
+ display_cfg->hostvm_enable,
+ mode_lib->ip.remote_iommu_outstanding_translations,
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
+ mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
+
+ mode_lib->ms.Total3dlutActive = 0;
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
+ mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
+
+ // Calculate tdlut schedule related terms
+ calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
+ calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
+ calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
+ calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
+ calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
+ calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
+ calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
+ calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
+
+ // output
+ calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
+ calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
+ calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
+
+ calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
+ }
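+
The loop above follows the DML2 convention of marshalling inputs into a scratch parameter struct and returning results through output pointers, which keeps the many-argument calculation helpers manageable. A minimal illustration of that calling convention; the struct, field, and function names here are invented for the sketch, not the driver's:

#include <stdio.h>

/* invented example of the scratch-params calling convention */
struct calc_params {
	/* inputs */
	double dispclk_mhz;
	int setup_for_tdlut;
	/* outputs, written through pointers into caller storage */
	double *opt_time;
	double *drain_time;
};

static void calculate_setting(const struct calc_params *p)
{
	/* toy math; the real code derives these from lut mode and clocks */
	*p->opt_time = p->setup_for_tdlut ? 100.0 / p->dispclk_mhz : 0.0;
	*p->drain_time = *p->opt_time / 2.0;
}

int main(void)
{
	double opt, drain;
	struct calc_params params = {
		.dispclk_mhz = 1200.0,
		.setup_for_tdlut = 1,
		.opt_time = &opt,
		.drain_time = &drain,
	};

	calculate_setting(&params);
	printf("opt=%f drain=%f\n", opt, drain);
	return 0;
}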
+
+ min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
+
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
+ CalculateExtraLatency(
+ display_cfg,
+ mode_lib->ip.rob_buffer_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
+ s->ReorderingBytes,
+ mode_lib->ms.DCFCLK,
+ mode_lib->ms.FabricClock,
+ mode_lib->ip.pixel_chunk_size_kbytes,
+ min_return_bw_for_latency,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.dpte_group_bytes,
+ s->tdlut_bytes_per_group,
+ s->HostVMInefficiencyFactor,
+ s->HostVMInefficiencyFactorPrefetch,
+ mode_lib->soc.hostvm_min_page_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_type,
+ !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.request_size_bytes_luma,
+ mode_lib->ms.support.request_size_bytes_chroma,
+ mode_lib->ip.meta_chunk_size_kbytes,
+ mode_lib->ip.dchub_arb_to_ret_delay,
+ mode_lib->ms.TripToMemory,
+ mode_lib->ip.hostvm_mode,
+
+ // output
+ &mode_lib->ms.ExtraLatency,
+ &mode_lib->ms.ExtraLatency_sr,
+ &mode_lib->ms.ExtraLatencyPrefetch);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ s->impacted_dst_y_pre[k] = 0;
+
+ s->recalc_prefetch_schedule = 0;
+ s->recalc_prefetch_done = 0;
+ do {
+ mode_lib->ms.support.PrefetchSupported = true;
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
+
+ mode_lib->ms.TWait[k] = CalculateTWait(
+ display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx) : 0.0);
+
+ myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
+ myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
+ myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
+ myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
+ myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
+ myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
+ myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
+ myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
+ myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
+ myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
+ myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
+ myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
+ myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
+ myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
+ myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
+ myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
+ myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
+ myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ myPipe->ODMMode = mode_lib->ms.ODMMode[k];
+ myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
+ myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
+ myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
+ myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
+#endif
+ CalculatePrefetchSchedule_params->display_cfg = display_cfg;
+ CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
+ CalculatePrefetchSchedule_params->myPipe = myPipe;
+ CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
+ CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
+ CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
+ CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
+ CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
+ CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
+ CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
+ CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
+ CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
+ CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
+ CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
+ CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
+ CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
+ CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
+ CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
+ CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
+ CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
+ CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
+ CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
+ CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
+ CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
+ CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
+ CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
+ CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
+ CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
+ CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
+ CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
+ CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
+ CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
+ CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
+ CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
+
+ // output
+ CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
+ CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
+ CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
+ CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
+ CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &mode_lib->ms.RequiredPrefetchBWMax[k];
+ CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
+ CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
+ CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
+ CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
+ CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
+ CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
+ CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
+ CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
+ CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
+ CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
+ CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
+ CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
+
+ mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
+
+ mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
+ } // for k num_planes
+
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.BytePerPixelY,
+ mode_lib->ms.BytePerPixelC,
+ mode_lib->ms.SwathWidthY,
+ mode_lib->ms.SwathWidthC,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.PSCL_FACTOR,
+ mode_lib->ms.PSCL_FACTOR_CHROMA,
+ mode_lib->ms.RequiredDPPCLK,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
+ mode_lib->soc.return_bus_width_bytes,
+ mode_lib->ms.RequiredDISPCLK,
+ s->tdlut_bytes_to_deliver,
+ s->prefetch_swath_time_us,
+
+ /* Output */
+ &mode_lib->ms.dcfclk_deepsleep);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.dst_y_prefetch[k] < 2.0
+ || mode_lib->ms.LinesForVM[k] >= 32.0
+ || mode_lib->ms.LinesForDPTERow[k] >= 16.0
+ || mode_lib->ms.NoTimeForPrefetch[k] == true
+ || s->DSTYAfterScaler[k] > 8) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
+ }
+ }
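+
The loop above rejects a plane's prefetch schedule if any of five limits is violated; the 32- and 16-line caps mirror the "should be < 32" and "should be < 16" register limits noted in the flip-schedule debug output earlier. A self-contained sketch of the same predicate, with toy inputs:

#include <stdbool.h>
#include <stdio.h>

/* the per-plane schedule limits checked above */
static bool prefetch_schedule_ok(double dst_y_prefetch, double lines_for_vm,
				 double lines_for_dpte_row, bool no_time,
				 int dst_y_after_scaler)
{
	return dst_y_prefetch >= 2.0 &&      /* at least two lines to prefetch in */
	       lines_for_vm < 32.0 &&        /* dst_y_per_vm register limit */
	       lines_for_dpte_row < 16.0 &&  /* dst_y_per_row register limit */
	       !no_time &&                   /* schedule actually fit */
	       dst_y_after_scaler <= 8;
}

int main(void)
{
	printf("ok=%d\n", prefetch_schedule_ok(4.5, 1.0, 0.5, false, 2));
	printf("ok=%d\n", prefetch_schedule_ok(1.5, 1.0, 0.5, false, 2));
	return 0;
}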
+
+ mode_lib->ms.support.DynamicMetadataSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
+ mode_lib->ms.support.DynamicMetadataSupported = false;
+ }
+ }
+
+ mode_lib->ms.support.VRatioInPrefetchSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
+ mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
+ mode_lib->ms.support.VRatioInPrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
+ }
+ }
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
+
+ // By default, do not recalc prefetch schedule
+ s->recalc_prefetch_schedule = 0;
+
+ // Only do the urg vs prefetch bandwidth check, flip schedule check, and power saving feature support check if the prefetch schedule check is ok
+ if (mode_lib->ms.support.PrefetchSupported) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ // Calculate Urgent burst factor for prefetch
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
+#endif
+ CalculateUrgentBurstFactor(
+ &display_cfg->plane_descriptors[k],
+ mode_lib->ms.swath_width_luma_ub[k],
+ mode_lib->ms.swath_width_chroma_ub[k],
+ mode_lib->ms.SwathHeightY[k],
+ mode_lib->ms.SwathHeightC[k],
+ s->line_times[k],
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.VRatioPreY[k],
+ mode_lib->ms.VRatioPreC[k],
+ mode_lib->ms.BytePerPixelInDETY[k],
+ mode_lib->ms.BytePerPixelInDETC[k],
+ mode_lib->ms.DETBufferSizeY[k],
+ mode_lib->ms.DETBufferSizeC[k],
+ /* Output */
+ &mode_lib->ms.UrgentBurstFactorLumaPre[k],
+ &mode_lib->ms.UrgentBurstFactorChromaPre[k],
+ &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+
+ // Calculate urgent bandwidth required, both urg and non urg peak bandwidth
+ // assume flip bw is 0 at this point
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ mode_lib->ms.final_flip_bw[k] = 0;
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 0;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+ // Check urg peak bandwidth against available urg bw
+	// check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
+ check_urgent_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth
+ &s->dummy_single[1], // double* frac_urg_bandwidth_mall
+ &mode_lib->ms.support.UrgVactiveBandwidthSupport,
+ &mode_lib->ms.support.PrefetchBandwidthSupported,
+
+ mode_lib->soc.mall_allocated_for_dcn_mbytes,
+ mode_lib->ms.support.non_urg_bandwidth_required,
+ mode_lib->ms.support.urg_vactive_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
+ DML_LOG_VERBOSE("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+ }
+
+#ifdef DML_GLOBAL_PREFETCH_CHECK
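+	// One-shot global check that the combined prefetch demand fits the ROB/DET; the schedule is then recalculated once with the impacted dst_y_pre values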
+ if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
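+	// Floor the bandwidth estimate at 10 * 1024 mbps so the DCFCLK estimate derived below stays reasonable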
+ if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
+
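+	// estimated DCFCLK = estimated urgent bandwidth / return bus width, scaled up by the system-active urgent dcfclk derate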
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
+ ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
+
+	// if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre; prefetch should then be possible
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
+ s->recalc_prefetch_done = 1;
+ s->recalc_prefetch_schedule = 1;
+ }
+#endif
+ } // prefetch schedule ok, do urg bw and flip schedule
+ } while (s->recalc_prefetch_schedule);
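+	// at most one extra pass: a recalc is requested only by the one-shot global prefetch check above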
+
+ // Flip Schedule
+ // Both prefetch schedule and BW okay
+ if (mode_lib->ms.support.PrefetchSupported == true) {
+ mode_lib->ms.BandwidthAvailableForImmediateFlip =
+ get_bandwidth_available_for_immediate_flip(
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
+ mode_lib->ms.support.urg_bandwidth_available);
+
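+	// Total the per-pipe flip bytes (vm + dpte rows + meta rows) over all DPPs that request immediate flip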
+ mode_lib->ms.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip) {
+ s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
+ s->HostVMInefficiencyFactor,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.meta_row_bytes[k]);
+ } else {
+ s->per_pipe_flip_bytes[k] = 0;
+ }
+ mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
+
+ }
+
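+	// Work out each plane's flip schedule given its share of the bandwidth available for immediate flip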
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ CalculateFlipSchedule(
+ &mode_lib->scratch,
+ display_cfg->plane_descriptors[k].immediate_flip,
+ 1, // use_lb_flip_bw
+ s->HostVMInefficiencyFactor,
+ s->Tvm_trips_flip[k],
+ s->Tr0_trips_flip[k],
+ s->Tvm_trips_flip_rounded[k],
+ s->Tr0_trips_flip_rounded[k],
+ display_cfg->gpuvm_enable,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.BandwidthAvailableForImmediateFlip,
+ mode_lib->ms.TotImmediateFlipBytes,
+ display_cfg->plane_descriptors[k].pixel_format,
+ (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
+ mode_lib->ms.Tno_bw_flip[k],
+ mode_lib->ms.dpte_row_height[k],
+ mode_lib->ms.dpte_row_height_chroma[k],
+ mode_lib->ms.use_one_row_for_frame_flip[k],
+ mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
+ s->per_pipe_flip_bytes[k],
+ mode_lib->ms.meta_row_bytes[k],
+ s->meta_row_height_luma[k],
+ s->meta_row_height_chroma[k],
+ mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
+
+ /* Output */
+ &mode_lib->ms.dst_y_per_vm_flip[k],
+ &mode_lib->ms.dst_y_per_row_flip[k],
+ &mode_lib->ms.final_flip_bw[k],
+ &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
+ }
+
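+	// Recompute peak bandwidth, this time including the flip bandwidth just calculated (inc_flip_bw = 1)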
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 1;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+ calculate_immediate_flip_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth_flip
+ &mode_lib->ms.support.ImmediateFlipSupport,
+
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_flip,
+ mode_lib->ms.support.non_urg_bandwidth_required_flip,
+ mode_lib->ms.support.urg_bandwidth_available);
+
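+	// Any pipe that requested an immediate flip but cannot be serviced disqualifies immediate flip support globally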
+	for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
+	} else { // if prefetch is not supported, assume immediate flip is not supported either
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
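+	// Gather the SoC latency and QoS parameters consumed by the watermark calculation below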
+ s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
+ s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
+ s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
+ s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
+ s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
+ s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.USRRetrainingLatency = 0;
+ s->mSOCParameters.SMNLatency = 0;
+ s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx);
+ s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, mode_lib->ms.state_idx);
+ s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
+
+ CalculateWatermarks_params->display_cfg = display_cfg;
+ CalculateWatermarks_params->USRRetrainingRequired = false;
+ CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
+ CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
+ CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
+ CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
+ CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
+ CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
+ CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
+ CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
+ CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
+ CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
+ CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
+ CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
+ CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
+ CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
+ CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
+ CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
+ CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
+ CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
+ CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
+ CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
+ CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
+ CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
+ CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
+ CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
+ CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
+ CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
+
+ // Output
+ CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
+ CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
+ CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
+ CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
+ CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
+ CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
+ CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
+ CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
+ CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
+ CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
+ CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
+ CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
+
+ CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
+
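+	// Derive the p-state keepout destination lines from the watermarks just computed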
+ calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
+ DML_LOG_VERBOSE("DML::%s: Done prefetch calculation\n", __func__);
+
+}
+
+
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table;
-#if defined(__DML_VBA_DEBUG__)
- double old_ReadBandwidthLuma;
- double old_ReadBandwidthChroma;
-#endif
double outstanding_latency_us = 0;
- double min_return_bw_for_latency;
struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
- struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
-#endif
- struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
- struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params *calculate_bytes_to_fetch_required_to_hide_latency_params = &mode_lib->scratch.calculate_bytes_to_fetch_required_to_hide_latency_params;
unsigned int k, m, n;
@@ -7374,9 +7942,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.FabricClock = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz / 1000);
mode_lib->ms.MaxDCFCLK = (double)min_clk_table->max_clocks_khz.dcfclk / 1000;
mode_lib->ms.MaxFabricClock = (double)min_clk_table->max_clocks_khz.fclk / 1000;
- mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dispclk / 1000;
+ mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dispclk / 1000;
mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000;
- mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000;
+ mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dppclk / 1000;
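+	// dispclk/dppclk limits now come from max_ss_clocks_khz, assumed to be the spread-spectrum-adjusted maximums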
mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000);
@@ -7384,25 +7952,25 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: --- START --- \n", __func__);
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
- dml2_printf("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
- dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
- dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
- dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
- dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
+ DML_LOG_VERBOSE("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
+ DML_LOG_VERBOSE("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
+ DML_LOG_VERBOSE("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -7504,12 +8072,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
#ifdef __DML_VBA_DEBUG__
- old_ReadBandwidthLuma = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
#endif
}
@@ -7629,13 +8195,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.MaximumSwathWidthLuma[k] = math_min2(s->MaximumSwathWidthSupportLuma, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
mode_lib->ms.MaximumSwathWidthChroma[k] = math_min2(s->MaximumSwathWidthSupportChroma, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
}
/* Cursor Support Check */
@@ -7672,11 +8238,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.AlignedCPitch[k] > display_cfg->plane_descriptors[k].surface.plane1.pitch) {
mode_lib->ms.support.PitchSupport = false;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
- dml2_printf("DML::%s: k=%u PitchY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
- dml2_printf("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
- dml2_printf("DML::%s: k=%u PitchC = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
- dml2_printf("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchC = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
#endif
}
@@ -7708,11 +8274,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
mode_lib->ms.support.ViewportExceedsSurface = true;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u SurfaceWidthY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
- dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceWidthY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceHeightY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
#endif
}
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
@@ -7894,8 +8460,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPDSC;
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
#endif
// ensure the number dsc slices is integer multiple based on ODM mode
@@ -7911,9 +8477,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DSCSlicesODMModeSupported = ((mode_lib->ms.support.NumberOfDSCSlices[k] % 4) == 0);
#if defined(__DML_VBA_DEBUG__)
if (!mode_lib->ms.support.DSCSlicesODMModeSupported) {
- dml2_printf("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
- dml2_printf("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
}
#endif
} else {
@@ -7958,7 +8524,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
if (!mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
- dml2_printf("WARNING: DML::%s: MPCC is override to disable but viewport is too large to be supported with single pipe!\n", __func__);
+			DML_LOG_VERBOSE("WARNING: DML::%s: MPCC override is set to disable but the viewport is too large to be supported with a single pipe!\n", __func__);
}
} else {
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
@@ -7968,7 +8534,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
#endif
}
@@ -8138,7 +8704,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout);
- if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_clocks_khz.dtbclk / 1000)) {
+ if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_ss_clocks_khz.dtbclk / 1000)) {
mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = true;
}
} else {
@@ -8167,7 +8733,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->DSCFormatFactor = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
#endif
if (mode_lib->ms.RequiresDSC[k] == true) {
s->PixelClockBackEndFactor = 3.0;
@@ -8185,10 +8751,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
- dml2_printf("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
- dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
- dml2_printf("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
#endif
}
}
@@ -8423,13 +8989,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
- dml2_printf("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
#endif
/* VActive bytes to fetch for UCLK P-State */
@@ -8502,7 +9068,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
calculate_cursor_req_attributes(
@@ -8531,9 +9097,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
#endif
CalculateUrgentBurstFactor(
@@ -8605,7 +9171,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
#endif
/* Immediate Flip and MALL parameters */
@@ -8654,16 +9220,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
(s->SubViewportMALLPStateMethod && s->FullFrameMALLPStateMethod) || s->SubViewportMALLRefreshGreaterThan120Hz;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
- dml2_printf("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
- dml2_printf("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
- dml2_printf("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
- dml2_printf("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
- dml2_printf("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: urgent latency tolarance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
+ DML_LOG_VERBOSE("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
#endif
mode_lib->ms.support.OutstandingRequestsSupport = true;
@@ -8703,10 +9268,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
- dml2_printf("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
- dml2_printf("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
#endif
}
@@ -8722,8 +9287,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
#endif
}
}
@@ -8869,7 +9434,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
if (mode_lib->ms.NotEnoughUrgentLatencyHiding[k]) {
mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = false;
- dml2_printf("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
}
}
@@ -8878,639 +9443,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.avg_bandwidth_support_ok[m][n] && (m == dml2_core_internal_soc_state_sys_active || mode_lib->soc.mall_allocated_for_dcn_mbytes > 0)) {
mode_lib->ms.support.AvgBandwidthSupport = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
#endif
}
}
}
- /* Prefetch Check */
- {
- mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
-
- calculate_hostvm_inefficiency_factor(
- &s->HostVMInefficiencyFactor,
- &s->HostVMInefficiencyFactorPrefetch,
-
- display_cfg->gpuvm_enable,
- display_cfg->hostvm_enable,
- mode_lib->ip.remote_iommu_outstanding_translations,
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
- mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
-
- mode_lib->ms.Total3dlutActive = 0;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
-
- // Calculate tdlut schedule related terms
- calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
- calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
- calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
- calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
- calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
- calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
-
- // output
- calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
- calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
- calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
- calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
- calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
- }
-
- min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
- s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
-
- CalculateExtraLatency(
- display_cfg,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
- s->ReorderingBytes,
- mode_lib->ms.DCFCLK,
- mode_lib->ms.FabricClock,
- mode_lib->ip.pixel_chunk_size_kbytes,
- min_return_bw_for_latency,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.dpte_group_bytes,
- s->tdlut_bytes_per_group,
- s->HostVMInefficiencyFactor,
- s->HostVMInefficiencyFactorPrefetch,
- mode_lib->soc.hostvm_min_page_size_kbytes,
- mode_lib->soc.qos_parameters.qos_type,
- !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.request_size_bytes_luma,
- mode_lib->ms.support.request_size_bytes_chroma,
- mode_lib->ip.meta_chunk_size_kbytes,
- mode_lib->ip.dchub_arb_to_ret_delay,
- mode_lib->ms.TripToMemory,
- mode_lib->ip.hostvm_mode,
-
- // output
- &mode_lib->ms.ExtraLatency,
- &mode_lib->ms.ExtraLatency_sr,
- &mode_lib->ms.ExtraLatencyPrefetch);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- s->impacted_dst_y_pre[k] = 0;
-
- s->recalc_prefetch_schedule = 0;
- s->recalc_prefetch_done = 0;
- do {
- mode_lib->ms.support.PrefetchSupported = true;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
-
- s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
-
- mode_lib->ms.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->ms.UrgLatency,
- mode_lib->ms.TripToMemory,
- !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
-
- myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
- myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
- myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
- myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
- myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
- myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
- myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
- myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
- myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
- myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
- myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
- myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
- myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- myPipe->ODMMode = mode_lib->ms.ODMMode[k];
- myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
- myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
- myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
- dml2_printf("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
-#endif
- CalculatePrefetchSchedule_params->display_cfg = display_cfg;
- CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
- CalculatePrefetchSchedule_params->myPipe = myPipe;
- CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
- CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
- CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
- CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
- CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
- CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
- CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
- CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
- CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
- CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
- CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
- CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
- CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
- CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
- CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
- CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
- CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
- CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
- CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
- CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
- CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
- CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
- CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
- CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
- CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
- CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
- CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
- CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
- CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
- CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
- CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
- CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
- CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
-
- // output
- CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
- CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
- CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
- CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
- CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
- CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
- CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
- CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
- CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
- CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
- CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
- CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
- CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
- CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
- CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
- CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
- CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
- CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
- CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
- CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
- CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
-
- mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
-
- mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
- dml2_printf("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
- } // for k num_planes
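Editorial sketch: the loop above follows DML2's convention of filling a scratch-allocated parameter struct per plane, with inputs as plain fields and outputs as pointers, then calling one worker (CalculatePrefetchSchedule) that returns only a pass/fail flag. Below is a minimal, self-contained analogue of that pattern; all names and the toy schedule math are hypothetical, not DML2 API:

#include <stdbool.h>
#include <stdio.h>

struct sched_params {
	/* inputs */
	double urgent_latency_us;   /* memory latency to hide */
	double line_time_us;        /* one scanline, in microseconds */
	unsigned int vstartup;      /* lines of lead time before active */
	/* outputs, written through pointers so the struct is reusable */
	double *dst_y_prefetch;
	bool *no_time_for_prefetch;
};

/* returns true when the schedule does NOT fit, mirroring the
 * NoTimeForPrefetch convention used in the loop above */
static bool calc_schedule(const struct sched_params *p)
{
	double budget_us = p->vstartup * p->line_time_us - p->urgent_latency_us;

	*p->dst_y_prefetch = budget_us / p->line_time_us;
	*p->no_time_for_prefetch = (budget_us <= 0.0);
	return *p->no_time_for_prefetch;
}

int main(void)
{
	double dst_y;
	bool no_time;
	struct sched_params p = {
		.urgent_latency_us = 4.0, .line_time_us = 7.4, .vstartup = 12,
		.dst_y_prefetch = &dst_y, .no_time_for_prefetch = &no_time,
	};
	bool unsupported = calc_schedule(&p);

	printf("dst_y_prefetch=%.2f unsupported=%d\n", dst_y, unsupported);
	return 0;
}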
-
- CalculateDCFCLKDeepSleepTdlut(
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.BytePerPixelY,
- mode_lib->ms.BytePerPixelC,
- mode_lib->ms.SwathWidthY,
- mode_lib->ms.SwathWidthC,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.PSCL_FACTOR,
- mode_lib->ms.PSCL_FACTOR_CHROMA,
- mode_lib->ms.RequiredDPPCLK,
- mode_lib->ms.vactive_sw_bw_l,
- mode_lib->ms.vactive_sw_bw_c,
- mode_lib->soc.return_bus_width_bytes,
- mode_lib->ms.RequiredDISPCLK,
- s->tdlut_bytes_to_deliver,
- s->prefetch_swath_time_us,
-
- /* Output */
- &mode_lib->ms.dcfclk_deepsleep);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.dst_y_prefetch[k] < 2.0
- || mode_lib->ms.LinesForVM[k] >= 32.0
- || mode_lib->ms.LinesForDPTERow[k] >= 16.0
- || mode_lib->ms.NoTimeForPrefetch[k] == true
- || s->DSTYAfterScaler[k] > 8) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
- dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
- dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
- dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
- dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
- }
- }
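The rejection rule above reads naturally as a single predicate; the thresholds (minimum 2 lines of prefetch, fewer than 32 VM lines, fewer than 16 DPTE-row lines, at most 8 lines after the scaler) are taken verbatim from the checks, while the helper itself is hypothetical:

#include <stdbool.h>

/* mirrors the if() above: any one violation invalidates prefetch */
static bool prefetch_plane_ok(double dst_y_prefetch, double lines_for_vm,
			      double lines_for_dpte_row, bool no_time,
			      int dst_y_after_scaler)
{
	return dst_y_prefetch >= 2.0 &&
	       lines_for_vm < 32.0 &&
	       lines_for_dpte_row < 16.0 &&
	       !no_time &&
	       dst_y_after_scaler <= 8;
}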
-
- mode_lib->ms.support.DynamicMetadataSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
- mode_lib->ms.support.DynamicMetadataSupported = false;
- }
- }
-
- mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
- mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
- mode_lib->ms.support.VRatioInPrefetchSupported = false;
- dml2_printf("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
- }
- }
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
-
- // By default, do not recalc prefetch schedule
- s->recalc_prefetch_schedule = 0;
-
- // Only do the urg vs. prefetch bandwidth check, the flip schedule check, and the power-saving feature support check IF the prefetch schedule check is ok
- if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- // Calculate Urgent burst factor for prefetch
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
- dml2_printf("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
-#endif
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->ms.swath_width_luma_ub[k],
- mode_lib->ms.swath_width_chroma_ub[k],
- mode_lib->ms.SwathHeightY[k],
- mode_lib->ms.SwathHeightC[k],
- s->line_times[k],
- mode_lib->ms.UrgLatency,
- mode_lib->ms.VRatioPreY[k],
- mode_lib->ms.VRatioPreC[k],
- mode_lib->ms.BytePerPixelInDETY[k],
- mode_lib->ms.BytePerPixelInDETC[k],
- mode_lib->ms.DETBufferSizeY[k],
- mode_lib->ms.DETBufferSizeC[k],
- /* Output */
- &mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChromaPre[k],
- &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
-
- // Calculate the urgent bandwidth required, i.e. both the urgent and non-urgent peak bandwidth
- // assume flip bw is 0 at this point
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- mode_lib->ms.final_flip_bw[k] = 0;
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 0;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- // Check urg peak bandwidth against available urg bw
- // check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
- check_urgent_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth
- &s->dummy_single[1], // double* frac_urg_bandwidth_mall
- &mode_lib->ms.support.UrgVactiveBandwidthSupport,
- &mode_lib->ms.support.PrefetchBandwidthSupported,
-
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->ms.support.non_urg_bandwidth_required,
- mode_lib->ms.support.urg_vactive_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
- dml2_printf("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
- }
-
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
- CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
- CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
- CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
- CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
- CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
- CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
- CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
- CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
- CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
- CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
- if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
-
- CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
- ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
-
- // if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre; prefetch should then be possible
- CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
- CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
- mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
- s->recalc_prefetch_done = 1;
- s->recalc_prefetch_schedule = 1;
- }
-#endif
- } // prefetch schedule ok, do urg bw and flip schedule
- } while (s->recalc_prefetch_schedule);
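The do/while that closes here implements a one-shot retry: the schedule is first computed optimistically, and only if CheckGlobalPrefetchAdmissibility asks for stretched Tpre values is the whole schedule recomputed once (recalc_prefetch_done guards against looping forever). A stripped-down sketch of that control flow, with hypothetical helpers standing in for the real per-iteration work:

#include <stdbool.h>

struct retry_state {
	int recalc_done;      /* set after the first admissibility pass */
	int recalc_schedule;  /* request one more scheduling iteration  */
};

static bool schedule_all_planes(void) { return true; }

static bool globally_admissible(struct retry_state *s)
{
	if (!s->recalc_done) {
		s->recalc_done = 1;
		s->recalc_schedule = 1;  /* ask for exactly one redo */
		return false;
	}
	return true;
}

static bool prefetch_check(void)
{
	struct retry_state s = { 0, 0 };
	bool supported;

	do {
		s.recalc_schedule = 0;  /* default: no retry */
		supported = schedule_all_planes();
		if (supported)
			supported = globally_admissible(&s);
	} while (s.recalc_schedule);

	return supported;
}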
-
- // Flip Schedule
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
-
- }
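The accumulation above turns per-plane VM/PTE/meta byte counts into a total immediate-flip payload, weighting each plane by its DPP count since every pipe fetches its own copy. Restated in isolation (types and names hypothetical):

/* total bytes that must move during the immediate-flip window */
static unsigned long total_flip_bytes(const unsigned int per_pipe_bytes[],
				      const unsigned int dpp_per_plane[],
				      const int flip_enabled[],
				      unsigned int num_planes)
{
	unsigned long total = 0;

	for (unsigned int k = 0; k < num_planes; k++)
		if (flip_enabled[k])  /* non-flipping planes contribute 0 */
			total += (unsigned long)per_pipe_bytes[k] * dpp_per_plane[k];
	return total;
}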
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- mode_lib->ip.max_flip_time_lines,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 1;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
- } else { // if prefetch is not supported, assume iflip is not supported either
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
- s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
- s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
- s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
- s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
- s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
- s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
- s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.USRRetrainingLatency = 0;
- s->mSOCParameters.SMNLatency = 0;
- s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
- s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
- s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
- s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
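A unit check worth recording for the df_response_time_us assignment above: FabricClock is in MHz, i.e. cycles per microsecond, so dividing a cycle count by it yields microseconds directly. A one-line helper makes the dimensional argument explicit (example numbers are made up):

/* cycles / (cycles/us) = us; e.g. 1000 fclk cycles at 2000 MHz = 0.5 us */
static double fclk_cycles_to_us(double cycles, double fclk_mhz)
{
	return cycles / fclk_mhz;
}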
-
- CalculateWatermarks_params->display_cfg = display_cfg;
- CalculateWatermarks_params->USRRetrainingRequired = false;
- CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
- CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
- CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
- CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
- CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
- CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
- CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
- CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
- CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
- CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
- CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
- CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
- CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
- CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
- CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
- CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
- CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
- CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
- CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
- CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
- CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
- CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
- CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
-
- // Output
- CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
- CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
- CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
- CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
- CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
- CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
- CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
- CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
-
- CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
-
- calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
- }
- dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
- // End of Prefetch Check
+ dml_core_ms_prefetch_check(mode_lib, display_cfg);
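The single added call above replaces the entire removed prefetch/flip/watermark block. The helper can keep a two-argument signature because every intermediate already lives in mode_lib->ms or mode_lib->scratch rather than on the caller's stack. A toy sketch of that state-carrying style, with hypothetical types (not the real dml2 structures):

struct calc_state {
	double urg_latency_us;
	int prefetch_supported;
	/* ...dozens of intermediates live here, not as locals... */
};

struct mode_lib_like {
	struct calc_state ms;  /* mode-support working state */
};

/* Because all intermediates hang off mode_lib, an extracted helper
 * stays signature-stable no matter how much it computes. */
static void prefetch_check(struct mode_lib_like *lib, const void *display_cfg)
{
	(void)display_cfg;
	lib->ms.prefetch_supported = (lib->ms.urg_latency_us < 10.0);
}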
mode_lib->ms.support.max_urgent_latency_us = s->mSOCParameters.max_urgent_latency_us;
@@ -9546,8 +9485,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.dram_change_vactive_det_fill_delay_us);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
- dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
+ DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
#endif
/*Mode Support, Voltage State and SOC Configuration*/
@@ -9597,17 +9536,17 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&& !mode_lib->ms.support.ExceededMALLSize
&& mode_lib->ms.support.g6_temp_read_support
&& ((!display_cfg->hostvm_enable && !s->ImmediateFlipRequired) || mode_lib->ms.support.ImmediateFlipSupport)) {
- dml2_printf("DML::%s: mode is supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is supported\n", __func__);
mode_lib->ms.support.ModeSupport = true;
} else {
- dml2_printf("DML::%s: mode is NOT supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is NOT supported\n", __func__);
mode_lib->ms.support.ModeSupport = false;
}
}
// Since mode_support now works on one particular power state, there is only one state idx (index 0).
- dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
- dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
@@ -9623,8 +9562,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutputRate[k] = mode_lib->ms.OutputRate[k];
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
- dml2_printf("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
#endif
}
@@ -9632,7 +9571,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.ModeSupport)
dml2_print_mode_support_info(&mode_lib->ms.support, true);
- dml2_printf("DML::%s: --- DONE --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- DONE --- \n", __func__);
#endif
return mode_lib->ms.support.ModeSupport;
@@ -9642,18 +9581,18 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
{
unsigned int result;
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
result = dml_core_mode_support(in_out_params);
if (result)
*in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support;
- dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
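Alongside the mechanical dml2_printf to DML_LOG_VERBOSE rename, this patch also corrects format specifiers: %u becomes %lu above for reserved_vblank_time_ns, and several %f become %d further down for integer group/request counts. A mismatched specifier in a variadic logger is undefined behavior in C (the class of bug -Wformat catches); a minimal correct pairing, with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long reserved_vblank_time_ns = 400000UL;
	int num_groups = 3;

	/* wrong: printf("%u", reserved_vblank_time_ns) and
	 * printf("%f", num_groups) are both undefined behavior */
	printf("reserved_vblank_time_ns = %lu\n", reserved_vblank_time_ns);
	printf("num_groups = %d\n", num_groups);
	return 0;
}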
@@ -9687,19 +9626,19 @@ static void CalculatePixelDeliveryTimes(
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
- dml2_printf("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
- dml2_printf("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
- dml2_printf("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
- dml2_printf("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
+ DML_LOG_VERBOSE("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
#endif
if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz;
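A hedged unit check for the delivery-time expression above, assuming pixel_clock_mhz is pixels per microsecond and h_ratio maps source width to output width: dividing the swath width (times pipes used) by h_ratio gives output pixels, and dividing by the pixel clock then gives microseconds:

/* source pixels -> output pixels -> microseconds; v_ratio <= 1 branch */
static double line_delivery_time_us(unsigned int swath_width_px,
				    unsigned int dpps_used,
				    double h_ratio,
				    double pixel_clock_mhz)
{
	double output_px = swath_width_px * dpps_used / h_ratio;

	return output_px / pixel_clock_mhz;  /* px / (px/us) = us */
}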
@@ -9733,10 +9672,10 @@ static void CalculatePixelDeliveryTimes(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
#endif
}
@@ -9752,12 +9691,12 @@ static void CalculatePixelDeliveryTimes(
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub_c[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
#endif
}
}
@@ -9853,14 +9792,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
#endif
}
@@ -9881,7 +9820,7 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
else
p->time_per_tdlut_group[k] = 0;
- dml2_printf("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
if (p->display_cfg->gpuvm_enable == true) {
if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -9897,14 +9836,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_luma_ub <= 2) {
dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
p->time_per_pte_group_nom_luma[k] = p->DST_Y_PER_PTE_ROW_NOM_L[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
p->time_per_pte_group_vblank_luma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
@@ -9928,9 +9867,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_chroma_ub <= 2) {
dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
p->time_per_pte_group_nom_chroma[k] = p->DST_Y_PER_PTE_ROW_NOM_C[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
p->time_per_pte_group_vblank_chroma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
@@ -9945,17 +9884,17 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
p->time_per_pte_group_flip_chroma[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
#endif
}
} // CalculateMetaAndPTETimes
@@ -9991,18 +9930,18 @@ static void CalculateVMGroupAndRequestTimes(
double line_time;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
bool dcc_mrq_enable = display_cfg->plane_descriptors[k].surface.dcc.enable && mrq_present;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
- dml2_printf("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
#endif
if (display_cfg->gpuvm_enable) {
@@ -10071,13 +10010,13 @@ static void CalculateVMGroupAndRequestTimes(
else
TimePerVMRequestFlip[k] = 0.0;
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
- dml2_printf("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %f\n", __func__, k, num_group_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %f\n", __func__, k, num_group_per_lower_vm_stage_flip);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %f\n", __func__, k, num_req_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %f\n", __func__, k, num_req_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %d\n", __func__, k, num_group_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %d\n", __func__, k, num_group_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %d\n", __func__, k, num_req_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %d\n", __func__, k, num_req_per_lower_vm_stage_flip);
if (display_cfg->gpuvm_max_page_table_levels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
@@ -10094,10 +10033,10 @@ static void CalculateVMGroupAndRequestTimes(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
#endif
}
}
@@ -10113,7 +10052,6 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
unsigned int SingleVTotal = 0;
bool SameTiming = true;
bool FoundCriticalSurface = false;
- double LastZ8StutterPeriod = 0;
memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals));
@@ -10127,9 +10065,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0, l->MaximumEffectiveCompressionLuma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0 / l->MaximumEffectiveCompressionLuma;
@@ -10142,9 +10080,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1, l->MaximumEffectiveCompressionChroma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1 / l->MaximumEffectiveCompressionChroma;
@@ -10160,19 +10098,19 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->AverageDCCZeroSizeFraction = l->TotalZeroSizeRequestReadBandwidth / p->TotalDataReadBandwidth;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
- dml2_printf("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
- dml2_printf("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
- dml2_printf("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
- dml2_printf("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
#endif
if (l->AverageDCCZeroSizeFraction == 1) {
l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth;
@@ -10189,10 +10127,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
- dml2_printf("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
+ DML_LOG_VERBOSE("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
#endif
} else {
l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate,
@@ -10200,16 +10138,16 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * (p->rob_alloc_compressed ? l->AverageDCCCompressionRate : 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
- dml2_printf("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
- dml2_printf("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
+ DML_LOG_VERBOSE("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
+ DML_LOG_VERBOSE("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
#endif
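The four "min" prints above spell out the bound being taken. As a paraphrase of that branch's four terms (pseudocode; the code nests binary math_min2() calls rather than taking a 4-ary min):

	EffectiveCompressedBufferSize = min(
		CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate,        /* min 1 */
		MetaFIFOSizeInKEntries * 1024 * 64 /
			(AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate
			 + 1 / AverageDCCCompressionRate),                             /* min 2 */
		ROBBufferSizeInKByte * 1024 - CompbufReservedSpace64B * 64,            /* min 3 */
		(ZeroSizeBufferEntries - CompbufReservedSpaceZs) * 64 /
			(AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate)); /* min 4 */

The else branch below drops the zero-size terms and scales the ROB term by the compression rate when rob_alloc_compressed is set.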
*p->StutterPeriod = 0;
@@ -10220,15 +10158,15 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->LinesInDETYRoundedDownToSwath = math_floor2(l->LinesInDETY, p->SwathHeightY[k]);
l->DETBufferingTimeY = l->LinesInDETYRoundedDownToSwath * ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
- dml2_printf("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
- dml2_printf("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
#endif
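A worked instance of the DET buffering time computed just above, with hypothetical values:

	/* LinesInDETY = 37.5, SwathHeightY = 8  -> rounded down to 32 lines
	 * h_total = 2200, pixel_clock = 148500 kHz -> line time = 2200 / 148.5 ~= 14.81 us
	 * v_ratio = 1.0
	 *
	 *   DETBufferingTimeY = 32 * 14.81 / 1.0 ~= 474 us
	 */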
if (!FoundCriticalSurface || l->DETBufferingTimeY < *p->StutterPeriod) {
@@ -10248,17 +10186,17 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->SinglePipeCriticalSurface = (p->DPPPerSurface[k] == 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
- dml2_printf("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
- dml2_printf("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
- dml2_printf("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
- dml2_printf("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
#endif
}
}
@@ -10276,14 +10214,14 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = math_min2(*p->StutterPeriod * p->TotalDataReadBandwidth, l->EffectiveCompressedBufferSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
- dml2_printf("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
- dml2_printf("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
- dml2_printf("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
#endif
l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer
@@ -10292,10 +10230,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
/ math_min2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
*p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
- dml2_printf("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
+ DML_LOG_VERBOSE("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
#endif
l->TotalActiveWriteback = 0;
memset(l->stream_visited, 0, DML2_MAX_PLANES * sizeof(bool));
@@ -10324,9 +10262,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
if (l->TotalActiveWriteback == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
- dml2_printf("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
+ DML_LOG_VERBOSE("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
#endif
*p->StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitTime + l->StutterBurstTime) / *p->StutterPeriod) * 100;
*p->Z8StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitZ8Time + l->StutterBurstTime) / *p->StutterPeriod) * 100;
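With the quantities logged in the block above, the two assignments reduce to a single expression; hypothetical numbers:

	/* SRExitTime = 10 us, StutterBurstTime = 20 us, StutterPeriod = 500 us:
	 *
	 *   StutterEfficiencyNotIncludingVBlank = max(0, 1 - (10 + 20) / 500) * 100 = 94%
	 *
	 * The Z8 variant substitutes SRExitZ8Time for SRExitTime.
	 */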
@@ -10339,11 +10277,11 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
*p->Z8NumberOfStutterBurstsPerFrame = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
if (*p->StutterEfficiencyNotIncludingVBlank > 0) {
@@ -10358,7 +10296,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
if (*p->Z8StutterEfficiencyNotIncludingVBlank > 0) {
- LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
+ //LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) {
*p->Z8StutterEfficiency = *p->Z8StutterEfficiencyNotIncludingVBlank;
} else {
@@ -10370,25 +10308,25 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
- dml2_printf("DML::%s: SameTiming = %u\n", __func__, SameTiming);
- dml2_printf("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
- dml2_printf("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
- dml2_printf("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
- dml2_printf("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
+ DML_LOG_VERBOSE("DML::%s: SameTiming = %u\n", __func__, SameTiming);
+ DML_LOG_VERBOSE("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
+ DML_LOG_VERBOSE("DML::%s: LastZ8StutterPeriod = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank > 0 ? l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod : 0);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
*p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
- dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
#endif
}
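The hunks below also retire the DML2_ASSERT() spelling. A comparable sketch for the assert side, again with assumed internals rather than the driver's actual definition:

#define DML_ASSERT(expr)                                                 \
	do {                                                             \
		if (!(expr))                                             \
			DML_LOG_VERBOSE("DML ASSERT FAIL: %s:%d (%s)\n", \
					__FILE__, __LINE__, #expr);      \
	} while (0)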
@@ -10422,7 +10360,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
double max_uclk_mhz = 0;
double min_return_latency_in_DCFCLK_cycles = 0;
- dml2_printf("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
memset(&mode_lib->scratch, 0, sizeof(struct dml2_core_internal_scratch));
memset(&mode_lib->mp, 0, sizeof(struct dml2_core_internal_mode_program));
@@ -10444,13 +10382,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
if (cfg_support_info->stream_support_info[stream_index].odms_used > 1)
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
switch (cfg_support_info->stream_support_info[stream_index].odms_used) {
case (4):
@@ -10476,51 +10414,51 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
- DML2_ASSERT(mode_lib->mp.Dppclk[k] > 0);
+ DML_ASSERT(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
- dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
}
mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
- DML2_ASSERT(mode_lib->mp.Dcfclk > 0);
- DML2_ASSERT(mode_lib->mp.FabricClock > 0);
- DML2_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
- DML2_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
- DML2_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
- DML2_ASSERT(mode_lib->mp.Dispclk > 0);
- DML2_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
- DML2_ASSERT(s->SOCCLK > 0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
- dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
- dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
- dml2_printf("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
+ DML_ASSERT(mode_lib->mp.Dcfclk > 0);
+ DML_ASSERT(mode_lib->mp.FabricClock > 0);
+ DML_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
+ DML_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
+ DML_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
+ DML_ASSERT(mode_lib->mp.Dispclk > 0);
+ DML_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
+ DML_ASSERT(s->SOCCLK > 0);
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
for (k = 0; k < s->num_active_planes; ++k) {
- dml2_printf("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
- }
- dml2_printf("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
- dml2_printf("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: min_clk_table min_fclk_khz = %d\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
- dml2_printf("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
+ DML_LOG_VERBOSE("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
+ }
+ DML_LOG_VERBOSE("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
+ DML_LOG_VERBOSE("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table min_fclk_khz = %ld\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) {
- dml2_printf("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
- dml2_printf("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
+ DML_LOG_VERBOSE("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
}
for (k = 0; k < s->num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -10617,8 +10555,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
mode_lib->mp.vactive_sw_bw_l[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
mode_lib->mp.vactive_sw_bw_c[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
}
CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
@@ -11097,7 +11035,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
for (k = 0; k < s->num_active_planes; ++k) {
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
@@ -11173,8 +11111,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.WritebackDelay[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
#endif
}
@@ -11183,7 +11121,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->immediate_flip_required = s->immediate_flip_required || display_cfg->plane_descriptors[k].immediate_flip;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
#endif
if (s->num_active_planes > 1) {
@@ -11219,12 +11157,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->DestinationLineTimesForPrefetchLessThan2 = false;
s->VRatioPrefetchMoreThanMax = false;
- dml2_printf("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
mode_lib->mp.TWait[k] = CalculateTWait(
display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
mode_lib->mp.UrgentLatency,
@@ -11261,7 +11199,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
#endif
CalculatePrefetchSchedule_params->display_cfg = display_cfg;
CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
@@ -11325,7 +11263,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &s->dummy_single_array[0][k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k];
@@ -11356,7 +11294,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.impacted_prefetch_margin_us[k] = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
#endif
mode_lib->mp.VStartupMin[k] = s->MaxVStartupLines[k];
} // for k
@@ -11366,9 +11304,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.NoTimeToPrefetch[k] == true ||
mode_lib->mp.NotEnoughTimeForDynamicMetadata[k] ||
mode_lib->mp.DSTYAfterScaler[k] > 8) {
- dml2_printf("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
- dml2_printf("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
- dml2_printf("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
if (mode_lib->mp.dst_y_prefetch[k] < 2)
@@ -11377,24 +11315,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
s->VRatioPrefetchMoreThanMax = true;
- dml2_printf("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
}
if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
if (s->VRatioPrefetchMoreThanMax == true || s->DestinationLineTimesForPrefetchLessThan2 == true) {
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
- dml2_printf("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ DML_LOG_VERBOSE("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
mode_lib->mp.PrefetchModeSupported = false;
}
- dml2_printf("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
+ DML_LOG_VERBOSE("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
mode_lib->mp.PrefetchModeSupported ? "" : "NOT ", CalculatePrefetchSchedule_params->VStartup);
// Prefetch schedule OK, now check prefetch bw
@@ -11422,24 +11360,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
&mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
- dml2_printf("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
- dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
- dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
- dml2_printf("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
#endif
}
@@ -11468,7 +11406,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw;
@@ -11503,11 +11441,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.PrefetchModeSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
@@ -11533,12 +11471,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
mode_lib->mp.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->mp.NoOfDPP[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k = %u\n", __func__, k);
- dml2_printf("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: k = %u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
#endif
}
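The accumulation at the top of this hunk is, in outline (per_pipe_flip_bytes is derived from vm_bytes, PixelPTEBytesPerRow and meta_row_bytes in lines elided here):

	/* TotImmediateFlipBytes = sum over k of per_pipe_flip_bytes[k] * NoOfDPP[k] */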
for (k = 0; k < s->num_active_planes; ++k) {
@@ -11608,7 +11546,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw;
calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw;
calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw;
calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma;
calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma;
@@ -11631,13 +11569,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.ImmediateFlipSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for flip!", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for flip!", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (display_cfg->plane_descriptors[k].immediate_flip && mode_lib->mp.ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->mp.ImmediateFlipSupported = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
#endif
}
}
@@ -11650,28 +11588,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.PrefetchAndImmediateFlipSupported = (mode_lib->mp.PrefetchModeSupported == true && (!must_support_iflip || mode_lib->mp.ImmediateFlipSupported));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
- dml2_printf("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
- dml2_printf("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
#endif
- dml2_printf("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
}
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
if (!mode_lib->mp.PrefetchAndImmediateFlipSupported) {
- dml2_printf("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
} else {
- dml2_printf("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
// DCC Configuration
for (k = 0; k < s->num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
#endif
CalculateDCCConfiguration(
display_cfg->plane_descriptors[k].surface.dcc.enable,
@@ -11780,8 +11718,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->mp.Watermark, mode_lib->mp.pstate_keepout_dst_lines);
- dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
- dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
+ DML_LOG_VERBOSE("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
+ DML_LOG_VERBOSE("DML::%s: DEBUG PixelClock = %ld kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
//Display Pipeline Delivery Time in Prefetch, Groups
CalculatePixelDeliveryTimes(
@@ -11893,15 +11831,15 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TCalc + mode_lib->mp.MinTTUVBlank[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
s->Tvstartup_margin = (s->MaxVStartupLines[k] - mode_lib->mp.VStartupMin[k]) * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.MinTTUVBlank[k] + s->Tvstartup_margin;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
- dml2_printf("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
mode_lib->mp.Tdmdl[k] = mode_lib->mp.Tdmdl[k] + s->Tvstartup_margin;
@@ -11920,9 +11858,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->blank_lines_remaining = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active) - mode_lib->mp.VStartup[k];
if (s->blank_lines_remaining < 0) {
- dml2_printf("ERROR: Vstartup is larger than vblank!?\n");
+ DML_LOG_VERBOSE("ERROR: Vstartup is larger than vblank!?\n");
s->blank_lines_remaining = 0;
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup;
@@ -11936,23 +11874,23 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
- dml2_printf("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
- dml2_printf("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
- dml2_printf("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, HTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
- dml2_printf("DML::%s: k=%u, VTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
- dml2_printf("DML::%s: k=%u, VActive = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
- dml2_printf("DML::%s: k=%u, VFrontPorch = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
- dml2_printf("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
- dml2_printf("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
- dml2_printf("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActive = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VFrontPorch = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
#endif
}
//Maximum Bandwidth Used
- s->TotalWRBandwidth = 0;
+ mode_lib->mp.TotalWRBandwidth = 0;
for (k = 0; k < display_cfg->num_streams; ++k) {
s->WRBandwidth = 0;
if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) {
@@ -11961,7 +11899,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
(display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height
/ ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000))
* (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0);
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
+ mode_lib->mp.TotalWRBandwidth = mode_lib->mp.TotalWRBandwidth + s->WRBandwidth;
}
}
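
For readers tracing the units in the WRBandwidth expression above, here is a minimal, self-contained sketch of the same bookkeeping. The struct and field names are hypothetical simplifications, not the driver's types, and the numerator (output width times height) is assumed from context:

    #include <stdio.h>

    /* Sketch: each active writeback contributes width * height * bytes
     * per pixel, divided by the time the source takes to scan the
     * writeback input lines (bytes/us == MB/s). */
    struct wb_stream {
        int active;
        double out_width, out_height, in_height;
        double h_total, pixel_clock_khz;
        double bytes_per_pixel; /* 4.0 for dml2_444_32, else 8.0 */
    };

    static double total_wr_bandwidth(const struct wb_stream *wb, int n)
    {
        double total = 0.0;
        for (int k = 0; k < n; ++k) {
            if (!wb[k].active)
                continue;
            double scan_time_us = wb[k].h_total * wb[k].in_height /
                                  (wb[k].pixel_clock_khz / 1000.0);
            total += wb[k].out_width * wb[k].out_height *
                     wb[k].bytes_per_pixel / scan_time_us;
        }
        return total;
    }

    int main(void)
    {
        struct wb_stream wb = { 1, 1920, 1080, 1080, 2200, 148500, 4.0 };
        printf("WRBandwidth = %f MB/s\n", total_wr_bandwidth(&wb, 1));
        return 0; /* 518.4 MB/s for this made-up 1080p case */
    }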
@@ -11969,9 +11907,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.vactive_sw_bw_l[k] + mode_lib->mp.vactive_sw_bw_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
#endif
}
@@ -12051,28 +11989,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
min_return_latency_in_DCFCLK_cycles = (min_return_uclk_cycles / max_uclk_mhz + min_return_fclk_cycles / max_fclk_mhz) * hard_minimum_dcfclk_mhz;
mode_lib->mp.min_return_latency_in_dcfclk = (unsigned int)min_return_latency_in_DCFCLK_cycles;
mode_lib->mp.dcfclk_deep_sleep_hysteresis = (unsigned int)math_max2(32, (double)mode_lib->ip.pixel_chunk_size_kbytes * 1024 * 3 / 4 / 64 - min_return_latency_in_DCFCLK_cycles);
- DML2_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
+ DML_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
- dml2_printf("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
- dml2_printf("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
- dml2_printf("DML::%s: min_return_uclk_cycles = %d\n", __func__, min_return_uclk_cycles);
- dml2_printf("DML::%s: min_return_fclk_cycles = %d\n", __func__, min_return_fclk_cycles);
- dml2_printf("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
- dml2_printf("DML::%s: --- END --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: min_return_uclk_cycles = %ld\n", __func__, min_return_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_fclk_cycles = %ld\n", __func__, min_return_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: --- END --- \n", __func__);
#endif
return (in_out_params->mode_lib->mp.PrefetchAndImmediateFlipSupported);
}
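
The dcfclk_deep_sleep_hysteresis computation a few lines above converts a worst-case return latency into DCFCLK cycles and subtracts it from the budget implied by the pixel chunk size. A worked example with made-up numbers (only the arithmetic mirrors the driver code):

    #include <stdio.h>

    static double max2(double a, double b) { return a > b ? a : b; }

    int main(void)
    {
        double min_return_uclk_cycles = 300.0, min_return_fclk_cycles = 200.0;
        double max_uclk_mhz = 1000.0, max_fclk_mhz = 2000.0;
        double hard_minimum_dcfclk_mhz = 400.0;
        double pixel_chunk_size_kbytes = 8.0;

        /* Return latency converted from uclk/fclk cycles into DCFCLK cycles */
        double lat = (min_return_uclk_cycles / max_uclk_mhz +
                      min_return_fclk_cycles / max_fclk_mhz) *
                     hard_minimum_dcfclk_mhz;

        /* 3/4 of the pixel chunk expressed in 64-byte requests, minus the
         * latency already covered; clamped to at least 32 cycles. */
        unsigned int hyst = (unsigned int)max2(32.0,
            pixel_chunk_size_kbytes * 1024.0 * 3.0 / 4.0 / 64.0 - lat);

        printf("latency = %f DCFCLK cycles, hysteresis = %u\n", lat, hyst);
        return 0; /* 160 cycles, hysteresis clamps to 32; DML asserts < 256 */
    }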
bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params)
{
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
bool result = dml_core_mode_programming(in_out_params);
- dml2_printf("DML::%s: result = %0d\n", __func__, result);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: result = %0d\n", __func__, result);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
@@ -12130,16 +12068,16 @@ void dml2_core_calcs_get_dpte_row_height(
unsigned int MacroTileHeight = is_plane1 ? MacroTileHeightC : MacroTileHeightY;
unsigned int PTEBufferSizeInRequests = is_plane1 ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
- dml2_printf("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
- dml2_printf("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
- dml2_printf("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
- dml2_printf("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
- dml2_printf("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
- dml2_printf("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
- dml2_printf("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
+ DML_LOG_VERBOSE("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
+ DML_LOG_VERBOSE("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
+ DML_LOG_VERBOSE("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
+ DML_LOG_VERBOSE("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
+ DML_LOG_VERBOSE("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
#endif
unsigned int dummy_integer[21];
@@ -12193,16 +12131,16 @@ void dml2_core_calcs_get_dpte_row_height(
CalculateVMAndRowBytes(&mode_lib->scratch.calculate_vm_and_row_bytes_params);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
+ DML_LOG_VERBOSE("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
#endif
}
static bool is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -12220,6 +12158,8 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
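
Every wm_regs field above is programmed the same way: a latency in microseconds multiplied by the DCHUB reference clock in MHz yields refclk cycles. A minimal sketch of that conversion (function name invented for illustration):

    #include <stdio.h>

    /* us * MHz == cycles, truncated to the unsigned register value */
    static unsigned int wm_to_refclk_cycles(double watermark_us,
                                            double refclk_freq_in_mhz)
    {
        return (unsigned int)(watermark_us * refclk_freq_in_mhz);
    }

    int main(void)
    {
        /* e.g. a 9.0 us urgent watermark at a 50 MHz refclk */
        printf("urgent = %u refclk cycles\n", wm_to_refclk_cycles(9.0, 50.0));
        return 0; /* prints 450 */
    }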
@@ -12246,11 +12186,11 @@ void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs
cursor_dlg_regs->dst_x_offset = (unsigned int) ((dst_x_offset > 0) ? dst_x_offset : 0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
- dml2_printf("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
- dml2_printf("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
+ DML_LOG_VERBOSE("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
#endif
cursor_dlg_regs->chunk_hdl_adjust = 3;
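
The dst_x_offset assignment above floors a possibly negative offset at zero before casting, since the register field is unsigned. A small sketch of why the clamp matters:

    #include <stdio.h>

    /* dst_x_offset can legitimately go negative (cursor partially off the
     * left edge); clamp before the unsigned cast to avoid wraparound. */
    static unsigned int clamp_dst_x_offset(int dst_x_offset)
    {
        return (unsigned int)(dst_x_offset > 0 ? dst_x_offset : 0);
    }

    int main(void)
    {
        printf("%u %u\n", clamp_dst_x_offset(-16), clamp_dst_x_offset(64));
        return 0; /* prints "0 64" — a blind cast of -16 would wrap instead */
    }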
@@ -12286,7 +12226,7 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
double stored_swath_c_bytes;
bool is_phantom_pipe;
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
pixel_chunk_bytes = (unsigned int)(mode_lib->ip.pixel_chunk_size_kbytes * 1024);
min_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.min_pixel_chunk_size_bytes);
@@ -12329,19 +12269,19 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (sw_mode == dml2_sw_linear && display_cfg->gpuvm_enable) {
unsigned int p0_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_l(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
#endif
- DML2_ASSERT(p0_pte_row_height_linear >= 8);
+ DML_ASSERT(p0_pte_row_height_linear >= 8);
rq_regs->rq_regs_l.pte_row_height_linear = math_log2_approx(p0_pte_row_height_linear) - 3;
if (dual_plane) {
unsigned int p1_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_c(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
#endif
if (sw_mode == dml2_sw_linear) {
- DML2_ASSERT(p1_pte_row_height_linear >= 8);
+ DML_ASSERT(p1_pte_row_height_linear >= 8);
}
rq_regs->rq_regs_c.pte_row_height_linear = math_log2_approx(p1_pte_row_height_linear) - 3;
}
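
The pte_row_height_linear registers above store log2(height) - 3, which is why both paths assert a height of at least 8 lines first. A sketch of the encoding, assuming math_log2_approx behaves as an integer log2 for the power-of-two heights involved:

    #include <stdio.h>

    static unsigned int ilog2_u32(unsigned int v)
    {
        unsigned int r = 0;
        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        /* height 8 encodes as 0, 16 as 1, 32 as 2, 64 as 3 */
        for (unsigned int h = 8; h <= 64; h *= 2)
            printf("height %2u -> reg %u\n", h, ilog2_u32(h) - 3);
        return 0;
    }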
@@ -12375,12 +12315,12 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) {
detile_buf_plane1_addr = (unsigned int)(detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
#endif
} else {
detile_buf_plane1_addr = (unsigned int)(dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0); // 2/3 to luma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
#endif
}
}
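
The detile buffer split above gives chroma half the buffer when luma swaths are not much larger (ratio <= 1.5), and otherwise gives luma two thirds, rounded down to a 1 KiB boundary. A self-contained sketch (parameter names invented; dml_round_to_multiple with round-up 0 is assumed to round down):

    #include <stdio.h>

    /* Returns the plane1 (chroma) base address in KiB. */
    static unsigned int detile_buf_plane1_addr(double swath_l_bytes,
                                               double swath_c_bytes,
                                               unsigned int buf_bytes)
    {
        if (swath_l_bytes / swath_c_bytes <= 1.5)
            return (unsigned int)(buf_bytes / 2.0 / 1024.0); /* even split */
        unsigned int two_thirds = (unsigned int)(2.0 * buf_bytes / 3.0);
        unsigned int rounded = (two_thirds / 1024) * 1024; /* 1 KiB floor */
        return rounded / 1024;
    }

    int main(void)
    {
        printf("%u KiB\n", detile_buf_plane1_addr(4096, 4096, 64 * 1024));
        printf("%u KiB\n", detile_buf_plane1_addr(8192, 2048, 64 * 1024));
        return 0; /* 32 KiB (half to chroma) and 42 KiB (2/3 to luma) */
    }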
@@ -12388,15 +12328,15 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
rq_regs->plane1_base_address = detile_buf_plane1_addr;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
- dml2_printf("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
- dml2_printf("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
- dml2_printf("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
+ DML_LOG_VERBOSE("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
#endif
- //dml2_printf_rq_regs_st(rq_regs);
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
static void rq_dlg_get_dlg_reg(
@@ -12411,10 +12351,10 @@ static void rq_dlg_get_dlg_reg(
memset(l, 0, sizeof(struct dml2_core_shared_rq_dlg_get_dlg_reg_locals));
- dml2_printf("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
l->plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- DML2_ASSERT(l->plane_idx < DML2_MAX_PLANES);
+ DML_ASSERT(l->plane_idx < DML2_MAX_PLANES);
l->source_format = dml2_444_8;
l->odm_mode = dml2_odm_mode_bypass;
@@ -12444,18 +12384,18 @@ static void rq_dlg_get_dlg_reg(
l->pclk_freq_in_mhz = (double)l->timing->pixel_clock_khz / 1000;
l->ref_freq_to_pix_freq = l->refclk_freq_in_mhz / l->pclk_freq_in_mhz;
- dml2_printf("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
- dml2_printf("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
- dml2_printf("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
- dml2_printf("DML_DLG: %s: soc.refclk_mhz = %3.2f\n", __func__, mode_lib->soc.dchub_refclk_mhz);
- dml2_printf("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
+ DML_LOG_VERBOSE("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
+ DML_LOG_VERBOSE("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: soc.refclk_mhz = %d\n", __func__, mode_lib->soc.dchub_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
- DML2_ASSERT(l->refclk_freq_in_mhz != 0);
- DML2_ASSERT(l->pclk_freq_in_mhz != 0);
- DML2_ASSERT(l->ref_freq_to_pix_freq < 4.0);
+ DML_ASSERT(l->refclk_freq_in_mhz != 0);
+ DML_ASSERT(l->pclk_freq_in_mhz != 0);
+ DML_ASSERT(l->ref_freq_to_pix_freq < 4.0);
// Need to figure out which side of odm combine we're in
// Assume the pipe instance under the same plane is in order
@@ -12484,14 +12424,14 @@ static void rq_dlg_get_dlg_reg(
l->pipe_idx_in_combine = pipe_idx - l->first_pipe_idx_in_plane; // DML assumes the pipes in the same plane will have continuous indexing (i.e. plane 0 uses pipes 0 and 1, and plane 1 uses pipes 2 and 3, etc.)
disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double)l->hblank_end + (double)l->pipe_idx_in_combine * (double)l->hactive / (double)l->odm_combine_factor) * l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
- dml2_printf("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
- dml2_printf("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
- dml2_printf("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
+ DML_LOG_VERBOSE("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
}
- dml2_printf("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
- DML2_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int)(l->ref_freq_to_pix_freq * math_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int)(l->ref_freq_to_pix_freq * (double)l->htotal * math_pow(2, 8));
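
The two assignments above pack ratios into fixed-point register fields: ref_freq_to_pix_freq carries 19 fractional bits, refcyc_per_htotal carries 8. A sketch of the packing (the range asserts elsewhere keep the integer part within the register width):

    #include <stdio.h>

    /* Multiply by 2^frac_bits and truncate to get the fixed-point value. */
    static unsigned int to_fixed(double v, unsigned int frac_bits)
    {
        return (unsigned int)(v * (double)(1u << frac_bits));
    }

    int main(void)
    {
        double ratio = 0.25;     /* refclk / pixel clock, illustrative */
        unsigned int htotal = 2200;
        printf("ref_freq_to_pix_freq reg = %u\n", to_fixed(ratio, 19));
        printf("refcyc_per_htotal reg   = %u\n", to_fixed(ratio * htotal, 8));
        return 0; /* 131072 and 140800 */
    }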
@@ -12500,20 +12440,20 @@ static void rq_dlg_get_dlg_reg(
l->min_ttu_vblank = mode_lib->mp.MinTTUVBlank[mode_lib->mp.pipe_plane[pipe_idx]];
l->min_dst_y_next_start = (unsigned int)(mode_lib->mp.MIN_DST_Y_NEXT_START[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
- dml2_printf("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
l->vready_after_vcount0 = (unsigned int)(mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[mode_lib->mp.pipe_plane[pipe_idx]]);
disp_dlg_regs->vready_after_vcount0 = l->vready_after_vcount0;
- dml2_printf("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
+ DML_LOG_VERBOSE("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
l->dst_x_after_scaler = (unsigned int)(mode_lib->mp.DSTXAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
l->dst_y_after_scaler = (unsigned int)(mode_lib->mp.DSTYAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
- dml2_printf("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
l->dst_y_prefetch = mode_lib->mp.dst_y_prefetch[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_vm_vblank = mode_lib->mp.dst_y_per_vm_vblank[mode_lib->mp.pipe_plane[pipe_idx]];
@@ -12521,28 +12461,28 @@ static void rq_dlg_get_dlg_reg(
l->dst_y_per_vm_flip = mode_lib->mp.dst_y_per_vm_flip[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
if (l->dst_y_prefetch > 0 && l->dst_y_per_vm_vblank > 0 && l->dst_y_per_row_vblank > 0) {
- DML2_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
+ DML_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
}
l->vratio_pre_l = mode_lib->mp.VRatioPrefetchY[mode_lib->mp.pipe_plane[pipe_idx]];
l->vratio_pre_c = mode_lib->mp.VRatioPrefetchC[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
- dml2_printf("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
// Active
l->refcyc_per_line_delivery_pre_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
l->refcyc_per_line_delivery_pre_c = 0.0;
l->refcyc_per_line_delivery_c = 0.0;
@@ -12551,8 +12491,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_line_delivery_pre_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
}
disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int)(mode_lib->mp.Tdmdl_vm[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12561,8 +12501,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
l->refcyc_per_req_delivery_pre_c = 0.0;
l->refcyc_per_req_delivery_c = 0.0;
@@ -12570,16 +12510,16 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
}
// TTU - Cursor
- DML2_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
+ DML_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
// Assign to register structures
disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)l->min_dst_y_next_start * math_pow(2, 2));
- DML2_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
+ DML_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
disp_dlg_regs->dst_y_after_scaler = l->dst_y_after_scaler; // in terms of line
disp_dlg_regs->refcyc_x_after_scaler = (unsigned int)((double)l->dst_x_after_scaler * l->ref_freq_to_pix_freq); // in terms of refclk
@@ -12592,10 +12532,10 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->vratio_prefetch = (unsigned int)(l->vratio_pre_l * math_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int)(l->vratio_pre_c * math_pow(2, 19));
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(mode_lib->mp.TimePerVMGroupVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(mode_lib->mp.TimePerVMGroupFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12662,11 +12602,11 @@ static void rq_dlg_get_dlg_reg(
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->min_ttu_vblank = (unsigned int)(l->min_ttu_vblank * l->refclk_freq_in_mhz);
- // CHECK for HW registers' range, DML2_ASSERT or clamp
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
+ // CHECK for HW registers' range, DML_ASSERT or clamp
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(math_pow(2, 23) - 1);
@@ -12680,16 +12620,16 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1);
- DML2_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
- DML2_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
+ DML_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
if (disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_l = (unsigned int)math_pow(2, 17) - 1;
}
if (l->dual_plane) {
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_c = (unsigned int)math_pow(2, 17) - 1;
}
}
@@ -12700,20 +12640,20 @@ static void rq_dlg_get_dlg_reg(
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(math_pow(2, 23) - 1);
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
if (l->dual_plane) {
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
}
@@ -12736,11 +12676,11 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co
arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
- dml2_printf("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
- dml2_printf("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
- dml2_printf("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
+ DML_LOG_VERBOSE("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
+ DML_LOG_VERBOSE("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
#endif
}
@@ -13013,10 +12953,10 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
- dml2_printf("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
- dml2_printf("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
- dml2_printf("DML::%s: vblank_reserved_time_us = %f\n", __func__, out->vblank_reserved_time_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
+ DML_LOG_VERBOSE("DML::%s: vblank_reserved_time_us = %u\n", __func__, out->vblank_reserved_time_us);
#endif
}
@@ -13129,6 +13069,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4;
else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4)
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_16x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_16x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_20x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_20x4;
out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k];
out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k];
@@ -13313,7 +13257,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
- out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0;
+ out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0;
out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 4e502f0a6d20..28687565ac22 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -102,6 +102,7 @@ struct dml2_core_internal_DmlPipe {
double DCFClkDeepSleep;
unsigned int DPPPerSurface;
bool ScalerEnabled;
+ bool UPSPEnabled;
enum dml2_rotation_angle RotationAngle;
bool mirrored;
unsigned int ViewportHeight;
@@ -186,7 +187,9 @@ enum dml2_core_internal_output_type_rate {
dml2_core_internal_output_rate_hdmi_rate_6x4 = 9,
dml2_core_internal_output_rate_hdmi_rate_8x4 = 10,
dml2_core_internal_output_rate_hdmi_rate_10x4 = 11,
- dml2_core_internal_output_rate_hdmi_rate_12x4 = 12
+ dml2_core_internal_output_rate_hdmi_rate_12x4 = 12,
+ dml2_core_internal_output_rate_hdmi_rate_16x4 = 13,
+ dml2_core_internal_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_core_internal_watermarks {
@@ -260,12 +263,14 @@ struct dml2_core_internal_mode_support_info {
bool AvgBandwidthSupport;
bool UrgVactiveBandwidthSupport;
bool EnoughUrgentLatencyHidingSupport;
+ bool PrefetchScheduleSupported;
bool PrefetchSupported;
bool PrefetchBandwidthSupported;
bool DynamicMetadataSupported;
bool VRatioInPrefetchSupported;
bool DISPCLK_DPPCLK_Support;
bool TotalAvailablePipesSupport;
+ bool ODMSupport;
bool ModeSupport;
bool ViewportSizeSupport;
@@ -314,9 +319,7 @@ struct dml2_core_internal_mode_support_info {
double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; // same as urg_bandwidth, except not scaled by urg burst factor
double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
double max_urgent_latency_us;
double max_non_urgent_latency_us;
double avg_non_urgent_latency_us;
@@ -329,6 +332,8 @@ struct dml2_core_internal_mode_support_info {
bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
+ bool dcfclk_support;
+ bool qos_bandwidth_support;
};
struct dml2_core_internal_mode_support {
@@ -350,9 +355,11 @@ struct dml2_core_internal_mode_support {
double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double GlobalDPPCLK; /// <brief the Max DPPCLK freq out of all pipes
+ double GlobalDTBCLK; /// <brief the Max DTBCLK freq out of all pipes
double uclk_freq_mhz;
double dram_bw_mbps;
double max_dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double MaxFabricClock; /// <brief Basically just the clock freq at the min (or given) state
double MaxDCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
@@ -394,9 +401,13 @@ struct dml2_core_internal_mode_support {
double TWait[DML2_MAX_PLANES];
bool UnboundedRequestEnabled;
+ unsigned int compbuf_reserved_space_64b;
+ bool hw_debug5;
unsigned int CompressedBufferSizeInkByte;
double VRatioPreY[DML2_MAX_PLANES];
double VRatioPreC[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_l[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_c[DML2_MAX_PLANES];
unsigned int swath_width_luma_ub[DML2_MAX_PLANES];
unsigned int swath_width_chroma_ub[DML2_MAX_PLANES];
unsigned int RequiredSlots[DML2_MAX_PLANES];
@@ -417,8 +428,8 @@ struct dml2_core_internal_mode_support {
double dst_y_prefetch[DML2_MAX_PLANES];
double LinesForVM[DML2_MAX_PLANES];
double LinesForDPTERow[DML2_MAX_PLANES];
- double SwathWidthYSingleDPP[DML2_MAX_PLANES];
- double SwathWidthCSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthYSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthCSingleDPP[DML2_MAX_PLANES];
unsigned int BytePerPixelY[DML2_MAX_PLANES];
unsigned int BytePerPixelC[DML2_MAX_PLANES];
double BytePerPixelInDETY[DML2_MAX_PLANES];
@@ -469,13 +480,58 @@ struct dml2_core_internal_mode_support {
double mall_prefetch_sdp_overhead_factor[DML2_MAX_PLANES]; // overhead to the imall or phantom pipe
double mall_prefetch_dram_overhead_factor[DML2_MAX_PLANES];
+ bool is_using_mall_for_ss[DML2_MAX_PLANES];
+ unsigned int meta_row_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightC[DML2_MAX_PLANES];
+ bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
+ unsigned int meta_req_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_luma_ub[DML2_MAX_PLANES];
+ unsigned int meta_req_width[DML2_MAX_PLANES];
+ unsigned int meta_row_width[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthY[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeY[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_chroma_ub[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthC[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear_chroma[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeC[DML2_MAX_PLANES];
+ unsigned int meta_req_height[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_req_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightY[DML2_MAX_PLANES];
+ unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
+ unsigned int vm_group_bytes[DML2_MAX_PLANES];
+ unsigned int VReadyOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateWidthPix[DML2_MAX_PLANES];
+ double TSetup[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
+ unsigned int VStartupMin[DML2_MAX_PLANES]; /// <brief Minimum vstartup to meet the prefetch schedule (i.e. the prefetch solution can be found at this vstartup time); not the actual global sync vstartup pos.
+ double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
+ double MaxActiveFCLKChangeLatencySupported;
+
// Backend
bool RequiresDSC[DML2_MAX_PLANES];
bool RequiresFEC[DML2_MAX_PLANES];
double OutputBpp[DML2_MAX_PLANES];
+ double DesiredOutputBpp[DML2_MAX_PLANES];
+ double PixelClockBackEnd[DML2_MAX_PLANES];
unsigned int DSCDelay[DML2_MAX_PLANES];
enum dml2_core_internal_output_type OutputType[DML2_MAX_PLANES];
enum dml2_core_internal_output_type_rate OutputRate[DML2_MAX_PLANES];
+ bool TotalAvailablePipesSupportNoDSC;
+ bool TotalAvailablePipesSupportDSC;
+ unsigned int NumberOfDPPNoDSC;
+ unsigned int NumberOfDPPDSC;
+ enum dml2_odm_mode ODMModeNoDSC;
+ enum dml2_odm_mode ODMModeDSC;
+ double RequiredDISPCLKPerSurfaceNoDSC;
+ double RequiredDISPCLKPerSurfaceDSC;
+ unsigned int EstimatedNumberOfDSCSlices[DML2_MAX_PLANES];
// Bandwidth Related Info
double BandwidthAvailableForImmediateFlip;
@@ -484,8 +540,14 @@ struct dml2_core_internal_mode_support {
double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
- /* oto bw should also be considered when calculating peak urgent bw to avoid situations oto/equ mismatches between ms and mp */
- double RequiredPrefetchBWOTO[DML2_MAX_PLANES];
+ /* Max bandwidth calculated from the prefetch schedule should be considered in addition to the pixel data bw to avoid ms/mp mismatches (see the sketch after this struct).
+ * 1. oto bw should also be considered when calculating peak urgent bw to avoid oto/equ mismatches between ms and mp
+ *
+ * 2. equ bandwidth needs to be considered for calculating peak urgent bw when the equ schedule is used in mode support.
+ * Some slight difference in variables may cause the pixel data bandwidth to be higher
+ * even though overall equ prefetch bandwidths can be lower going from ms to mp
+ */
+ double RequiredPrefetchBWMax[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
double prefetch_cursor_bw[DML2_MAX_PLANES];
double prefetch_vmrow_bw[DML2_MAX_PLANES];
@@ -538,7 +600,44 @@ struct dml2_core_internal_mode_support {
bool mall_comb_mcache_c[DML2_MAX_PLANES];
bool lc_comb_mcache[DML2_MAX_PLANES];
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
+ unsigned int meta_row_height_luma[DML2_MAX_PLANES];
+ unsigned int meta_row_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+
+ unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+
+ unsigned int MaximumVStartup[DML2_MAX_PLANES];
+
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+
+ unsigned int DSTYAfterScaler[DML2_MAX_PLANES];
+ unsigned int DSTXAfterScaler[DML2_MAX_PLANES];
+ enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
};
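
As referenced from the RequiredPrefetchBWMax comment above, a hedged sketch of the idea: mode support records the worst case of the candidate prefetch schedule bandwidths alongside the pixel data bandwidth, so mode programming, which may pick either schedule, never exceeds what mode support validated. All names here are illustrative, not the driver's helpers:

    #include <stdio.h>

    static double required_prefetch_bw_max(double pixel_data_bw,
                                           double bw_oto, double bw_equ)
    {
        double sched_bw = bw_oto > bw_equ ? bw_oto : bw_equ;
        return sched_bw > pixel_data_bw ? sched_bw : pixel_data_bw;
    }

    int main(void)
    {
        /* ms sees equ slightly lower, mp slightly higher: taking the max
         * keeps the ms estimate a safe upper bound. */
        printf("%f\n", required_prefetch_bw_max(1000.0, 950.0, 1020.0));
        return 0;
    }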
/// @brief A mega structure that houses various info for the mode programming step.
@@ -548,6 +647,7 @@ struct dml2_core_internal_mode_program {
double FabricClock; /// <brief Basically just the clock freq at the min (or given) state
//double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double uclk_freq_mhz;
unsigned int NoOfDPP[DML2_MAX_PLANES];
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
@@ -599,6 +699,8 @@ struct dml2_core_internal_mode_program {
unsigned int MacroTileHeightC[DML2_MAX_PLANES];
unsigned int MacroTileWidthY[DML2_MAX_PLANES];
unsigned int MacroTileWidthC[DML2_MAX_PLANES];
+ double MaximumSwathWidthLuma[DML2_MAX_PLANES];
+ double MaximumSwathWidthChroma[DML2_MAX_PLANES];
bool surf_linear128_l[DML2_MAX_PLANES];
bool surf_linear128_c[DML2_MAX_PLANES];
@@ -631,6 +733,14 @@ struct dml2_core_internal_mode_program {
double UrgentBurstFactorChroma[DML2_MAX_PLANES];
double UrgentBurstFactorChromaPre[DML2_MAX_PLANES];
+ double MaximumSwathWidthInLineBufferLuma;
+ double MaximumSwathWidthInLineBufferChroma;
+
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
double meta_row_bw[DML2_MAX_PLANES];
unsigned int meta_row_bytes[DML2_MAX_PLANES];
unsigned int meta_req_width[DML2_MAX_PLANES];
@@ -652,7 +762,9 @@ struct dml2_core_internal_mode_program {
unsigned int PTERequestSizeC[DML2_MAX_PLANES];
double TWait[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
double Tdmdl_vm[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
double Tdmdl[DML2_MAX_PLANES];
double TSetup[DML2_MAX_PLANES];
unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
@@ -684,6 +796,38 @@ struct dml2_core_internal_mode_program {
double TCalc;
unsigned int TotImmediateFlipBytes;
+ unsigned int MaxTotalDETInKByte;
+ unsigned int NomDETInKByte;
+ unsigned int MinCompressedBufferSizeInKByte;
+ double PixelClockBackEnd[DML2_MAX_PLANES];
+ double OutputBpp[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+ unsigned int MaxVStartupLines[DML2_MAX_PLANES]; /// <brief more like vblank for the plane's OTG
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+ bool immediate_flip_required; // any pipes need immediate flip
+ double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
+ double TotalWRBandwidth;
+ double max_urgent_latency_us;
+ double df_response_time_us;
+
// -------------------
// Output
// -------------------
@@ -694,9 +838,12 @@ struct dml2_core_internal_mode_program {
// Support
bool UrgVactiveBandwidthSupport;
+ bool PrefetchScheduleSupported;
+ bool UrgentBandwidthSupport;
bool PrefetchModeSupported; // <brief Is the prefetch mode (bandwidth and latency) supported
bool ImmediateFlipSupported;
bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES];
+ bool dcfclk_support;
// Clock
double Dcfclk;
@@ -788,7 +935,7 @@ struct dml2_core_internal_mode_program {
// RQ registers
bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
-
+ double VActiveLatencyHidingUs[DML2_MAX_PLANES];
unsigned int SubViewportLinesNeededInMALL[DML2_MAX_PLANES];
bool is_using_mall_for_ss[DML2_MAX_PLANES];
@@ -1001,10 +1148,10 @@ struct dml2_core_calcs_mode_programming_locals {
double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
double surface_dummy_bw0[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
- unsigned int dummy_integer_array[2][DML2_MAX_PLANES];
+ unsigned int dummy_integer_array[4][DML2_MAX_PLANES];
enum dml2_output_encoder_class dummy_output_encoder_array[DML2_MAX_PLANES];
double dummy_single_array[2][DML2_MAX_PLANES];
- unsigned int dummy_long_array[4][DML2_MAX_PLANES];
+ unsigned int dummy_long_array[8][DML2_MAX_PLANES];
bool dummy_boolean_array[2][DML2_MAX_PLANES];
bool dummy_boolean[2];
double dummy_single[2];
@@ -1028,7 +1175,6 @@ struct dml2_core_calcs_mode_programming_locals {
double dlg_vblank_start;
double LSetup;
double blank_lines_remaining;
- double TotalWRBandwidth;
double WRBandwidth;
struct dml2_core_internal_DmlPipe myPipe;
double PixelClockBackEndFactor;
@@ -1078,6 +1224,8 @@ struct dml2_core_calcs_mode_programming_locals {
enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
unsigned int lb_source_lines_l[DML2_MAX_PLANES];
unsigned int lb_source_lines_c[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
};
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals {
@@ -1151,6 +1299,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params {
unsigned int HostVMMinPageSize;
unsigned int DCCMetaBufferSizeBytes;
bool mrq_present;
+ enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
// Output
bool *PTEBufferSizeNotExceeded;
@@ -1387,7 +1536,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals {
double vm_row_bw;
double flip_and_active_bw;
double flip_and_prefetch_bw;
- double flip_and_prefetch_bw_oto;
+ double flip_and_prefetch_bw_max;
double active_and_excess_bw;
};
@@ -1416,6 +1565,7 @@ struct dml2_core_shared_CalculateFlipSchedule_locals {
struct dml2_core_shared_rq_dlg_get_dlg_reg_locals {
unsigned int plane_idx;
+ unsigned int stream_idx;
enum dml2_source_format_class source_format;
const struct dml2_timing_cfg *timing;
bool dual_plane;
@@ -1623,6 +1773,9 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
double *BytePerPixDETC;
unsigned int *DPPPerSurface;
bool mrq_present;
+ unsigned int dummy[2][DML2_MAX_PLANES];
+ unsigned int swath_width_luma_ub_single_dpp[DML2_MAX_PLANES];
+ unsigned int swath_width_chroma_ub_single_dpp[DML2_MAX_PLANES];
// output
unsigned int *req_per_swath_ub_l;
@@ -1640,6 +1793,8 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
unsigned int *DETBufferSizeC;
unsigned int *full_swath_bytes_l;
unsigned int *full_swath_bytes_c;
+ unsigned int *full_swath_bytes_single_dpp_l;
+ unsigned int *full_swath_bytes_single_dpp_c;
bool *UnboundedRequestEnabled;
unsigned int *compbuf_reserved_space_64b;
unsigned int *CompressedBufferSizeInkByte;
@@ -1799,7 +1954,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *VRatioPrefetchC;
double *RequiredPrefetchPixelDataBWLuma;
double *RequiredPrefetchPixelDataBWChroma;
- double *RequiredPrefetchBWOTO;
+ double *RequiredPrefetchBWMax;
bool *NotEnoughTimeForDynamicMetadata;
double *Tno_bw;
double *Tno_bw_flip;
@@ -2036,7 +2191,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params {
double *surface_read_bandwidth_c;
double *prefetch_bandwidth_l;
double *prefetch_bandwidth_c;
- double *prefetch_bandwidth_oto;
+ double *prefetch_bandwidth_max;
double *excess_vactive_fill_bw_l;
double *excess_vactive_fill_bw_c;
double *cursor_bw;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index 2504d9c2ec34..5f301befed16 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -82,7 +82,7 @@ bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -145,7 +145,7 @@ bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -208,7 +208,7 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
val = 1;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -216,104 +216,104 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
@@ -358,9 +358,9 @@ void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_di
out_bpp[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
#endif
}
}
@@ -391,7 +391,7 @@ unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
#endif
return num_active_pipes;
}
@@ -452,7 +452,7 @@ unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
};
}
@@ -464,7 +464,7 @@ bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
{
- return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
+ return sw_mode == dml2_sw_linear;
};
@@ -498,8 +498,8 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -511,7 +511,7 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -524,8 +524,8 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
#endif
return index;
}
@@ -533,32 +533,32 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
if (!clk_entry_found)
- DML2_ASSERT(clk_entry_found);
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if (dml2_core_utils_is_420(source_format) || dml2_core_utils_is_422_planar(source_format) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
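
Besides the mechanical dml2_printf() -> DML_LOG_VERBOSE() rename, several hunks above also widen mismatched length modifiers ("%d" -> "%ld") where the argument is an unsigned long such as uclk_freq_khz or minimum_uclk_khz. A standalone illustration of why that matters (not part of the patch; strictly speaking, "%lu" is the exact conversion for unsigned long):

#include <stdio.h>

int main(void)
{
        unsigned long uclk_freq_khz = 1200000UL;

        /* printf("uclk_freq_khz = %d\n", uclk_freq_khz); is undefined behavior
         * on LP64 targets: "%d" consumes a 32-bit int, but the argument is a
         * 64-bit long */
        printf("uclk_freq_khz = %ld\n", uclk_freq_khz); /* matching width, as in the hunks above */
        return 0;
}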
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 15507926f3a4..22969a533a7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -389,9 +389,6 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
if (result)
result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk);
- if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk);
-
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
if (result)
result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk);
@@ -754,6 +751,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
@@ -768,6 +767,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
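
The two hunks above add Z8 stutter enter/exit watermarks to sets A and B using the same conversion as their neighbors: a watermark computed by mode_lib in microseconds is programmed into DCHUB as refclk ticks. A standalone sketch of that arithmetic, with made-up refclk and watermark values:

#include <stdio.h>

int main(void)
{
        double refclk_freq_in_mhz = 100.0;    /* illustrative DCHUB refclk */
        double z8_enter_plus_exit_us = 400.5; /* illustrative mode_lib watermark */

        /* ticks = microseconds * MHz, since a 1 MHz clock ticks once per microsecond */
        unsigned int sr_enter_z8 = (unsigned int)(z8_enter_plus_exit_us * refclk_freq_in_mhz);

        printf("sr_enter_z8 = %u\n", sr_enter_z8); /* 40050 */
        return 0;
}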
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index f4b1a7d02d42..a265f254152c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
@@ -182,6 +182,10 @@ static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_
min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];
+ min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dtbclk = (unsigned int)((double)min_table->max_clocks_khz.dtbclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+
min_table->max_clocks_khz.dcfclk = soc_bb->clk_table.dcfclk.clk_values_khz[soc_bb->clk_table.dcfclk.num_clk_values - 1];
min_table->max_clocks_khz.fclk = soc_bb->clk_table.fclk.clk_values_khz[soc_bb->clk_table.fclk.num_clk_values - 1];
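
The new max_ss_clocks_khz entries derate each maximum clock by the SoC's spread-spectrum downspread, presumably so consumers see the frequency the downspread-modulated clock can actually sustain. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int max_dispclk_khz = 1950000; /* illustrative top of the DPM table */
        double dcn_downspread_percent = 0.5;    /* illustrative bounding-box value */

        /* sustainable clock = nominal maximum / (1 + downspread/100) */
        unsigned int max_ss_dispclk_khz =
                (unsigned int)((double)max_dispclk_khz / (1.0 + dcn_downspread_percent / 100.0));

        printf("max_ss_dispclk_khz = %u\n", max_ss_dispclk_khz); /* 1940298 */
        return 0;
}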
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index f50662b83296..d88b3e0082dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -659,7 +659,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
switch (i) {
case 1:
- DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -670,7 +670,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 2:
- DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -681,7 +681,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 3:
- DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -692,7 +692,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 4:
- DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
index dc2ce5e77f57..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
@@ -761,7 +761,7 @@ bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache
total_mcaches_required--;
}
}
- dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
+ DML_LOG_VERBOSE("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) {
result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
deleted file mode 100644
index c506667897c4..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_debug.h"
-
-int dml2_log_internal(const char *format, ...)
-{
- return 0;
-}
-
-int dml2_printf(const char *format, ...)
-{
-#ifdef _DEBUG
-#ifdef _DEBUG_PRINTS
- int result;
- va_list args;
- va_start(args, format);
-
- result = vprintf(format, args);
-
- va_end(args);
-
- return result;
-#else
- return 0;
-#endif
-#else
- return 0;
-#endif
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index bfe6f236d2e4..611c80f4f1bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -5,55 +5,185 @@
#ifndef __DML2_DEBUG_H__
#define __DML2_DEBUG_H__
-#ifndef DML2_ASSERT
-#define DML2_ASSERT(condition) ((void)0)
-#endif
+#include "os_types.h"
+#define DML_ASSERT(condition) ASSERT(condition)
+#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN
+#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__)
-/*
- * DML_LOG_FATAL - fatal errors for unrecoverable DML states until a restart.
- * DML_LOG_ERROR - unexpected but recoverable failures inside DML
- * DML_LOG_WARN - unexpected inputs or events to DML
- * DML_LOG_INFO - high level tracing of DML interfaces
- * DML_LOG_DEBUG - detailed tracing of DML internal components
- * DML_LOG_VERBOSE - detailed tracing of DML calculation procedure
- */
-#if !defined(DML_LOG_LEVEL)
-#if defined(_DEBUG) && defined(_DEBUG_PRINTS)
-/* for backward compatibility with old macros */
-#define DML_LOG_LEVEL 5
-#else
-#define DML_LOG_LEVEL 0
-#endif
-#endif
+/* private helper macros */
+#define _BOOL_FORMAT(field) "%s", field ? "true" : "false"
+#define _UINT_FORMAT(field) "%u", field
+#define _INT_FORMAT(field) "%d", field
+#define _DOUBLE_FORMAT(field) "%lf", field
+#define _ELEMENT_FUNC "function"
+#define _ELEMENT_COMP_IF "component_interface"
+#define _ELEMENT_TOP_IF "top_interface"
+#define _LOG_ENTRY(element) do { \
+ DML_LOG_INTERNAL("<"element" name=\""); \
+ DML_LOG_INTERNAL(__func__); \
+ DML_LOG_INTERNAL("\">\n"); \
+} while (0)
+#define _LOG_EXIT(element) DML_LOG_INTERNAL("</"element">\n")
+#define _LOG_SCALAR(field, format) do { \
+ DML_LOG_INTERNAL(#field" = "format(field)); \
+ DML_LOG_INTERNAL("\n"); \
+} while (0)
+#define _LOG_ARRAY(field, size, format) do { \
+ DML_LOG_INTERNAL(#field " = ["); \
+ for (int _i = 0; _i < (int) size; _i++) { \
+ DML_LOG_INTERNAL(format(field[_i])); \
+ if (_i + 1 == (int) size) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+}} while (0)
+#define _LOG_2D_ARRAY(field, size0, size1, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j])); \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
+#define _LOG_3D_ARRAY(field, size0, size1, size2, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL("["); \
+ for (int _k = 0; _k < (int) size2; _k++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j][_k])); \
+ if (_k + 1 == (int) size2) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
-#define DML_LOG_FATAL(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#if DML_LOG_LEVEL >= 1
-#define DML_LOG_ERROR(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+/* fatal errors for unrecoverable DML states until a full reset */
+#define DML_LOG_LEVEL_FATAL 0
+/* unexpected but recoverable failures inside DML */
+#define DML_LOG_LEVEL_ERROR 1
+/* unexpected inputs or events to DML */
+#define DML_LOG_LEVEL_WARN 2
+/* high level tracing of DML interfaces */
+#define DML_LOG_LEVEL_INFO 3
+/* tracing of DML internal executions */
+#define DML_LOG_LEVEL_DEBUG 4
+/* detailed tracing of DML calculation procedure */
+#define DML_LOG_LEVEL_VERBOSE 5
+
+#ifndef DML_LOG_LEVEL
+#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT
+#endif /* #ifndef DML_LOG_LEVEL */
+
+/* public macros for DML_LOG_LEVEL_FATAL and up */
+#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__)
+
+/* public macros for DML_LOG_LEVEL_ERROR and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR
+#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__)
+#define DML_ASSERT_MSG(condition, fmt, ...) \
+ do { \
+ if (!(condition)) { \
+ DML_LOG_ERROR("ASSERT hit in %s line %d\n", __func__, __LINE__); \
+ DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
+ DML_ASSERT(condition); \
+ } \
+ } while (0)
#else
#define DML_LOG_ERROR(fmt, ...) ((void)0)
+#define DML_ASSERT_MSG(condition, fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 2
-#define DML_LOG_WARN(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+
+/* public macros for DML_LOG_LEVEL_WARN and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN
+#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_WARN(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 3
-#define DML_LOG_INFO(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+
+/* public macros for DML_LOG_LEVEL_INFO and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO
+#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__)
+#define DML_LOG_TOP_IF_ENTER() _LOG_ENTRY(_ELEMENT_TOP_IF)
+#define DML_LOG_TOP_IF_EXIT() _LOG_EXIT(_ELEMENT_TOP_IF)
#else
#define DML_LOG_INFO(fmt, ...) ((void)0)
+#define DML_LOG_TOP_IF_ENTER() ((void)0)
+#define DML_LOG_TOP_IF_EXIT() ((void)0)
#endif
-#if DML_LOG_LEVEL >= 4
-#define DML_LOG_DEBUG(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+
+/* public macros for DML_LOG_LEVEL_DEBUG and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG
+#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
+#define DML_LOG_COMP_IF_ENTER() _LOG_ENTRY(_ELEMENT_COMP_IF)
+#define DML_LOG_COMP_IF_EXIT() _LOG_EXIT(_ELEMENT_COMP_IF)
+#define DML_LOG_FUNC_ENTER() _LOG_ENTRY(_ELEMENT_FUNC)
+#define DML_LOG_FUNC_EXIT() _LOG_EXIT(_ELEMENT_FUNC)
+#define DML_LOG_DEBUG_BOOL(field) _LOG_SCALAR(field, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_UINT(field) _LOG_SCALAR(field, _UINT_FORMAT)
+#define DML_LOG_DEBUG_INT(field) _LOG_SCALAR(field, _INT_FORMAT)
+#define DML_LOG_DEBUG_DOUBLE(field) _LOG_SCALAR(field, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) _LOG_ARRAY(field, size, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) _LOG_ARRAY(field, size, _UINT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) _LOG_ARRAY(field, size, _INT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) _LOG_ARRAY(field, size, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _UINT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _INT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _UINT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _INT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _DOUBLE_FORMAT)
#else
#define DML_LOG_DEBUG(fmt, ...) ((void)0)
+#define DML_LOG_COMP_IF_ENTER() ((void)0)
+#define DML_LOG_COMP_IF_EXIT() ((void)0)
+#define DML_LOG_FUNC_ENTER() ((void)0)
+#define DML_LOG_FUNC_EXIT() ((void)0)
+#define DML_LOG_DEBUG_BOOL(field) ((void)0)
+#define DML_LOG_DEBUG_UINT(field) ((void)0)
+#define DML_LOG_DEBUG_INT(field) ((void)0)
+#define DML_LOG_DEBUG_DOUBLE(field) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 5
-#define DML_LOG_VERBOSE(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+
+/* public macros for DML_LOG_LEVEL_VERBOSE */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
+#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
-#endif
-
-int dml2_log_internal(const char *format, ...);
-int dml2_printf(const char *format, ...);
-
-#endif
+#endif /* #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE */
+#endif /* __DML2_DEBUG_H__ */
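
The rewritten header replaces the dml2_printf()/dml2_log_internal() pair with a compile-time-leveled macro family routed through dm_output_to_console(). A minimal usage sketch, valid only inside a DML translation unit; the level override and the dump helper shown here are illustrative, not part of the patch:

#define DML_LOG_LEVEL 4 /* DML_LOG_LEVEL_DEBUG; the header defaults to WARN otherwise */
#include "dml2_debug.h"

static void dump_prefetch_state(const double *bw_l, unsigned int num_planes)
{
        DML_LOG_FUNC_ENTER();                         /* emits <function name="dump_prefetch_state"> */
        DML_LOG_DEBUG_UINT(num_planes);               /* "num_planes = N" */
        DML_LOG_DEBUG_ARRAY_DOUBLE(bw_l, num_planes); /* "bw_l = [a, b, ...]" */
        DML_ASSERT_MSG(num_planes > 0, "no active planes\n");
        DML_LOG_FUNC_EXIT();                          /* emits </function> */
}

Because the level is resolved by the preprocessor, every macro below the configured level compiles to ((void)0) and costs nothing at runtime.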
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index d8d01dceacdd..d52aa82283b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -38,6 +38,12 @@ struct dml2_mcg_min_clock_table {
} max_clocks_khz;
struct {
+ unsigned int dispclk;
+ unsigned int dppclk;
+ unsigned int dtbclk;
+ } max_ss_clocks_khz;
+
+ struct {
unsigned int dprefclk;
unsigned int xtalclk;
unsigned int pcierefclk;
@@ -196,6 +202,8 @@ struct dml2_core_mode_support_result {
} active;
unsigned int dispclk_khz;
+ unsigned int dpprefclk_khz;
+ unsigned int dtbrefclk_khz;
unsigned int dcfclk_deepsleep_khz;
unsigned int socclk_khz;
@@ -440,13 +448,17 @@ struct dml2_core_internal_state_intermediates {
};
struct dml2_core_mode_support_locals {
- struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ union {
+ struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params;
};
struct dml2_core_mode_programming_locals {
- struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ union {
+ struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
};
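
Wrapping the single scratch member in an anonymous union looks like a no-op today, but presumably it lets future mutually-exclusive scratch structures share the same storage instead of growing the locals. A standalone sketch of the pattern (struct names are illustrative):

#include <stdio.h>

struct scratch_a { char buf[4096]; };
struct scratch_b { char buf[8192]; };

struct mode_locals {
        /* phases that never run concurrently can overlap their scratch:
         * the union costs max(a, b) bytes rather than a + b */
        union {
                struct scratch_a a;
                struct scratch_b b;
        };
};

int main(void)
{
        printf("%zu\n", sizeof(struct mode_locals)); /* 8192, not 12288 */
        return 0;
}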
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index a966abd40788..5f1b49a50049 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -1082,22 +1082,22 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
if (stream_disp_cfg_index >= disp_cfg_index_max)
continue;
- if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
- scratch.odm_info.odm_factor = 1;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
- scratch.odm_info.odm_factor = 2;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
- scratch.odm_info.odm_factor = 4;
- } else {
- ASSERT(false);
- scratch.odm_info.odm_factor = 1;
- }
-
+ if (ctx->architecture == dml2_architecture_20) {
+ if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
+ scratch.odm_info.odm_factor = 1;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
+ scratch.odm_info.odm_factor = 2;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
+ scratch.odm_info.odm_factor = 4;
+ } else {
+ ASSERT(false);
+ scratch.odm_info.odm_factor = 1;
+ }
+ } else if (ctx->architecture == dml2_architecture_21) {
/* After the DML2.1 update, ODM interpretation changes and is no longer the same as for DML2.0.
* This is not an issue with the new resource management logic. This block ensures backward
* compatibility with legacy pipe management under the updated DML.
*/
- if (ctx->architecture == dml2_architecture_21) {
if (ODMMode[stream_disp_cfg_index] == 1) {
scratch.odm_info.odm_factor = 1;
} else if (ODMMode[stream_disp_cfg_index] == 2) {
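
The restructure gates the two ODM interpretations on ctx->architecture because the same ODMMode slot is enum-coded by DML2.0 (bypass / combine 2to1 / combine 4to1) but carries the raw segment count (1, 2, 4) under DML2.1. A standalone sketch of the distinction; the enum values here are illustrative, not the DML ones:

#include <stdio.h>

enum { ODM_BYPASS, ODM_COMBINE_2TO1, ODM_COMBINE_4TO1 }; /* illustrative */

static unsigned int odm_factor(int is_dml21, unsigned int odm_mode)
{
        if (is_dml21)
                return odm_mode;        /* DML2.1: already the segment count */
        switch (odm_mode) {             /* DML2.0: decode the enum */
        case ODM_COMBINE_2TO1: return 2;
        case ODM_COMBINE_4TO1: return 4;
        default: return 1;              /* bypass (or unknown, matching the ASSERT fallback) */
        }
}

int main(void)
{
        printf("%u %u\n", odm_factor(0, ODM_COMBINE_2TO1), odm_factor(1, 2)); /* 2 2 */
        return 0;
}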
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 6b3b8803e0ae..a56e75cdf712 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -868,7 +868,7 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 2061d43b92e1..3b866e876bf4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -896,7 +896,7 @@ static void populate_dummy_dml_surface_cfg(struct dml_surface_cfg_st *out, unsig
out->SurfaceWidthC[location] = in->timing.h_addressable;
out->SurfaceHeightC[location] = in->timing.v_addressable;
out->PitchY[location] = ((out->SurfaceWidthY[location] + 127) / 128) * 128;
- out->PitchC[location] = 0;
+ out->PitchC[location] = 1;
out->DCCEnable[location] = false;
out->DCCMetaPitchY[location] = 0;
out->DCCMetaPitchC[location] = 0;
@@ -953,6 +953,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
out->SourcePixelFormat[location] = dml_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
out->SourcePixelFormat[location] = dml_444_64;
@@ -973,7 +974,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
-static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out)
+static struct scaler_data *get_scaler_data_for_plane(
+ const struct dc_plane_state *in,
+ struct dc_state *context)
{
int i;
struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
@@ -994,7 +997,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
}
ASSERT(i < MAX_PIPES);
- memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
+ return &temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
@@ -1057,11 +1060,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
const struct dc_plane_state *in, struct dc_state *context,
const struct soc_bounding_box_st *soc)
{
- struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
- if (!scaler_data)
- return;
-
- get_scaler_data_for_plane(in, context, scaler_data);
+ struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context);
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
@@ -1126,8 +1125,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
out->DynamicMetadataTransmittedBytes[location] = 0;
out->NumberOfCursors[location] = 1;
-
- kfree(scaler_data);
}
static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2,
@@ -1192,22 +1189,6 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2
return location;
}
-static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struct dc_state *state, struct dml_display_cfg_st *dml_dispcfg)
-{
- int i;
-
- if (state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- ASSERT(state->stream_count == 1);
- dml_dispcfg->timing.DRRDisplay[0] = true;
- } else if (state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) {
-
- for (i = 0; i < dml_dispcfg->num_timings; i++) {
- if (dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i] == state->streams[state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index]->stream_id)
- dml_dispcfg->timing.DRRDisplay[i] = true;
- }
- }
-}
-
static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state)
{
unsigned int i;
@@ -1440,9 +1421,6 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
}
}
}
-
- if (!dml2->config.use_native_pstate_optimization)
- apply_legacy_svp_drr_settings(dml2, context, dml_dispcfg);
}
void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs,
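
get_scaler_data_for_plane() now hands back a pointer into the context's temp_pipe scratch instead of filling a kzalloc'd copy, removing a heap allocation (and its failure path) from every plane walked during validation. A standalone sketch of the pattern and the lifetime caveat it implies; names are illustrative:

#include <stddef.h>

struct scaler_data { int recout_width, recout_height; };

struct validation_ctx {
        struct scaler_data temp_scl; /* caller-owned scratch */
};

/* before: void get(..., struct scaler_data *out) plus kzalloc/memcpy/kfree;
 * after: return the scratch directly -- the pointer is only valid until the
 * next call that reuses the same scratch slot */
static struct scaler_data *get_scaler_data(struct validation_ctx *ctx)
{
        ctx->temp_scl.recout_width = 1920;
        ctx->temp_scl.recout_height = 1080;
        return &ctx->temp_scl;
}

int main(void)
{
        struct validation_ctx ctx;
        return get_scaler_data(&ctx)->recout_width == 1920 ? 0 : 1;
}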
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index e89571874185..0318260370ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/vmalloc.h>
-
#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
@@ -95,12 +93,17 @@ static void map_hw_resources(struct dml2_context *dml2,
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
const struct dml_display_cfg_st *display_cfg,
- struct dml_mode_support_info_st *evaluation_info)
+ struct dml_mode_support_info_st *evaluation_info,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
s->mode_support_params.in_display_cfg = display_cfg;
+ if (validate_mode == DC_VALIDATE_MODE_ONLY)
+ s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
+ else
+ s->mode_support_params.in_start_state_idx = 0;
s->mode_support_params.out_evaluation_info = evaluation_info;
memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
@@ -112,10 +115,8 @@ static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
{
int unused_dpps = p->ip_params->max_num_dpp;
- int i, j;
- int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count;
- int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index;
- float frame_time_sec, max_frame_time_sec;
+ int i;
+ int odms_needed;
int largest_blend_and_timing = 0;
bool optimization_done = false;
@@ -130,79 +131,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
if (p->new_display_config != p->cur_display_config)
*p->new_display_config = *p->cur_display_config;
- // Optimize P-State Support
- if (dml2->config.use_native_pstate_optimization) {
- if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) {
- // Find a display with < 120Hz refresh rate with maximal refresh rate that's not already subvp
- subvp_timing_to_add = -1;
- subvp_surface_to_add = -1;
- max_frame_time_sec = 0;
- surface_count = 0;
- for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
- refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000,
- (p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i]));
- if (refresh_rate_hz < 120) {
- // Check its upstream surfaces to see if this one could be converted to subvp.
- dpps_needed = 0;
- for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) {
- if (p->cur_display_config->plane.BlendingAndTiming[j] == i &&
- p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) {
- dpps_needed += p->cur_mode_support_info->DPPPerSurface[j];
- subvp_surface_to_add = j;
- surface_count++;
- }
- }
-
- if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) {
- frame_time_sec = (float)1 / refresh_rate_hz;
- if (frame_time_sec > max_frame_time_sec) {
- max_frame_time_sec = frame_time_sec;
- subvp_timing_to_add = i;
- }
- }
- }
- }
- if (subvp_timing_to_add >= 0) {
- new_timing_index = p->new_display_config->num_timings++;
- new_surface_index = p->new_display_config->num_surfaces++;
- // Add a phantom pipe reflecting the main pipe's timing
- dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add);
-
- pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us +
- p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
- (p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) /
- (double)p->new_display_config->timing.HTotal[subvp_timing_to_add]);
-
- subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines;
-
- p->new_display_config->timing.VActive[new_timing_index] = subvp_height;
- p->new_display_config->timing.VTotal[new_timing_index] = subvp_height +
- p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add];
-
- p->new_display_config->output.OutputDisabled[new_timing_index] = true;
-
- p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport;
-
- dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add);
- dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add);
-
- p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportStationary[new_surface_index] = false;
-
- p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable;
- p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe;
-
- p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0;
-
- p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required;
-
- p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index;
-
- optimization_done = true;
- }
- }
- }
// Optimize Clocks
if (!optimization_done) {
@@ -226,7 +154,8 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
return optimization_done;
}
-static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state)
+static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;
@@ -268,7 +197,8 @@ static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *d
dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
}
- dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info);
+ dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info,
+ validate_mode);
if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
@@ -333,7 +263,8 @@ static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const
}
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
- struct dc_state *display_state)
+ struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
unsigned int result = 0, i;
@@ -369,7 +300,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);
@@ -390,7 +322,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
dml2->v20.dml_core_ctx.policy = s->new_policy;
optimized_result = pack_and_call_dml_mode_support_ex(dml2,
&s->new_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (optimized_result)
optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);
@@ -409,7 +342,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
if (!optimized_result) {
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
}
}
@@ -419,118 +353,7 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
return result;
}
-static int find_drr_eligible_stream(struct dc_state *display_state)
-{
- int i;
-
- for (i = 0; i < display_state->stream_count; i++) {
- if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
- && display_state->streams[i]->ignore_msa_timing_param) {
- // Use ignore_msa_timing_param flag to identify as DRR
- return i;
- }
- }
-
- return -1;
-}
-
-static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state)
-{
- struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
- bool pstate_optimization_done = false;
- bool pstate_optimization_success = false;
- bool result = false;
- int drr_display_index = 0, non_svp_streams = 0;
- bool force_svp = dml2->config.svp_pstate.force_enable_subvp;
-
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
-
- result = dml_mode_support_wrapper(dml2, display_state);
-
- if (!result) {
- pstate_optimization_done = true;
- } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- }
-
- if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) {
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
-
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- non_svp_streams = display_state->stream_count;
-
- while (!pstate_optimization_done) {
- result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
-
- // Always try adding SVP first
- if (result)
- result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info);
- else
- pstate_optimization_done = true;
-
-
- if (result) {
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- pstate_optimization_done = true;
- }
-
- if (result) {
- non_svp_streams--;
-
- if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- } else {
- drr_display_index = find_drr_eligible_stream(display_state);
-
- // If there is only 1 remaining non SubVP pipe that is DRR, check static
- // schedulability for SubVP + DRR.
- if (non_svp_streams == 1 && drr_display_index >= 0) {
- if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) {
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- }
-
- if (pstate_optimization_success) {
- pstate_optimization_done = true;
- } else {
- pstate_optimization_done = false;
- }
- }
- }
- }
- }
-
- if (!pstate_optimization_success) {
- dml2_svp_remove_all_phantom_pipes(dml2, display_state);
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- return result;
-}
-
-static bool call_dml_mode_support_and_programming(struct dc_state *context)
+static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode)
{
unsigned int result = 0;
unsigned int min_state = 0;
@@ -544,16 +367,13 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
if (!context->streams[0]->sink->link->dc->caps.is_apu) {
- min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context,
+ validate_mode);
ASSERT(min_state_for_g6_temp_read >= 0);
}
- if (!dml2->config.use_native_pstate_optimization) {
- result = optimize_pstate_with_svp_and_drr(dml2, context);
- } else {
- result = dml_mode_support_wrapper(dml2, context);
- }
+ result = dml_mode_support_wrapper(dml2, context, validate_mode);
/* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state, causing crashes later on.
* Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
@@ -575,7 +395,8 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
return result;
}
-static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context)
+static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2 = context->bw_ctx.dml2;
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
@@ -611,7 +432,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);
- result = call_dml_mode_support_and_programming(context);
+ result = call_dml_mode_support_and_programming(context, validate_mode);
/* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation
* is required or not, the resource context needs to correctly reflect the number of active pipes. We would
* only know the correct number if active pipes after dml2_map_dc_pipes is called.
@@ -628,7 +449,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
if (need_recalculation) {
/* Engage the DML again if recalculation is required. */
- call_dml_mode_support_and_programming(context);
+ call_dml_mode_support_and_programming(context, validate_mode);
if (!dml2->config.skip_hw_state_mapping) {
dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
}
@@ -663,7 +484,10 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_copy_clocks_to_dc_state(&out_clks, context);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
- memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
+ if (context->streams[0]->sink->link->dc->caps.is_apu)
+ dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.dml_core_ctx);
+ else
+ memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
//copy for deciding zstate use
@@ -681,7 +505,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
return result;
}
-static bool dml2_validate_only(struct dc_state *context)
+static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2;
unsigned int result = 0;
@@ -705,7 +529,8 @@ static bool dml2_validate_only(struct dc_state *context)
result = pack_and_call_dml_mode_support_ex(dml2,
&dml2->v20.scratch.cur_display_config,
- &dml2->v20.scratch.mode_support_info);
+ &dml2->v20.scratch.mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);
@@ -720,7 +545,8 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d
}
}
-bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate)
+bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -730,17 +556,17 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
/* DML2.1 validation path */
if (dml2->architecture == dml2_architecture_21) {
- out = dml21_validate(in_dc, context, dml2, fast_validate);
+ out = dml21_validate(in_dc, context, dml2, validate_mode);
return out;
}
DC_FP_START();
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate)
- out = dml2_validate_only(context);
+	/* Use dml_validate_only for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ out = dml2_validate_only(context, validate_mode);
else
- out = dml2_validate_and_build_resource(in_dc, context);
+ out = dml2_validate_and_build_resource(in_dc, context, validate_mode);
DC_FP_END();
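
Reviewer-side note: the old fast_validate boolean collapses here into a three-state enum, and everything short of full programming takes the lightweight path. A minimal standalone sketch of that dispatch (enum values taken from this series; the stub functions are hypothetical stand-ins for dml2_validate_only()/dml2_validate_and_build_resource()):

#include <stdbool.h>
#include <stdio.h>

enum dc_validate_mode {
	DC_VALIDATE_MODE_AND_PROGRAMMING,	/* full validation, populates res_ctx */
	DC_VALIDATE_MODE_ONLY,			/* mode-support query only */
	DC_VALIDATE_MODE_AND_STATE_INDEX,	/* mode support plus a state index */
};

static bool validate_only(void) { return true; }	/* hypothetical stub */
static bool validate_and_build(void) { return true; }	/* hypothetical stub */

static bool dispatch(enum dc_validate_mode mode)
{
	/* anything short of full programming takes the lightweight path */
	if (mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
		return validate_only();
	return validate_and_build();
}

int main(void)
{
	printf("%d\n", dispatch(DC_VALIDATE_MODE_ONLY));
	return 0;
}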
@@ -754,8 +580,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
}
@@ -800,9 +626,7 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
// TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21)
- && (in_dc->ctx->dce_version == DCN_VERSION_4_01
- ))
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01))
return dml21_create(in_dc, dml2, config);
// Allocate Mode Lib Ctx
@@ -871,8 +695,8 @@ void dml2_reinit(const struct dc *in_dc,
const struct dml2_configuration_options *config,
struct dml2_context **dml2)
{
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 785226945699..c384e141cebc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -40,6 +40,7 @@ struct dc_sink;
struct dc_stream_state;
struct resource_context;
struct display_stream_compressor;
+struct dc_mcache_params;
// Configuration of the MALL on the SoC
struct dml2_soc_mall_info {
@@ -107,6 +108,7 @@ struct dml2_dc_callbacks {
unsigned int (*get_max_flickerless_instant_vtotal_increase)(
struct dc_stream_state *stream,
bool is_gaming);
+ bool (*allocate_mcache)(struct dc_state *context, const struct dc_mcache_params *mcache_params);
};
struct dml2_dc_svp_callbacks {
@@ -238,7 +240,7 @@ struct dml2_configuration_options {
bool use_clock_dc_limits;
bool gpuvm_enable;
bool force_tdlut_enable;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
/*
@@ -270,7 +272,7 @@ void dml2_reinit(const struct dc *in_dc,
* dml2_validate - Determines if a display configuration is supported or not.
* @in_dc: dc.
* @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX will not populate context.res_ctx.
*
* DML1.0 compatible interface for validation.
*
@@ -293,7 +295,7 @@ void dml2_reinit(const struct dc *in_dc,
bool dml2_validate(const struct dc *in_dc,
struct dc_state *context,
struct dml2_context *dml2,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
/*
* dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info.
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index abf439e743f2..2d70586cef40 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -790,8 +790,7 @@ static bool dpp3_program_blnd_lut(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_blnd_lut(dpp_base, false);
+ dpp3_power_on_blnd_lut(dpp_base, false);
return false;
}
@@ -1204,8 +1203,7 @@ static bool dpp3_program_shaper(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_shaper(dpp_base, false);
+ dpp3_power_on_shaper(dpp_base, false);
return false;
}
@@ -1399,8 +1397,7 @@ static bool dpp3_program_3dlut(struct dpp *dpp_base,
if (params == NULL) {
dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_hdr3dlut(dpp_base, false);
+ dpp3_power_on_hdr3dlut(dpp_base, false);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index 62b7012cda43..f7a373a3d70a 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -138,7 +138,7 @@ bool dpp35_construct(
dpp->base.funcs = &dcn35_dpp_funcs;
// w/a for cursor memory stuck in LS by programming DISPCLK_R_GATE_DISABLE, limit w/a to some ASIC revs
- if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10)
+ if (dpp->base.ctx->asic_id.hw_internal_rev < 0x40)
dpp->dispclk_r_gate_disable = true;
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 97bf26fa3573..36187f890d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -231,7 +231,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.dpp_program_regamma_pwl = NULL,
.dpp_set_pre_degam = dpp3_set_pre_degam,
.dpp_program_input_lut = NULL,
- .dpp_full_bypass = dpp401_full_bypass,
+ .dpp_full_bypass = NULL,
.dpp_setup = dpp401_dpp_setup,
.dpp_program_degamma_pwl = NULL,
.dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index ecaa976e1f52..5a6a861402b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -641,6 +641,7 @@
uint32_t ISHARP_DELTA_DATA; \
uint32_t ISHARP_DELTA_INDEX; \
uint32_t ISHARP_NLDELTA_SOFT_CLIP
+
struct dcn401_dpp_registers {
DPP_REG_VARIABLE_LIST_DCN401;
};
@@ -683,8 +684,6 @@ void dpp401_dscl_set_scaler_manual_scale(
struct dpp *dpp_base,
const struct scaler_data *scl_data);
-void dpp401_full_bypass(struct dpp *dpp_base);
-
void dpp401_dpp_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 1236e0f9a256..7aab77b58869 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -88,30 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-void dpp401_full_bypass(struct dpp *dpp_base)
-{
- struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
-
- /* Input pixel format: ARGB8888 */
- REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
- CNVC_SURFACE_PIXEL_FORMAT, 0x8);
-
- /* Zero expansion */
- REG_SET_3(FORMAT_CONTROL, 0,
- CNVC_BYPASS, 0,
- FORMAT_CONTROL__ALPHA_EN, 0,
- FORMAT_EXPANSION_MODE, 0);
-
- /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
- if (dpp->tf_mask->CM_BYPASS_EN)
- REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
- else
- REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
-
- /* Setting degamma bypass for now */
- REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
-}
-
void dpp401_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes)
@@ -120,10 +96,11 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
- // DCN4 should always do Cursor degamma for Cursor Color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
- cur_rom_en = 1;
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
}
REG_UPDATE_3(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 11535922b5ff..1f53a9f0c0ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -30,6 +30,9 @@
#include "rc_calc.h"
#include "fixed31_32.h"
+#include "clk_mgr.h"
+#include "resource.h"
+
#define DC_LOGGER \
dsc->ctx->logger
@@ -149,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
}
/* Forward Declarations */
+static unsigned int get_min_dsc_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing);
+
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
@@ -183,6 +191,7 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slice_count,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -442,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
return true;
}
-
/* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and
* timing's pixel clock and uncompressed bandwidth.
* If DSC is not possible, leave '*range' untouched.
@@ -458,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
+ unsigned int min_dsc_slice_count;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config = {0};
@@ -469,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range(
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- &options, link_encoding, &config);
+ &options, link_encoding, min_dsc_slice_count, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
@@ -525,20 +536,153 @@ void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp);
}
+
+static void build_dsc_enc_combined_slice_caps(
+ const struct dsc_enc_caps *single_dsc_enc_caps,
+ struct dsc_enc_caps *dsc_enc_caps,
+ unsigned int max_odm_combine_factor)
+{
+ /* 1-16 slice configurations, single DSC */
+ dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw;
+
+ /* 2x DSC's */
+ if (max_odm_combine_factor >= 2) {
+ /* 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+
+ /* 8 + 8 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8;
+ }
+
+ /* 3x DSC's */
+ if (max_odm_combine_factor >= 3) {
+ /* 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+
+ /* 4x DSC's */
+ if (max_odm_combine_factor >= 4) {
+ /* 1 + 1 + 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 + 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 3 + 3 + 3 + 3 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3;
+
+ /* 4 + 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+}
+
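As a reviewer aid, what the slice-cap expansion above computes can be restated with a plain bitmask instead of the driver's bitfield struct (bit n set means an n-slice configuration is supported; this encoding is illustrative only):

#include <stdio.h>

#define SLICES(n) (1u << (n))	/* bit k: k-slice config supported (illustrative) */

static unsigned int combine_caps(unsigned int single, unsigned int max_odm)
{
	unsigned int caps = single;	/* single-DSC configurations carry over */

	if (max_odm >= 2) {		/* two engines, each handling half the slices */
		if (single & SLICES(1)) caps |= SLICES(2);
		if (single & SLICES(2)) caps |= SLICES(4);
		if (single & SLICES(4)) caps |= SLICES(8);
		if (single & SLICES(8)) caps |= SLICES(16);
	}
	if (max_odm >= 3 && (single & SLICES(4)))
		caps |= SLICES(12);	/* 4 + 4 + 4 */
	if (max_odm >= 4) {		/* four engines */
		if (single & SLICES(1)) caps |= SLICES(4);
		if (single & SLICES(2)) caps |= SLICES(8);
		if (single & SLICES(3)) caps |= SLICES(12);
		if (single & SLICES(4)) caps |= SLICES(16);
	}
	return caps;
}

int main(void)
{
	/* a single engine supporting 1-4 slices, combined across up to 4 engines */
	printf("0x%x\n", combine_caps(SLICES(1) | SLICES(2) | SLICES(3) | SLICES(4), 4));
	return 0;
}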
+static void build_dsc_enc_caps(
+ const struct display_stream_compressor *dsc,
+ struct dsc_enc_caps *dsc_enc_caps)
+{
+ unsigned int max_dscclk_khz;
+ unsigned int num_dsc;
+ unsigned int max_odm_combine_factor;
+ struct dsc_enc_caps single_dsc_enc_caps;
+
+ struct dc *dc;
+
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps)
+ return;
+
+ dc = dsc->ctx->dc;
+
+ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool || dc->debug.disable_dsc)
+ return;
+
+ /* get max DSCCLK from clk_mgr */
+ max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK);
+
+ dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz);
+
+ /* global capabilities */
+ dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version;
+ dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth;
+ dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported;
+ dsc_enc_caps->max_slice_width = single_dsc_enc_caps.max_slice_width;
+ dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div;
+ dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw;
+ dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw;
+
+ /* expand per DSC capabilities to global */
+ max_odm_combine_factor = dc->caps.max_odm_combine_factor;
+ num_dsc = dc->res_pool->res_cap->num_dsc;
+ max_odm_combine_factor = min(max_odm_combine_factor, num_dsc);
+ dsc_enc_caps->max_total_throughput_mps =
+ single_dsc_enc_caps.max_total_throughput_mps *
+ max_odm_combine_factor;
+
+	/* check slice counts possible with ODM combine */
+ build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor);
+}
+
+static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
+{
+ return (value + 9) / 10;
+}
+
+static unsigned int get_min_dsc_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing)
+{
+ unsigned int max_dispclk_khz;
+
+	/* derive the pixel-rate limit: default to combined DSC throughput, prefer the real dispclk */
+ max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000;
+ if (dsc && dsc->ctx->dc) {
+ if (dsc->ctx->dc->clk_mgr &&
+ dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) {
+ /* dispclk is available */
+ max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK);
+ }
+ }
+
+ /* validate parameters */
+ if (max_dispclk_khz == 0 || dsc_enc_caps->max_slice_width == 0)
+ return 1;
+
+	/* consider minimum ODM slices required due to
+ * 1) display pipe throughput (dispclk)
+ * 2) max image width per slice
+ */
+ return dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)),
+ max_dispclk_khz), // throughput
+ dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right),
+ dsc_enc_caps->max_slice_width))); // slice width
+}
+
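Since ceil() is monotone, ceil(max(x, y)) equals max(ceil(x), ceil(y)), so the fixed-point expression above reduces to two integer ceiling divisions. A plain-integer restatement for checking the math (unit conversions as in the patch; the example timing numbers are hypothetical):

#include <stdio.h>

static unsigned int div_round_up(unsigned int a, unsigned int b)
{
	return (a + b - 1) / b;
}

static unsigned int min_slices_for_odm(unsigned int pix_clk_100hz,
				       unsigned int max_dispclk_khz,
				       unsigned int pic_width,
				       unsigned int max_slice_width)
{
	/* pixel clock in kHz, rounded up like dsc_div_by_10_round_up() */
	unsigned int pix_clk_khz = div_round_up(pix_clk_100hz, 10);
	unsigned int by_throughput = div_round_up(pix_clk_khz, max_dispclk_khz);
	unsigned int by_width = div_round_up(pic_width, max_slice_width);

	return by_throughput > by_width ? by_throughput : by_width;
}

int main(void)
{
	/* e.g. a ~1188 MHz pixel clock against a 1200 MHz dispclk, 5184-wide slices */
	printf("%u\n", min_slices_for_odm(11880000, 1200000, 3840, 5184));
	return 0;
}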
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
- // This is a static HW query, so we can use any DSC
-
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
- if (dsc) {
- if (!dsc->ctx->dc->debug.disable_dsc)
- dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
- if (dsc->ctx->dc->debug.native422_support)
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || dsc->ctx->dc->debug.disable_dsc)
+ return;
+
+	/* check whether the reported caps are global or per single DCN DSC enc */
+ if (dsc->funcs->dsc_get_enc_caps) {
+ dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+ } else {
+ build_dsc_enc_caps(dsc, dsc_enc_caps);
}
+
+ if (dsc->ctx->dc->debug.native422_support)
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -621,11 +765,6 @@ static bool intersect_dsc_caps(
return true;
}
-static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
-{
- return (value + 9) / 10;
-}
-
static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
@@ -910,11 +1049,11 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slices_h,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
int max_slices_h = 0;
- int min_slices_h = 0;
int num_slices_h = 0;
int pic_width;
int slice_width;
@@ -1018,12 +1157,9 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- min_slices_h = pic_width / dsc_common_caps.max_slice_width;
- if (pic_width % dsc_common_caps.max_slice_width)
- min_slices_h++;
-
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
+ /* increase minimum slice count to meet sink throughput limitations */
while (min_slices_h <= max_slices_h) {
int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h;
if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000)
@@ -1032,14 +1168,12 @@ static bool setup_dsc_config(
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
- is_dsc_possible = (min_slices_h <= max_slices_h);
-
- if (pic_width % min_slices_h != 0)
- min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
-
- if (min_slices_h == 0 && max_slices_h == 0)
- is_dsc_possible = false;
+ /* increase minimum slice count to meet divisibility requirements */
+ while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) {
+ min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
+ }
+ is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0;
if (!is_dsc_possible)
goto done;
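
For reference, the reworked minimum-slice search above is equivalent to: bump min_slices_h through the supported slice counts until each slice's pixel rate fits the sink's per-slice throughput, then keep bumping until pic_width divides evenly. A hedged standalone restatement (the slice-count table and example numbers are illustrative, not the driver's actual caps):

#include <stdio.h>

static const int counts[] = { 1, 2, 3, 4, 8, 12, 16 };	/* illustrative set */
#define N (int)(sizeof(counts) / sizeof(counts[0]))

static int next_count(int cur)
{
	for (int i = 0; i < N; i++)
		if (counts[i] > cur)
			return counts[i];
	return cur + 1;	/* falls off the table -> will exceed max and fail */
}

static int pick_min_slices(int min_h, int max_h, int pic_width,
			   int pix_clk_khz, int sink_mps)
{
	/* meet sink per-slice throughput */
	while (min_h <= max_h && pix_clk_khz / min_h > sink_mps * 1000)
		min_h = next_count(min_h);
	/* meet divisibility: every slice must be the same width */
	while (min_h <= max_h && pic_width % min_h != 0)
		min_h = next_count(min_h);
	return min_h <= max_h ? min_h : -1;	/* -1: DSC not possible */
}

int main(void)
{
	/* 3840 wide, ~1188 MHz pixel clock, 340 MP/s per-slice sink limit -> 4 */
	printf("%d\n", pick_min_slices(1, 8, 3840, 1188000, 340));
	return 0;
}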
@@ -1162,12 +1296,19 @@ bool dc_dsc_compute_config(
{
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
-
+ unsigned int min_dsc_slice_count;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, options, link_encoding, dsc_cfg);
+ timing,
+ options,
+ link_encoding,
+ min_dsc_slice_count,
+ dsc_cfg);
return is_dsc_possible;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index 75128fd34306..bd1b9aef6d5c 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -57,13 +57,6 @@ static const struct dsc_funcs dcn20_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
-
/* API functions (external or via structure->function_pointer) */
void dsc2_construct(struct dcn20_dsc *dsc,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index 1fb90b52b814..a9c04fc95bd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -457,6 +457,12 @@
type DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \
type DSCRM_DSC_FORWARD_EN_STATUS
+enum dsc_bits_per_comp {
+ DSC_BPC_8 = 8,
+ DSC_BPC_10 = 10,
+ DSC_BPC_12 = 12,
+ DSC_BPC_UNKNOWN
+};
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 4893b793fec0..7bd92ae8b13e 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -9,19 +9,14 @@
#include "dsc/dscc_types.h"
#include "dsc/rc_calc.h"
-#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000
-#define MAX_DSC_UNIT_COMBINE 4
-
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
/* Object I/F functions */
//static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
//static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
static const struct dsc_funcs dcn401_dsc_funcs = {
- .dsc_get_enc_caps = dsc401_get_enc_caps,
.dsc_read_state = dsc401_read_state,
.dsc_validate_stream = dsc401_validate_stream,
.dsc_set_config = dsc401_set_config,
@@ -30,6 +25,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_disable = dsc401_disable,
.dsc_disconnect = dsc401_disconnect,
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear,
+ .dsc_get_single_enc_caps = dsc401_get_single_enc_caps,
};
/* Macro definitions for REG_SET macros */
@@ -45,12 +41,6 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
/* API functions (external or via structure->function_pointer) */
@@ -72,22 +62,14 @@ void dsc401_construct(struct dcn401_dsc *dsc,
dsc->max_image_width = 5184;
}
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
{
- int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ;
-
dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
- /* 1 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0;
- /* 2 slice is only supported with 1 or 2 DSC units */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0;
- /* 3 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ? 1 : 0;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1;
dsc_enc_caps->lb_bit_depth = 13;
dsc_enc_caps->is_block_pred_supported = true;
@@ -101,7 +83,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
- dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE;
+ dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
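
The new throughput derivation ties encoder capability to the reported DSCCLK rather than a hard-coded constant. A tiny sketch of the conversion (the factor of 3 is taken verbatim from the patch; reading it as pixels processed per DSCCLK cycle is my assumption, and the example clock is hypothetical):

#include <stdio.h>

/* kHz * 3 pixels-per-cycle (assumed) / 1000 -> megapixels per second */
static unsigned int throughput_mps(unsigned int max_dscclk_khz)
{
	return max_dscclk_khz * 3 / 1000;
}

int main(void)
{
	/* e.g. a hypothetical 1334 MHz DSCCLK yields ~4002 MP/s */
	printf("%u\n", throughput_mps(1334000));
	return 0;
}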
@@ -197,7 +179,7 @@ void dsc401_disable(struct display_stream_compressor *dsc)
DSC_CLOCK_EN, 0);
}
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
index e3ca70058e64..7acd57eb4f42 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
@@ -341,5 +341,6 @@ void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_c
void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
void dsc401_disable(struct display_stream_compressor *dsc);
void dsc401_disconnect(struct display_stream_compressor *dsc);
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index 1ebce5426a58..b0bd1f9425b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -108,6 +108,7 @@ struct dsc_funcs {
void (*dsc_disable)(struct display_stream_compressor *dsc);
void (*dsc_disconnect)(struct display_stream_compressor *dsc);
void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc);
+ void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index b099989d9364..942d9f0b6df2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -411,6 +411,20 @@ enum dc_irq_source dal_irq_get_rx_source(
}
}
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_DCI2C_RR_DDC1 +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
index 3f13a744d07d..01ec451004f7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -62,7 +62,7 @@ static void dal_hw_hpd_destroy(
*ptr = NULL;
}
-static enum gpio_result get_value(
+static enum gpio_result dal_hw_hpd_get_value(
const struct hw_gpio_pin *ptr,
uint32_t *value)
{
@@ -85,7 +85,7 @@ static enum gpio_result get_value(
return dal_hw_gpio_get_value(ptr, value);
}
-static enum gpio_result set_config(
+static enum gpio_result dal_hw_hpd_set_config(
struct hw_gpio_pin *ptr,
const struct gpio_config_data *config_data)
{
@@ -104,9 +104,9 @@ static enum gpio_result set_config(
static const struct hw_gpio_pin_funcs funcs = {
.destroy = dal_hw_hpd_destroy,
.open = dal_hw_gpio_open,
- .get_value = get_value,
+ .get_value = dal_hw_hpd_get_value,
.set_value = dal_hw_gpio_set_value,
- .set_config = set_config,
+ .set_config = dal_hw_hpd_set_config,
.change_mode = dal_hw_gpio_change_mode,
.close = dal_hw_gpio_close,
};
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
index 2546224b326a..e4496ad203b2 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
@@ -132,9 +132,9 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}
-
- dcn21_dchvm_init(hubbub);
-
+ if (!hubbub1->base.ctx->dc->config.skip_riommu_prefetch_wa) {
+ dcn21_dchvm_init(hubbub);
+ }
return hubbub1->num_vmid;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index c7765e6f09e6..f8f991785d4f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -666,10 +666,29 @@ struct dcn_mi_mask {
DCN_HUBP_REG_FIELD_LIST(uint32_t);
};
+struct dcn_fl_regs_st {
+ uint32_t lut_enable;
+ uint32_t lut_done;
+ uint32_t lut_addr_mode;
+ uint32_t lut_width;
+ uint32_t lut_tmz;
+ uint32_t lut_crossbar_sel_r;
+ uint32_t lut_crossbar_sel_g;
+ uint32_t lut_crossbar_sel_b;
+ uint32_t lut_addr_hi;
+ uint32_t lut_addr_lo;
+ uint32_t refcyc_3dlut_group;
+ uint32_t lut_fl_bias;
+ uint32_t lut_fl_scale;
+ uint32_t lut_fl_mode;
+ uint32_t lut_fl_format;
+};
+
struct dcn_hubp_state {
struct _vcs_dpi_display_dlg_regs_st dlg_attr;
struct _vcs_dpi_display_ttu_regs_st ttu_attr;
struct _vcs_dpi_display_rq_regs_st rq_regs;
+ struct dcn_fl_regs_st fl_regs;
uint32_t pixel_format;
uint32_t inuse_addr_hi;
uint32_t inuse_addr_lo;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 5ed195377a6c..705b98b1b6cc 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
}
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0);
+ REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits);
}
void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
@@ -1032,7 +1032,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
- .hubp_clear_tiling = hubp2_clear_tiling,
+ .hubp_clear_tiling = hubp401_clear_tiling,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 6e1d4c90ddd4..608e6153fa68 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -333,7 +333,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled);
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits);
void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index 40ecebea1ba0..bee617ca0838 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+HWSS_DCE60 = dce60_hwseq.o
+
+AMD_DAL_HWSS_DCE60 = $(addprefix $(AMDDALPATH)/dc/hwss/dce60/,$(HWSS_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE60)
+endif
+
+###############################################################################
+
+HWSS_DCE80 = dce80_hwseq.o
+
+AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
+
+###############################################################################
+
HWSS_DCE = dce_hwseq.o
AMD_DAL_HWSS_DCE = $(addprefix $(AMDDALPATH)/dc/hwss/dce/,$(HWSS_DCE))
@@ -65,14 +83,6 @@ AMD_DAL_HWSS_DCE120 = $(addprefix $(AMDDALPATH)/dc/hwss/dce120/,$(HWSS_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE120)
-###############################################################################
-
-HWSS_DCE80 = dce80_hwseq.o
-
-AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 5656d10368ad..4ea13d0bf815 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -952,8 +952,8 @@ void dce110_edp_backlight_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t pwrseq_instance = 0;
- unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
- unsigned int post_T7_delay = OLED_POST_T7_DELAY;
+ unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0);
+ unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_POST_T7_DELAY : 0);
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@@ -1069,7 +1069,8 @@ void dce110_edp_backlight_control(
if (!enable) {
/*follow oem panel config's requirement*/
pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
- msleep(pre_T11_delay);
+ if (pre_T11_delay)
+ msleep(pre_T11_delay);
}
}
@@ -1185,8 +1186,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- if (dccg && dccg->funcs->set_dtbclk_dto)
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ if (!(dc->ctx->dce_version >= DCN_VERSION_3_5)) {
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
@@ -1220,8 +1223,11 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
+ if (hws && hws->wa_state.skip_blank_stream)
+ return;
+
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- if (!link->skip_implict_edp_power_control)
+ if (!link->skip_implict_edp_power_control && hws)
hws->funcs.edp_backlight_control(link, false);
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
@@ -1375,7 +1381,7 @@ static void populate_audio_dp_link_info(
}
}
-static void build_audio_output(
+void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output)
@@ -1680,6 +1686,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+ /* Temporary workaround to perform DSC programming ahead of stream enablement
+ * for smartmux/SPRS
+ * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, true);
+ }
+
if (!stream->dpms_off)
dc->link_srv->set_dpms_on(context, pipe_ctx);
@@ -1921,6 +1940,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
can_apply_edp_fast_boot = dc_validate_boot_timing(dc,
edp_stream->sink, &edp_stream->timing);
+
+	// On mux platforms the default value is false.
+	// Disable fast boot during a mux switch; the flag is
+	// cleared once the switch completes.
+ if (dc->is_switch_in_progress_dest && edp_link->is_dds)
+ can_apply_edp_fast_boot = false;
+
edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
if (can_apply_edp_fast_boot) {
DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n");
@@ -1964,6 +1990,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
if (edp_with_sink_num)
edp_link_with_sink = edp_links_with_sink[0];
+ // During a mux switch, powering down the HW blocks and then enabling
+ // the link via a DPCD SET_POWER write causes a brief flash
+ keep_edp_vdd_on |= dc->is_switch_in_progress_dest;
+
if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
if (edp_link_with_sink && !keep_edp_vdd_on) {
/*turn off backlight before DP_blank and encoder powered down*/
@@ -2763,12 +2793,12 @@ static void dce110_enable_per_frame_crtc_position_reset(
}
-static void init_pipes(struct dc *dc, struct dc_state *context)
+static void dce110_init_pipes(struct dc *dc, struct dc_state *context)
{
// Do nothing
}
-static void init_hw(struct dc *dc)
+static void dce110_init_hw(struct dc *dc)
{
int i;
struct dc_bios *bp;
@@ -3327,7 +3357,7 @@ void dce110_disable_link_output(struct dc_link *link,
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
- .init_hw = init_hw,
+ .init_hw = dce110_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
.post_unlock_program_front_end = dce110_post_unlock_program_front_end,
@@ -3371,7 +3401,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
};
static const struct hwseq_private_funcs dce110_private_funcs = {
- .init_pipes = init_pipes,
+ .init_pipes = dce110_init_pipes,
.set_input_transfer_func = dce110_set_input_transfer_func,
.set_output_transfer_func = dce110_set_output_transfer_func,
.power_down = dce110_power_down,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 06789ac3a224..7cd8c1576988 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -110,5 +110,9 @@ void dce110_enable_dp_link_output(
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
+void build_audio_output(
+ struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
index 44b56490e152..a08e9f9eec17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
-#include "dce60_hw_sequencer.h"
+#include "dce60_hwseq.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hwseq.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
index f3b2d8b60d5b..f3b2d8b60d5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 912f96323ed6..39910f73ecd0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -94,6 +94,128 @@ static void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
+/*
+ * Delay until we have passed the busy-until point, after which we can
+ * do the necessary locking/programming for consecutive full updates
+ */
+void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
+{
+ struct crtc_position position;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int vpos, frame_count;
+ uint32_t vupdate_start, vupdate_end, vblank_start;
+ unsigned int lines_to_vupdate, us_to_vupdate;
+ unsigned int us_per_line, us_vupdate;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ if (pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream)
+ return;
+
+ if (!pipe_ctx->wait_is_required)
+ return;
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ frame_count = tg->funcs->get_frame_count(tg);
+
+ if (frame_count - pipe_ctx->wait_frame_count > 2)
+ return;
+
+ vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;
+
+ if (vpos >= vupdate_start && vupdate_start >= vblank_start)
+ lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
+ else
+ lines_to_vupdate = vupdate_start - vpos;
+
+ us_per_line =
+ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+
+ if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
+ us_to_vupdate = 0;
+
+ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+
+ if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
+		//surface updates come in at a high IRQL
+ pipe_ctx->wait_is_required = true;
+ return;
+ }
+
+ fsleep(us_to_vupdate + us_vupdate);
+
+ //clear
+ pipe_ctx->next_vupdate = 0;
+ pipe_ctx->wait_frame_count = 0;
+ pipe_ctx->wait_is_required = false;
+}
+
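The microsecond math in the wait helper above is straightforward; a standalone check of the line-time conversion it performs (the 4k60-style timing numbers are hypothetical):

#include <stdio.h>

int main(void)
{
	/* line time in us = h_total / pixel_clock = h_total * 10000 / pix_clk_100hz */
	unsigned int h_total = 4400, pix_clk_100hz = 5940000;	/* ~594 MHz */
	unsigned int us_per_line = h_total * 10000u / pix_clk_100hz;
	unsigned int lines_to_vupdate = 1000;

	printf("%u us/line, %u us to vupdate\n",
	       us_per_line, lines_to_vupdate * us_per_line);
	return 0;
}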
+/*
+ * On pipe unlock and programming, indicate the pipe will be busy
+ * until some frame and line (vupdate). Consecutive full updates must
+ * wait for the previous update to latch before programming the next one.
+ */
+void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ uint32_t vupdate_start, vupdate_end;
+ struct crtc_position position;
+ unsigned int vpos, cur_frame;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ ASSERT(optc1->max_frame_count != 0);
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ pipe_ctx->next_vupdate = vupdate_start;
+
+ cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (vpos < vupdate_start) {
+ pipe_ctx->wait_frame_count = cur_frame;
+ } else {
+ if (cur_frame + 1 > optc1->max_frame_count)
+ pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
+ else
+ pipe_ctx->wait_frame_count = cur_frame + 1;
+ }
+
+ pipe_ctx->wait_is_required = true;
+}
+
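The frame-counter bookkeeping at the end of the helper above wraps against the OTG's max frame count when vpos has already passed vupdate_start (the vpos < vupdate_start branch simply records the current frame). A minimal restatement of just the wrapping branch (max value illustrative):

#include <stdio.h>

static unsigned int next_wait_frame(unsigned int cur_frame,
				    unsigned int max_frame_count)
{
	/* target the next frame, wrapping like the hardware counter does */
	if (cur_frame + 1 > max_frame_count)
		return cur_frame + 1 - max_frame_count;
	return cur_frame + 1;
}

int main(void)
{
	printf("%u\n", next_wait_frame(0xffffff, 0xffffff));	/* wraps to 1 */
	return 0;
}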
void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
@@ -205,6 +327,35 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
}
}
+ DTN_INFO("\n=======HUBP FL======\n");
+ DTN_INFO(
+ "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
+
+ if (!s->blank_en) {
+ DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
+ pool->hubps[i]->inst,
+ fl_regs->lut_enable,
+ fl_regs->lut_done,
+ fl_regs->lut_addr_mode,
+ fl_regs->lut_width,
+ fl_regs->lut_tmz,
+ fl_regs->lut_crossbar_sel_r,
+ fl_regs->lut_crossbar_sel_g,
+ fl_regs->lut_crossbar_sel_b,
+ fl_regs->lut_addr_hi,
+ fl_regs->lut_addr_lo,
+ fl_regs->refcyc_3dlut_group,
+ fl_regs->lut_fl_bias,
+ fl_regs->lut_fl_scale,
+ fl_regs->lut_fl_mode,
+ fl_regs->lut_fl_format);
+ DTN_INFO("\n");
+ }
+ }
+
DTN_INFO("\n=========RQ========\n");
DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
@@ -389,6 +540,36 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.mpc.num_3dluts,
dc->caps.color.mpc.ogam_ram,
dc->caps.color.mpc.ocsc);
+ DTN_INFO("===== MPC RMCM 3DLUT =====\n");
+ DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n");
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n",
+ i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur,
+ s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask,
+ s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel,
+ s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done,
+ s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode);
+ }
+ DTN_INFO("\n");
+ DTN_INFO("===== MPC RMCM Shaper =====\n");
+ DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n");
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n",
+ i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur,
+ s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b,
+ s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b,
+ s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state,
+ s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode);
+ }
}
void dcn10_log_hw_state(struct dc *dc,
@@ -2664,7 +2845,6 @@ void dcn10_update_visual_confirm_color(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
if (mpc->funcs->set_bg_color) {
- memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
index 42ffd1e1299c..57d30ea225f2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
@@ -50,6 +50,13 @@ void dcn10_optimize_bandwidth(
void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dcn10_wait_for_pipe_update_if_needed(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool is_surface_update_only);
+void dcn10_set_wait_for_update_needed_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 846c9c51f2d9..3207addbd4eb 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -76,6 +76,7 @@ void dcn20_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
+ bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth"
@@ -89,15 +90,15 @@ void dcn20_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
- dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ if (dpp->funcs->dpp_get_gamut_remap) {
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ is_gamut_remap_available = true;
+ }
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld",
+ DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s",
dpp->inst,
(s.dgam_lut_mode == 0) ? "Bypass" :
((s.dgam_lut_mode == 1) ? "sRGB" :
@@ -114,10 +115,17 @@ void dcn20_log_color_state(struct dc *dc,
(s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
(s.rgam_lut_mode == 1) ? "RAM A" :
- ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"),
+ ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"));
+
+ if (is_gamut_remap_available) {
+ DTN_INFO(" %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+
(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
- ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
- "SW"),
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
s.gamut_remap.temperature_matrix[0].value,
s.gamut_remap.temperature_matrix[1].value,
s.gamut_remap.temperature_matrix[2].value,
@@ -130,6 +138,8 @@ void dcn20_log_color_state(struct dc *dc,
s.gamut_remap.temperature_matrix[9].value,
s.gamut_remap.temperature_matrix[10].value,
s.gamut_remap.temperature_matrix[11].value);
+ }
+
DTN_INFO("\n");
}
DTN_INFO("\n");
@@ -273,14 +283,13 @@ void dcn20_setup_gsl_group_as_lock(
}
/* at this point we want to program whether it's to enable or disable */
- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) {
pipe_ctx->stream_res.tg->funcs->set_gsl(
pipe_ctx->stream_res.tg,
&gsl);
-
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL)
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
} else
BREAK_TO_DEBUGGER();
}
@@ -946,7 +955,7 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
+ fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
@@ -1961,14 +1970,6 @@ static void dcn20_program_pipe(
pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
- if (hws->funcs.populate_mcm_luts) {
- if (pipe_ctx->plane_state) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
- }
- }
-
if (pipe_ctx->plane_state &&
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
@@ -2053,7 +2054,7 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state) {
ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
@@ -2482,7 +2483,7 @@ bool dcn20_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
return false;
/* apply updated bandwidth parameters */
@@ -2806,6 +2807,8 @@ void dcn20_reset_back_end_for_pipe(
{
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct dtbclk_dto_params dto_params = {0};
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
@@ -2866,6 +2869,13 @@ void dcn20_reset_back_end_for_pipe(
&pipe_ctx->link_res, pipe_ctx->stream->signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
}
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg
+ && dc->ctx->dce_version >= DCN_VERSION_3_5) {
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.timing = &pipe_ctx->stream->timing;
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index e89ebfda4873..37a239219dfe 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -74,6 +74,7 @@ void dcn30_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
+ bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode"
@@ -88,16 +89,16 @@ void dcn30_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
- dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+ if (dpp->funcs->dpp_get_gamut_remap) {
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ is_gamut_remap_available = true;
+ }
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s"
- " %12s "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld",
+ DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s",
dpp->inst,
s.pre_dgam_mode,
(s.pre_dgam_select == 0) ? "sRGB" :
@@ -121,7 +122,14 @@ void dcn30_log_color_state(struct dc *dc,
(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
(s.rgam_lut_mode == 0) ? "Bypass" :
((s.rgam_lut_mode == 1) ? "RAM A" :
- "RAM B"),
+ "RAM B"));
+
+ if (is_gamut_remap_available) {
+ DTN_INFO(" %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+
(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
"SW"),
@@ -137,6 +145,8 @@ void dcn30_log_color_state(struct dc *dc,
s.gamut_remap.temperature_matrix[9].value,
s.gamut_remap.temperature_matrix[10].value,
s.gamut_remap.temperature_matrix[11].value);
+ }
+
DTN_INFO("\n");
}
DTN_INFO("\n");
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index f38340aa3f15..8ba934b83957 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -526,9 +526,15 @@ static void dcn31_reset_back_end_for_pipe(
link = pipe_ctx->stream->link;
+ if (dc->hwseq)
+ dc->hwseq->wa_state.skip_blank_stream = false;
+
if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) &&
- (link->connector_signal == SIGNAL_TYPE_EDP))
+ (link->connector_signal == SIGNAL_TYPE_EDP)) {
dc->hwss.blank_stream(pipe_ctx);
+ if (dc->hwseq)
+ dc->hwseq->wa_state.skip_blank_stream = true;
+ }
pipe_ctx->stream_res.tg->funcs->set_dsc_config(
pipe_ctx->stream_res.tg,
@@ -556,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe(
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
+ /* Temporary workaround to perform DSC programming ahead of pipe reset
+ * for smartmux/SPRS
+ * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
+ }
+
/* free acquired resources */
if (pipe_ctx->stream_res.audio) {
/*disable az_endpoint*/
@@ -570,7 +589,8 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.audio = NULL;
}
}
-
+ if (dc->hwseq)
+ dc->hwseq->wa_state.skip_blank_stream = false;
pipe_ctx->stream = NULL;
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index be26c925fdfa..e68f21fd5f0f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -84,6 +84,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
+ struct dcn_dsc_state dsc_state = {0};
+
+ if (!dsc) {
+ DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+
+ if (dsc->funcs->dsc_read_state) {
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+ if (!dsc_state.dsc_fw_en) {
+ DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ }
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
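
update_dsc_on_stream now bails out early when the pipe has no DSC block or when dsc_read_state reports the block already disabled in hardware. A self-contained sketch of that guard with simplified stand-in types; only dsc_fw_en and dsc_read_state mirror the hunk:

#include <stdbool.h>

struct dcn_dsc_state { bool dsc_fw_en; };

struct dsc;
struct dsc_funcs {
	void (*dsc_read_state)(struct dsc *dsc, struct dcn_dsc_state *s);
};
struct dsc { const struct dsc_funcs *funcs; };

static bool dsc_update_allowed(struct dsc *dsc)
{
	struct dcn_dsc_state state = {0};

	if (!dsc)
		return false;            /* no DSC block on this pipe */
	if (dsc->funcs && dsc->funcs->dsc_read_state) {
		dsc->funcs->dsc_read_state(dsc, &state);
		if (!state.dsc_fw_en)
			return false;    /* already disabled in hardware */
	}
	return true;
}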
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index cd0adf72b223..416b1dca3dac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1063,15 +1063,17 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
ASSERT(odm_dsc);
+ if (!odm_dsc)
+ continue;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
@@ -1181,6 +1183,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
+ struct dce_hwseq *hws = stream->ctx->dc->hwseq;
two_pix_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
@@ -1201,7 +1204,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ if ((odm_combine_factor == 2) || (hws->funcs.is_dp_dig_pixel_rate_div_policy &&
+ hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx)))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
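
In the k1/k2 hunk just above, the direct call to the DCN32 helper is replaced by the optional hwseq hook, with the default divider kept when the hook is absent. A sketch under those assumptions, with the PIXEL_RATE_DIV_* enum reduced to plain integers:

#include <stdbool.h>

struct pipe_ctx;

struct hwseq_hooks {
	bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
};

/* Returns the k2 divider as a plain integer: 4 stands for
 * PIXEL_RATE_DIV_BY_4, 2 for PIXEL_RATE_DIV_BY_2. */
static int pick_k2_div(const struct hwseq_hooks *hooks,
		       struct pipe_ctx *pipe_ctx, int odm_combine_factor)
{
	if (odm_combine_factor == 2 ||
	    (hooks->is_dp_dig_pixel_rate_div_policy &&
	     hooks->is_dp_dig_pixel_rate_div_policy(pipe_ctx)))
		return 2;
	return 4;
}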
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 922b8d71cf1a..a267f574b619 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -241,11 +241,6 @@ void dcn35_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
- if (res_pool->dccg->funcs->dccg_root_gate_disable_control) {
- for (i = 0; i < res_pool->pipe_count; i++)
- res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0);
- }
-
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -901,12 +896,18 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
-
+ /* make sure DPPCLK is on */
+ dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true);
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
@@ -923,6 +924,7 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
// Program system aperture settings
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) =\n", __func__, dpp->inst);
if (!pipe_ctx->top_pipe
&& pipe_ctx->plane_state
@@ -938,6 +940,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dccg *dccg = dc->res_pool->dccg;
+
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
@@ -955,7 +959,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
-/*to do, need to support both case*/
+ dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false);
+
hubp->power_gated = true;
hubp->funcs->hubp_reset(hubp);
@@ -967,6 +972,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->top_pipe = NULL;
pipe_ctx->bottom_pipe = NULL;
pipe_ctx->plane_state = NULL;
+ //DC_LOG_DEBUG("%s: dpp_inst(%d)=\n", __func__, dpp->inst);
+
}
void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
@@ -1040,6 +1047,15 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (dc->caps.sequential_ono) {
update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
+ pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+ update_state->pg_pipe_res_update[PG_DPP][j] = false;
+ }
+ }
}
}
@@ -1186,6 +1202,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
if (dc->caps.sequential_ono) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
+ update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
+ update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
+ update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (new_pipe->plane_res.hubp &&
+ new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+ update_state->pg_pipe_res_update[PG_DPP][j] = true;
+ }
+ }
+ }
+ }
+
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
@@ -1543,7 +1578,7 @@ static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
const struct dc *dc = pipe_ctx->stream->link->dc;
- if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false)
return false;
// Not necessary for MST configurations
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 6a82a865209c..a3ccf805bd16 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -168,6 +168,8 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn35_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 902a96940a01..58f2be2a326b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -158,10 +158,12 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.set_mcm_luts = dcn32_set_mcm_luts,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn351_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 5489f3d431f6..cc9f40d97af2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -2,6 +2,8 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "os_types.h"
#include "dm_services.h"
#include "basics/dc_common.h"
#include "dm_helpers.h"
@@ -49,7 +51,7 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void dcn401_initialize_min_clocks(struct dc *dc)
+void dcn401_initialize_min_clocks(struct dc *dc)
{
struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
@@ -143,13 +145,8 @@ void dcn401_init_hw(struct dc *dc)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// mark dcmode limits present if any clock has distinct AC and DC values from SMU
- dc->caps.dcmode_power_limits_present =
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz);
+ dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
+ dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
}
// Initialize the dccg
@@ -407,17 +404,16 @@ void dcn401_populate_mcm_luts(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
union mcm_lut_params m_lut_params;
enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
- enum hubp_3dlut_fl_format format;
+ enum hubp_3dlut_fl_format format = 0;
enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_width width = 0;
enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
- bool is_17x17x17 = true;
bool rval;
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
@@ -442,7 +438,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
/* Shaper */
- if (mcm_luts.shaper) {
+ if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.shaper->pwl;
@@ -454,11 +450,11 @@ void dcn401_populate_mcm_luts(struct dc *dc,
m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
if (m_lut_params.pwl) {
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->mcm.populate_lut)
+ mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
}
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id);
}
/* 3DLUT */
@@ -467,6 +463,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (hubp->funcs->hubp_enable_3dlut_fl)
hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
+
if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
if (mpc->funcs->populate_lut)
@@ -476,16 +473,35 @@ void dcn401_populate_mcm_luts(struct dc *dc,
mpcc_id);
}
break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
+ case DC_CM2_GPU_MEM_SIZE_171717:
+ width = hubp_3dlut_fl_width_17;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ width = hubp_3dlut_fl_width_transformed;
+ break;
+ default:
+ //TODO: handle default case
+ break;
+ }
+
+ //check for support
+ if (mpc->funcs->mcm.is_config_supported &&
+ !mpc->funcs->mcm.is_config_supported(width))
+ break;
if (mpc->funcs->program_lut_read_write_control)
mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
if (mpc->funcs->program_lut_mode)
mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_3dlut_size)
- mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id);
+
if (hubp->funcs->hubp_program_3dlut_fl_addr)
hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
+
+ if (mpc->funcs->mcm.program_bit_depth)
+ mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+
switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
mode = hubp_3dlut_fl_mode_native_1;
@@ -512,7 +528,6 @@ void dcn401_populate_mcm_luts(struct dc *dc,
switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- default:
format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
break;
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
@@ -524,11 +539,19 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
if (hubp->funcs->hubp_program_3dlut_fl_format)
hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
- if (hubp->funcs->hubp_update_3dlut_fl_bias_scale)
+ if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
+ mpc->funcs->mcm.program_bias_scale) {
+ mpc->funcs->mcm.program_bias_scale(mpc,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
+ mpcc_id);
hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
+ }
+ //navi 4x has a bug where red and blue are swapped and need to be worked around here
+ //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
default:
@@ -540,21 +563,16 @@ void dcn401_populate_mcm_luts(struct dc *dc,
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
+ crossbar_bit_slice_cr_r,
crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b,
- crossbar_bit_slice_cr_r);
+ crossbar_bit_slice_cb_b);
+
+ if (mpc->funcs->mcm.program_lut_read_write_control)
+ mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
+
+ if (mpc->funcs->mcm.program_3dlut_size)
+ mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
- switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_171717:
- default:
- width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- width = hubp_3dlut_fl_width_transformed;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_width)
- hubp->funcs->hubp_program_3dlut_fl_width(hubp, width);
if (mpc->funcs->update_3dlut_fast_load_select)
mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
@@ -1980,23 +1998,15 @@ void dcn401_program_pipe(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
- if (pipe_ctx->update_flags.raw ||
- (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
- pipe_ctx->stream->update_flags.raw)
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw))
dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
- if (hws->funcs.populate_mcm_luts) {
- if (pipe_ctx->plane_state) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
- }
- }
-
if (pipe_ctx->plane_state &&
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
@@ -2081,7 +2091,7 @@ void dcn401_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state) {
if (pipe->plane_state->triplebuffer_flips)
BREAK_TO_DEBUGGER();
@@ -2371,7 +2381,7 @@ bool dcn401_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
return false;
/* apply updated bandwidth parameters */
@@ -2622,10 +2632,12 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
DC_LOGGER_INIT(dc->ctx->logger);
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ }
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, dpp->inst, false);
@@ -2636,7 +2648,7 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
- if (org_ip_request_cntl == 0)
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
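
dcn401_plane_atomic_power_down now checks REG(DC_IP_REQUEST_CNTL) before reading or forcing it, since not every ASIC variant exposes the register. A sketch of the save/force/restore shape with the register modeled as an optional pointer instead of the driver's REG()/REG_GET()/REG_SET() macros:

#include <stdint.h>

static void power_down_with_ip_request(volatile uint32_t *ip_request_cntl)
{
	uint32_t org = 0;

	if (ip_request_cntl) {              /* register present on this variant */
		org = *ip_request_cntl;
		if (org == 0)
			*ip_request_cntl = 1;   /* force IP register access on */
	}

	/* ... DPP/HUBP power gating and resets happen here ... */

	if (ip_request_cntl && org == 0)
		*ip_request_cntl = 0;       /* restore the original gating state */
}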
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 781cf0efccc6..2621b7725267 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -109,4 +109,5 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+void dcn401_initialize_min_clocks(struct dc *dc);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index c8b5ed834579..9df8030e37f7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -195,6 +195,8 @@ enum block_sequence_func {
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
+ /* This must be the last value in this enum, add new ones above */
+ HWSS_BLOCK_SEQUENCE_FUNC_COUNT
};
struct block_sequence {
@@ -202,6 +204,8 @@ struct block_sequence {
enum block_sequence_func func;
};
+#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES)
+
struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
/* Embedded Display Related */
@@ -498,6 +502,9 @@ void get_hdr_visual_confirm_color(
void get_mpctree_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color);
void get_vabc_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
@@ -534,13 +541,13 @@ void set_drr_and_clear_adjust_pending(
struct drr_params *params);
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps);
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
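
With HWSS_BLOCK_SEQUENCE_FUNC_COUNT kept as the last enumerator, MAX_HWSS_BLOCK_SEQUENCE_SIZE tracks the enum automatically, so the block_sequence array (changed from a magic 100 in core_types.h later in this patch) can never be outgrown by a new step. A minimal stand-alone version of the idiom; the MAX_PIPES value here is illustrative only:

#define SKETCH_MAX_PIPES 6  /* illustrative; the real MAX_PIPES differs */

enum step_func {
	STEP_LOCK,
	STEP_PROGRAM,
	STEP_UNLOCK,
	/* new steps must be added above the count sentinel */
	STEP_FUNC_COUNT
};

#define MAX_SEQUENCE_SIZE (STEP_FUNC_COUNT * SKETCH_MAX_PIPES)

struct step { enum step_func func; };

/* the array bound now tracks the enum: adding a step grows it automatically */
static struct step sequence[MAX_SEQUENCE_SIZE];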
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 22a5d4a03c98..1e2d247fbbac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -49,6 +49,7 @@ struct hwseq_wa_state {
bool DEGVIDCN10_253_applied;
bool disallow_self_refresh_during_multi_plane_transition_applied;
unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame;
+ bool skip_blank_stream;
};
struct pipe_ctx;
@@ -183,6 +184,8 @@ struct hwseq_private_funcs {
struct dc_cm2_func_luts mcm_luts,
bool lut_bank_a);
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
+ void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
+ void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index b5afd8c3103d..82085d9c3f40 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -26,6 +26,8 @@
#ifndef _CORE_STATUS_H_
#define _CORE_STATUS_H_
+#include "dc_hw_types.h"
+
enum dc_status {
DC_OK = 1,
@@ -56,6 +58,8 @@ enum dc_status {
DC_NO_LINK_ENC_RESOURCE = 26,
DC_FAIL_DP_PAYLOAD_ALLOCATION = 27,
DC_FAIL_DP_LINK_BANDWIDTH = 28,
+ DC_FAIL_HW_CURSOR_SUPPORT = 29,
+ DC_FAIL_DP_TUNNEL_BW_VALIDATE = 30,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d0021f25f3d8..f0d7185153b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -65,6 +65,9 @@ struct resource_pool;
struct dc_state;
struct resource_context;
struct clk_bw_params;
+struct dc_mcache_params;
+
+#define MAX_RMCM_INST 2
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
@@ -78,11 +81,10 @@ struct resource_funcs {
/* Create a minimal link encoder object with no dc_link object
* associated with it. */
struct link_encoder *(*link_enc_create_minimal)(struct dc_context *ctx, enum engine_id eng_id);
-
- bool (*validate_bandwidth)(
+ enum dc_status (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void (*calculate_wm_and_dlg)(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -107,7 +109,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
/*
* Algorithm for assigning available link encoders to links.
@@ -218,6 +220,16 @@ struct resource_funcs {
int (*get_power_profile)(const struct dc_state *context);
unsigned int (*get_det_buffer_size)(const struct dc_state *context);
unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
+ unsigned int (*get_max_hw_cursor_size)(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+ bool (*program_mcache_pipe_config)(struct dc_state *context,
+ const struct dc_mcache_params *mcache_params);
+ enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output);
};
struct audio_support{
@@ -276,6 +288,7 @@ struct resource_pool {
struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
struct dc_3dlut *mpc_lut[MAX_PIPES];
struct dc_transfer_func *mpc_shaper[MAX_PIPES];
+ struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST];
struct {
unsigned int xtalin_clock_inKhz;
@@ -382,7 +395,9 @@ struct link_resource {
struct link_config {
struct dc_link_settings dp_link_settings;
+ struct dc_tunnel_settings dp_tunnel_settings;
};
+
union pipe_update_flags {
struct {
uint32_t enable : 1;
@@ -480,6 +495,10 @@ struct pipe_ctx {
struct pixel_rate_divider pixel_rate_divider;
/* pixels borrowed from hblank to hactive */
uint8_t hblank_borrow;
+ /* next vupdate */
+ uint32_t next_vupdate;
+ uint32_t wait_frame_count;
+ bool wait_is_required;
};
/* Data used for dynamic link encoder assignment.
@@ -507,7 +526,7 @@ struct resource_context {
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
bool is_mpc_3dlut_acquired[MAX_PIPES];
- /* solely used for build scalar data in dml2 */
+ /* used to build scalar data in dml2 and for edp backlight programming */
struct pipe_ctx temp_pipe;
};
@@ -545,7 +564,10 @@ struct dcn_bw_output {
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
struct dmub_cmd_fams2_global_config fams2_global_config;
union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES];
- union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union {
+ union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union dmub_fams2_stream_static_sub_state_v2 fams2_stream_sub_params_v2[DML2_MAX_PLANES];
+ };
struct dml2_display_arb_regs arb_regs;
};
@@ -630,7 +652,7 @@ struct dc_state {
*/
struct bw_context bw_ctx;
- struct block_sequence block_sequence[100];
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
unsigned int block_sequence_steps;
struct dc_dmub_cmd dc_dmub_cmd[10];
unsigned int dmub_cmd_count;
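
validate_bandwidth callers switch from a bool fast_validate to enum dc_validate_mode and compare the returned enum dc_status against DC_OK, so validation can surface specific failures such as the DC_FAIL_DP_TUNNEL_BW_VALIDATE code added in core_status.h above. A sketch of the updated caller shape, with both enums reduced to the values used here:

enum status_sketch { STATUS_OK = 1, STATUS_FAIL_TUNNEL_BW = 30 };
enum validate_mode_sketch { VALIDATE_MODE_ONLY, VALIDATE_MODE_AND_PROGRAMMING };

typedef enum status_sketch (*validate_fn)(void *dc, void *context,
					  enum validate_mode_sketch mode);

static int update_bandwidth_sketch(validate_fn validate, void *dc, void *context)
{
	/* a status other than OK now says *why* validation failed, where the
	 * old bool could only say that it did */
	if (validate(dc, context, VALIDATE_MODE_AND_PROGRAMMING) != STATUS_OK)
		return -1;
	/* ... apply updated bandwidth parameters ... */
	return 0;
}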
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index d19a595c2be4..134091d5842d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -622,7 +622,7 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn_get_soc_clks(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index c14d64687a3d..2c9a4a12bd8a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -100,6 +100,17 @@ struct dcn301_clk_internal {
#define MAX_NUM_DPM_LVL 8
#define WM_SET_COUNT 4
+enum clk_type {
+ CLK_TYPE_DCFCLK,
+ CLK_TYPE_FCLK,
+ CLK_TYPE_MCLK,
+ CLK_TYPE_SOCCLK,
+ CLK_TYPE_DTBCLK,
+ CLK_TYPE_DISPCLK,
+ CLK_TYPE_DPPCLK,
+ CLK_TYPE_DSCCLK,
+ CLK_TYPE_COUNT
+};
struct clk_limit_table_entry {
unsigned int voltage; /* millivolts with 2 fractional bits */
@@ -324,6 +335,11 @@ struct clk_mgr_funcs {
int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base);
+ bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr);
+
+ uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set);
+
+ unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
};
struct clk_mgr {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 221645c023b5..bac8febad69a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -199,6 +199,7 @@ enum dentist_divider_range {
CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
CLK_SR_DCN35(CLK5_spll_field_8), \
+ CLK_SR_DCN35(CLK6_spll_field_8), \
SR(DENTIST_DISPCLK_CNTL), \
#define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
@@ -307,7 +308,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK4_ALLOW_DS;
uint32_t CLK1_CLK5_ALLOW_DS;
uint32_t CLK5_spll_field_8;
-
+ uint32_t CLK6_spll_field_8;
};
struct clk_mgr_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index e94e9ba60f55..61c4d2a7db1c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -211,7 +211,7 @@ struct dccg_funcs {
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
- void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
+ void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
};
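
set_dto_dscclk gains a num_slices_h parameter here, matching the dcn32 call sites earlier in this patch that pass dsc_cfg.dc_dsc_cfg.num_slices_h. A sketch of a guarded call through the updated hook; the hook table below is a stand-in, not the real struct dccg_funcs:

#include <stdint.h>

struct dccg;

struct dsc_clk_hooks {        /* stand-in for part of struct dccg_funcs */
	void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst,
			       uint32_t num_slices_h);
};

static void enable_dsc_dto(struct dccg *dccg, const struct dsc_clk_hooks *hooks,
			   uint32_t dsc_inst, uint32_t num_slices_h)
{
	/* the added third argument lets DTO programming scale DSCCLK with the
	 * stream's horizontal slice count */
	if (hooks->set_dto_dscclk)
		hooks->set_dto_dscclk(dccg, dsc_inst, num_slices_h);
}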
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index b610beb075d5..cee29e89ec5c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -282,7 +282,7 @@ struct hubp_funcs {
void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable);
void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width);
- void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled);
+ void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits);
void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 3a89cc0cffc1..7641439f6ca0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -190,6 +190,42 @@ struct mpc_grph_gamut_adjustment {
enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id;
};
+struct mpc_rmcm_regs {
+ uint32_t rmcm_3dlut_mem_pwr_state;
+ uint32_t rmcm_3dlut_mem_pwr_force;
+ uint32_t rmcm_3dlut_mem_pwr_dis;
+ uint32_t rmcm_3dlut_mem_pwr_mode;
+ uint32_t rmcm_3dlut_size;
+ uint32_t rmcm_3dlut_mode;
+ uint32_t rmcm_3dlut_mode_cur;
+ uint32_t rmcm_3dlut_read_sel;
+ uint32_t rmcm_3dlut_30bit_en;
+ uint32_t rmcm_3dlut_wr_en_mask;
+ uint32_t rmcm_3dlut_ram_sel;
+ uint32_t rmcm_3dlut_out_norm_factor;
+ uint32_t rmcm_3dlut_fl_sel;
+ uint32_t rmcm_3dlut_out_offset_r;
+ uint32_t rmcm_3dlut_out_scale_r;
+ uint32_t rmcm_3dlut_fl_done;
+ uint32_t rmcm_3dlut_fl_soft_underflow;
+ uint32_t rmcm_3dlut_fl_hard_underflow;
+ uint32_t rmcm_cntl;
+ uint32_t rmcm_shaper_mem_pwr_state;
+ uint32_t rmcm_shaper_mem_pwr_force;
+ uint32_t rmcm_shaper_mem_pwr_dis;
+ uint32_t rmcm_shaper_mem_pwr_mode;
+ uint32_t rmcm_shaper_lut_mode;
+ uint32_t rmcm_shaper_mode_cur;
+ uint32_t rmcm_shaper_lut_write_en_mask;
+ uint32_t rmcm_shaper_lut_write_sel;
+ uint32_t rmcm_shaper_offset_b;
+ uint32_t rmcm_shaper_scale_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_seg_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_base_b;
+};
+
struct mpcc_sm_cfg {
bool enable;
/* 0-single plane,2-row subsampling,4-column subsampling,6-checkboard subsampling */
@@ -301,6 +337,7 @@ struct mpcc_state {
uint32_t rgam_mode;
uint32_t rgam_lut;
struct mpc_grph_gamut_adjustment gamut_remap;
+ struct mpc_rmcm_regs rmcm_regs;
};
/**
@@ -967,23 +1004,6 @@ struct mpc_funcs {
*/
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
- /**
- * @get_3dlut_fast_load_status:
- *
- * Get 3D LUT fast load status and reference them with done, soft_underflow and hard_underflow pointers.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] mpcc_id
- * - [in/out] done
- * - [in/out] soft_underflow
- * - [in/out] hard_underflow
- *
- * Return:
- *
- * void
- */
- void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow);
/**
* @populate_lut:
@@ -1054,6 +1074,45 @@ struct mpc_funcs {
* void
*/
void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
+
+ /**
+ * @mcm:
+ *
+ * MPC MCM new HW sequential programming functions
+ */
+ struct {
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } mcm;
+
+ /**
+ * @rmcm:
+ *
+ * MPC RMCM new HW sequential programming functions
+ */
+ struct {
+ void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
+ void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_XABLE xable,
+ bool lut_bank_a, int mpcc_id);
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+
+ void (*power_on_shaper_3dlut)(struct mpc *mpc, uint32_t mpcc_id, bool power_on);
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } rmcm;
};
#endif
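
The new mcm and rmcm sub-structs group the sequential-programming hooks by block, so call sites read mpc->funcs->mcm.populate_lut(...) as in the dcn401 hunk earlier in this patch. A reduced sketch of the grouping with only two illustrative hooks and simplified parameter types:

struct mpc;

union lut_params_sketch { const void *pwl; };

struct mpc_funcs_sketch {
	/* flat legacy hooks would sit at this level; the new hooks are
	 * namespaced per block instead */
	struct {
		void (*populate_lut)(struct mpc *mpc, union lut_params_sketch params,
				     int lut_bank_a, int mpcc_id);
		void (*program_3dlut_size)(struct mpc *mpc, unsigned int width,
					   int mpcc_id);
	} mcm;
};

static void program_mcm_lut(struct mpc *mpc, const struct mpc_funcs_sketch *funcs,
			    union lut_params_sketch params, int bank_a, int mpcc_id)
{
	if (funcs->mcm.populate_lut)   /* call sites read funcs->mcm.* */
		funcs->mcm.populate_lut(mpc, params, bank_a, mpcc_id);
}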
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 7f371cbb35cd..0d5a8358a778 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -68,6 +68,7 @@ struct optc {
int pstate_keepout;
struct dc_crtc_timing orginal_patched_timing;
enum signal_type signal;
+ uint32_t max_frame_count;
};
void optc1_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
index 00ea3864dd4d..44f86cc2d1d6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
@@ -46,6 +46,8 @@ struct pg_cntl_funcs {
void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on);
void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
void (*init_pg_status)(struct pg_cntl *pg_cntl);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index fe7f3137f228..27f950ae45ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -117,6 +117,7 @@ struct stream_encoder {
uint32_t stream_enc_inst;
struct vpg *vpg;
struct afmt *afmt;
+ struct apg *apg;
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 2948a696ee12..f2503402c10e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -144,9 +144,9 @@ struct link_service {
uint32_t (*dp_link_bandwidth_kbps)(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
- bool (*validate_dpia_bandwidth)(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+ enum dc_status (*validate_dp_tunnel_bandwidth)(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
uint32_t (*dp_required_hblank_size_bytes)(
const struct dc_link *link,
@@ -207,6 +207,9 @@ struct link_service {
bool (*dp_decide_link_settings)(
struct dc_stream_state *stream,
struct dc_link_settings *link_setting);
+ void (*dp_decide_tunnel_settings)(
+ struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
enum dp_link_encoding (*mst_decide_link_encoding_format)(
const struct dc_link *link);
bool (*edp_decide_link_settings)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index a402df225a76..26cb1459b743 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -508,6 +508,10 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
initial_val, \
n, __VA_ARGS__)
+#define IX_REG_SET_SYNC(index, init_value, f1, v1) \
+ IX_REG_SET_N_SYNC(index, 1, init_value, \
+ FN(reg, f1), v1)
+
#define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2) \
IX_REG_SET_N_SYNC(index, 2, init_value, \
FN(reg, f1), v1,\
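
IX_REG_SET_SYNC fills in the one-pair case of the existing family, delegating to IX_REG_SET_N_SYNC exactly as the two-pair variant does. A toy stand-alone version of the same wrapper-over-N-ary pattern; printf stands in for the generic indirect register update:

#include <stdarg.h>
#include <stdio.h>

/* Toy N-ary worker: consumes n field/value pairs, mirroring how each
 * IX_REG_SET_*_SYNC wrapper fixes the pair count for IX_REG_SET_N_SYNC. */
static void reg_set_n(int index, int n, ...)
{
	va_list args;
	int i;

	va_start(args, n);
	for (i = 0; i < n; i++) {
		int field = va_arg(args, int);
		int value = va_arg(args, int);
		printf("index %d: field %d <- %d\n", index, field, value);
	}
	va_end(args);
}

#define REG_SET_1(index, f1, v1)         reg_set_n(index, 1, f1, v1)
#define REG_SET_2(index, f1, v1, f2, v2) reg_set_n(index, 2, f1, v1, f2, v2)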
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 9458187b834d..a890f581f4e8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -32,6 +32,7 @@
#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define MEMORY_TYPE_HBM 2
+#define MAX_MCACHES 8
#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
@@ -65,6 +66,13 @@ struct resource_straps {
uint32_t audio_stream_number;
};
+struct dc_mcache_allocations {
+ int global_mcache_ids_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_plane1[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane1[MAX_MCACHES + 1];
+};
+
struct resource_create_funcs {
void (*read_dce_straps)(
struct dc_context *ctx, struct resource_straps *straps);
@@ -628,8 +636,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct dc_state *context,
struct pipe_ctx *pipe_ctx);
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
-
/* Get hw programming parameters container from pipe context
* @pipe_ctx: pipe context
* @dscl_prog_data: struct to hold programmable hw reg values
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 953f4a4dacad..33ce470e4c88 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -37,36 +37,9 @@
#include "ivsrcid/ivsrcid_vislands30.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
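
This hunk and the ones that follow replace each per-file static hpd_ack copy with shared hpd0_ack/hpd1_ack helpers, which presumably live in the common irq service code and differ only in the HPD0_* versus DC_HPD1_* register fields. A sketch reconstructing what the deleted bodies did, with plain pointers standing in for dm_read_reg/dm_write_reg and the field macros:

#include <stdbool.h>
#include <stdint.h>

static bool hpd_ack_sketch(volatile uint32_t *status_reg,
			   volatile uint32_t *enable_reg,
			   uint32_t sense_delayed_mask, uint32_t polarity_bit)
{
	uint32_t current_status = *status_reg & sense_delayed_mask;
	uint32_t value;

	/* the generic ack (dal_irq_service_ack_generic) happens here */

	value = *enable_reg;
	if (current_status)
		value &= ~polarity_bit;  /* sensed high: arm for the low edge */
	else
		value |= polarity_bit;   /* sensed low: arm for the high edge */
	*enable_reg = value;

	return true;
}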
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
index 2c72074310c7..d777b85e70da 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
@@ -46,36 +46,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -391,5 +364,3 @@ struct irq_service *dal_irq_service_dce60_create(
dce60_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 49317934ef4f..3a9163acb49b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -37,36 +37,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -303,5 +276,3 @@ struct irq_service *dal_irq_service_dce80_create(
dce80_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 9ca28565a9d1..4ce9edd16344 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 916f0c974637..5847af0e66cb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn20(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 1d61d475d36f..6417011d2246 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -80,36 +80,9 @@ static enum dc_irq_source to_dal_irq_source_dcn201(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 42cdfe6c3538..71d2f065140b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -132,36 +132,9 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic
return DC_IRQ_SOURCE_INVALID;
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index a443a8abb1ea..2a4080bdcf6b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -139,36 +139,9 @@ static enum dc_irq_source to_dal_irq_source_dcn30(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -447,4 +420,3 @@ struct irq_service *dal_irq_service_dcn30_create(
dcn30_irq_construct(irq_service, init_data);
return irq_service;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
index 8ffc7e2c681a..624f1ac309f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
@@ -126,26 +126,9 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
index 262bb8b74b15..137caffae916 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
@@ -77,26 +77,9 @@ static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
index 53e78ae7eecf..921cb167d920 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
@@ -128,36 +128,9 @@ static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
index e0563e880432..0118fd6e5db0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_servi
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
index 2ef22299101a..adebfc888618 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
@@ -135,36 +135,9 @@ static enum dc_irq_source to_dal_irq_source_dcn315(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
index f0ac0aeeac51..e9e315c75d76 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn32(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -191,6 +164,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -259,6 +242,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
 * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -270,14 +260,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
-}
-
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -285,6 +267,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -387,21 +383,29 @@ irq_source_info_dcn32[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3)
};
static const struct irq_service_funcs irq_service_funcs_dcn32 = {
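Note on the table reordering above: irq_source_info_dcn32 is built from C designated initializers, so each entry lands at the index named by its enum subscript and the textual order of the macros is cosmetic; the shuffle only makes the source order follow the dc_irq_source enum. A minimal standalone model (enum values and fields hypothetical):

enum src { SRC_BASE, SRC_BASE_1, SRC_COUNT };
struct info { int id; };

#define src_entry(n) [SRC_BASE + (n)] = { .id = (n) }

static const struct info table[SRC_COUNT] = {
	src_entry(1),	/* listed first, still placed at index 1 */
	src_entry(0),
};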
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
index ea8c271171bc..79e5e8c137ca 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
@@ -127,36 +127,9 @@ static enum dc_irq_source to_dal_irq_source_dcn35(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
index 7ec8e0de2f01..163b8ee9ebf7 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
@@ -106,36 +106,9 @@ static enum dc_irq_source to_dal_irq_source_dcn351(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
index ea958628f8b8..f716ab0fd30e 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
@@ -105,36 +105,9 @@ static enum dc_irq_source to_dal_irq_source_dcn36(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
index b43c9524b0de..fd9bb1950c20 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
@@ -109,36 +109,9 @@ static enum dc_irq_source to_dal_irq_source_dcn401(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -171,6 +144,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -239,6 +222,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
 * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -250,13 +240,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
- }
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -264,6 +247,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -364,21 +361,29 @@ irq_source_info_dcn401[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3),
};
static const struct irq_service_funcs irq_service_funcs_dcn401 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index eca3d7ee7e4e..b595a11c5eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -41,6 +41,16 @@
#include "reg_helper.h"
#include "irq_service.h"
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+//HPD1_DC_HPD_INT_STATUS
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x10
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x4
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x100
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x8
#define CTX \
@@ -177,3 +187,57 @@ enum dc_irq_source dal_irq_service_to_irq_source(
src_id,
ext_id);
}
+
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD1_INT_STATUS,
+ DC_HPD1_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD1_INT_CONTROL,
+ DC_HPD1_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
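Both shared helpers implement the same rearm pattern: sample DC_HPD_SENSE_DELAYED, ack the interrupt, then program DC_HPD_INT_POLARITY to the inverse of the sampled sense so the next connect/disconnect edge fires again. A self-contained sketch of that read-modify-write using the HPD0 mask values defined above, with register I/O reduced to plain integers:

#include <stdint.h>
#include <stdio.h>

#define SENSE_DELAYED_MASK 0x00000010u	/* DC_HPD_SENSE_DELAYED */
#define INT_POLARITY_MASK  0x00000100u	/* DC_HPD_INT_POLARITY */

/* Returns the new enable-register value after acking: polarity becomes
 * the inverse of the delayed sense, re-arming the opposite edge. */
static uint32_t hpd_rearm(uint32_t status, uint32_t enable)
{
	if (status & SENSE_DELAYED_MASK)
		enable &= ~INT_POLARITY_MASK;
	else
		enable |= INT_POLARITY_MASK;
	return enable;
}

int main(void)
{
	printf("0x%08x\n", hpd_rearm(0x10, 0x100));	/* 0x00000000 */
	printf("0x%08x\n", hpd_rearm(0x00, 0x000));	/* 0x00000100 */
	return 0;
}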
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
index b178f85944cd..bbcef3d2fe33 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -82,4 +82,12 @@ void dal_irq_service_set_generic(
const struct irq_source_info *info,
bool enable);
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index 110f656d43ae..a2f7b933bebf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -161,6 +161,20 @@ enum dc_irq_source {
DC_IRQ_SOURCE_DPCX_TX_PHYE,
DC_IRQ_SOURCE_DPCX_TX_PHYF,
+ DC_IRQ_SOURCE_DC1_VLINE2,
+ DC_IRQ_SOURCE_DC2_VLINE2,
+ DC_IRQ_SOURCE_DC3_VLINE2,
+ DC_IRQ_SOURCE_DC4_VLINE2,
+ DC_IRQ_SOURCE_DC5_VLINE2,
+ DC_IRQ_SOURCE_DC6_VLINE2,
+
+ DC_IRQ_SOURCE_DCI2C_RR_DDC1,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC2,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC3,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC4,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC5,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC6,
+
DAL_IRQ_SOURCES_NUMBER
};
@@ -170,6 +184,8 @@ enum irq_type
IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
+ IRQ_TYPE_VLINE1 = DC_IRQ_SOURCE_DC1_VLINE1,
+ IRQ_TYPE_VLINE2 = DC_IRQ_SOURCE_DC1_VLINE2,
IRQ_TYPE_DCUNDERFLOW = DC_IRQ_SOURCE_DC1UNDERFLOW,
};
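IRQ_TYPE_VLINE1/VLINE2 follow the existing aliasing convention: each irq_type names the first source of a per-OTG group, so the source for controller instance n is base + n. A tiny model with hypothetical enum values:

enum model_irq_source { DC1_VLINE2 = 40, DC2_VLINE2, DC3_VLINE2 };
enum model_irq_type { MODEL_IRQ_TYPE_VLINE2 = DC1_VLINE2 };

static inline int vline2_src(int otg_inst)
{
	return MODEL_IRQ_TYPE_VLINE2 + otg_inst;	/* otg_inst 2 -> DC3_VLINE2 */
}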
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 96febabf464a..2956c2b3ad1a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -34,6 +34,7 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "clk_mgr.h"
#define DC_LOGGER \
link->ctx->logger
@@ -67,10 +68,17 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
+ struct dc_stream_update stream_update = { 0 };
+ bool dpms_off = false;
+ bool needs_divider_update = false;
bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state);
bool is_hpo_acquired;
uint8_t count;
int i;
+ struct audio_output audio_output[MAX_PIPES];
+
+ needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
+ link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
udelay(100);
@@ -83,16 +91,59 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
link->dc,
state,
pipes[i]);
+
+ // Disable OTG and re-enable after updating clocks
+ pipes[i]->stream_res.tg->funcs->disable_crtc(pipes[i]->stream_res.tg);
}
- if (link->dc->hwss.setup_hpo_hw_control) {
- is_hpo_acquired = resource_is_hpo_acquired(state);
- if (was_hpo_acquired != is_hpo_acquired)
- link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+ if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) {
+ link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link,
+ link_setting, count,
+ *pipes, &audio_output[0]);
+ for (i = 0; i < count; i++) {
+ pipes[i]->clock_source->funcs->program_pix_clk(
+ pipes[i]->clock_source,
+ &pipes[i]->stream_res.pix_clk_params,
+ link->dc->link_srv->dp_get_encoding_format(&pipes[i]->link_config.dp_link_settings),
+ &pipes[i]->pll_settings);
+
+ if (pipes[i]->stream_res.audio != NULL) {
+ const struct link_hwss *link_hwss = get_link_hwss(
+ link, &pipes[i]->link_res);
+
+ link_hwss->setup_audio_output(pipes[i], &audio_output[i],
+ pipes[i]->stream_res.audio->inst);
+
+ pipes[i]->stream_res.audio->funcs->az_configure(
+ pipes[i]->stream_res.audio,
+ pipes[i]->stream->signal,
+ &audio_output[i].crtc_info,
+ &pipes[i]->stream->audio_info,
+ &audio_output[i].dp_link_info);
+
+ if (link->dc->config.disable_hbr_audio_dp2 &&
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio &&
+ link->dc->link_srv->dp_is_128b_132b_signal(pipes[i]))
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio(pipes[i]->stream_res.audio);
+ }
+ }
}
- for (i = count-1; i >= 0; i--)
- link_set_dpms_on(state, pipes[i]);
+ // Toggle on HPO I/O if necessary
+ is_hpo_acquired = resource_is_hpo_acquired(state);
+ if (was_hpo_acquired != is_hpo_acquired && link->dc->hwss.setup_hpo_hw_control)
+ link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+
+ for (i = 0; i < count; i++)
+ pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg);
+
+ // Set DPMS on with stream update
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) {
+ stream_update.stream = state->streams[i];
+ stream_update.dpms_off = &dpms_off;
+ dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update);
+ }
}
static void dp_test_send_link_training(struct dc_link *link)
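The reworked retrain path above only touches pixel clocks and dividers when the requested settings cross the 8b/10b <-> 128b/132b boundary, bracketing that work with a CRTC disable/enable and leaving the unblank to a dpms_off=false stream update. A reduced sketch of the gating decision (types hypothetical):

enum dp_encoding { ENC_8B_10B, ENC_128B_132B };

/* Divider reprogramming is only needed when the encoding format changes;
 * same-encoding retrains skip the clock updates entirely. */
static int needs_divider_update(enum dp_encoding cur, enum dp_encoding req)
{
	return cur != req;
}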
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index 116ff37126e7..55c5148de800 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- uint8_t clk_src = 0x4C;
+ uint8_t clk_src = 0xC4;
uint8_t pattern = 0x4F; /* SQ128 */
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index cc9191a5c9e6..827b630daf49 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -593,8 +593,9 @@ static bool detect_dp(struct dc_link *link,
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (!detect_dp_sink_caps(link))
+ if (!detect_dp_sink_caps(link)) {
return false;
+ }
if (is_dp_branch_device(link))
/* DP SST branch */
@@ -611,6 +612,7 @@ static bool detect_dp(struct dc_link *link,
link->dpcd_caps.dongle_type = sink_caps->dongle_type;
link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
link->dpcd_caps.dpcd_rev.raw = 0;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = 0;
}
return true;
@@ -1007,21 +1009,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
- /*
- * If this is DP over USB4 link then we need to:
- * - Enable BW ALLOC support on DPtx if applicable
- */
- if (dc->config.usb4_bw_alloc_support) {
- if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
- /* update with non reduced link cap if bw allocation mode is supported */
- if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
- link->dpia_bw_alloc_config.nrd_max_lane_count) {
- link->reported_link_cap.link_rate =
- link->dpia_bw_alloc_config.nrd_max_link_rate;
- link->reported_link_cap.lane_count =
- link->dpia_bw_alloc_config.nrd_max_lane_count;
- }
- }
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
+ if (link_dpia_enable_usb4_dp_bw_alloc_mode(link) == false)
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc = false;
}
break;
}
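Detection now keys BW-allocation enablement purely off the cached DPCD tunneling caps, and demotes the link on an enable failure so later code sees a consistent view. A condensed model of the gate (struct shape hypothetical):

struct tun_caps {
	unsigned dp_tunneling:1;
	unsigned dpia_bw_alloc:1;
	unsigned driver_bw_alloc_support:1;
};

static void maybe_enable_bw_alloc(struct tun_caps *c, int (*enable)(void))
{
	if (c->dp_tunneling && c->dpia_bw_alloc && c->driver_bw_alloc_support) {
		if (!enable())
			c->dpia_bw_alloc = 0;	/* treat as a non-allocating link */
	}
}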
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 268626e73c54..8c8682f743d6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
}
}
- if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) &&
+ (link->type != dc_connection_none))
dpcd_write_rx_power_ctrl(link, false);
}
}
@@ -148,6 +149,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_stream_state *streams[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
uint8_t count;
int i;
@@ -160,10 +162,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+ /* The subsequent call to dc_commit_updates_for_stream for a full update
+ * will release the current state and swap to a new state. Releasing the
+	 * current state causes the stream pointers in the pipe_ctx structs
+	 * to be zeroed. Hence, cache all streams prior to dc_commit_updates_for_stream.
+ */
+ for (i = 0; i < count; i++)
+ streams[i] = pipes[i]->stream;
+
for (i = 0; i < count; i++) {
- stream_update.stream = pipes[i]->stream;
+ stream_update.stream = streams[i];
dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
- pipes[i]->stream, &stream_update,
+ streams[i], &stream_update,
state);
}
@@ -833,14 +843,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
@@ -2287,8 +2297,7 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i
link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw;
}
- /* get dp overhead for dp tunneling */
- link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
+ link->dpia_bw_alloc_config.dp_overhead = link_dpia_get_dp_overhead(link);
req_bw += link->dpia_bw_alloc_config.dp_overhead;
link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw);
@@ -2365,7 +2374,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
deallocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -2433,7 +2442,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (link->connector_signal == SIGNAL_TYPE_EDP && dc->debug.psp_disabled_wa) {
/* reset internal save state to default since eDP is off */
enum dp_panel_mode panel_mode = dp_get_panel_mode(pipe_ctx->stream->link);
- /* since current psp not loaded, we need to reset it to default*/
+ /* since current psp not loaded, we need to reset it to default */
link->panel_mode = panel_mode;
}
}
@@ -2528,6 +2537,14 @@ void link_set_dpms_on(
!pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
update_psp_stream_config(pipe_ctx, false);
+
+ if (link->is_dds) {
+ uint32_t post_oui_delay = 30; // 30ms
+
+ dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
+ }
+
return;
}
@@ -2611,7 +2628,7 @@ void link_set_dpms_on(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_hblank_reduction_on_rx(pipe_ctx);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
allocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -2620,6 +2637,15 @@ void link_set_dpms_on(
dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, true);
+ /* Corruption was observed on systems with display mux when stream gets
+ * enabled after the mux switch. Having a small delay between link
+ * training and stream unblank resolves the corruption issue.
+	 * This is a workaround.
+ */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ link->is_display_mux_present)
+ msleep(20);
+
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
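The stream-caching hunk above guards against a use-after-swap: the first dc_commit_updates_for_stream releases the current state and zeroes pipe_ctx->stream, so later iterations must read from a snapshot taken beforehand. A minimal standalone model of the hazard and the fix (MAX_PIPES shrunk, the commit side effect simulated):

#include <stddef.h>

struct stream;
struct pipe { struct stream *stream; };

/* stand-in for the commit: releasing the old state zeroes the pipes */
static void commit(struct pipe *pipes, int count, struct stream *s)
{
	(void)s;
	for (int i = 0; i < count; i++)
		pipes[i].stream = NULL;
}

static void dpms_off_all(struct pipe *pipes, int count)
{
	struct stream *snap[6];

	for (int i = 0; i < count; i++)
		snap[i] = pipes[i].stream;	/* cache before the first commit */
	for (int i = 0; i < count; i++)
		commit(pipes, count, snap[i]);	/* snap[] stays valid */
}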
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index f6b6b19e7481..de1143dbbd25 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -100,7 +100,7 @@ static void construct_link_service_validation(struct link_service *link_srv)
{
link_srv->validate_mode_timing = link_validate_mode_timing;
link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
- link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth;
+ link_srv->validate_dp_tunnel_bandwidth = link_validate_dp_tunnel_bandwidth;
link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes;
}
@@ -156,6 +156,7 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_get_encoding_format = link_dp_get_encoding_format;
link_srv->dp_should_enable_fec = dp_should_enable_fec;
link_srv->dp_decide_link_settings = link_decide_link_settings;
+ link_srv->dp_decide_tunnel_settings = link_decide_dp_tunnel_settings;
link_srv->mst_decide_link_encoding_format =
mst_decide_link_encoding_format;
link_srv->edp_decide_link_settings = edp_decide_link_settings;
@@ -464,6 +465,7 @@ static bool construct_phy(struct dc_link *link,
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_read_request = DC_IRQ_SOURCE_INVALID;
link->link_status.dpcd_caps = &link->dpcd_caps;
link->dc = init_params->dc;
@@ -514,6 +516,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_HDMI_TYPE_A:
link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ if (link->hpd_gpio)
+ link->irq_source_read_request =
+ dal_irq_get_read_request(link->hpd_gpio);
break;
case CONNECTOR_ID_SINGLE_LINK_DVID:
case CONNECTOR_ID_SINGLE_LINK_DVII:
@@ -534,10 +539,16 @@ static bool construct_phy(struct dc_link *link,
break;
case CONNECTOR_ID_EDP:
+ // If smartmux is supported, only create the link on the primary eDP.
+ // Dual eDP is not supported with smartmux.
+ if (!(!link->dc->config.smart_mux_version || dc_ctx->dc_edp_id_count == 0))
+ goto create_fail;
+
link->connector_signal = SIGNAL_TYPE_EDP;
if (link->hpd_gpio) {
- if (!link->dc->config.allow_edp_hotplug_detection)
+ if (!link->dc->config.allow_edp_hotplug_detection
+ && !is_smartmux_suported(link))
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
switch (link->dc->config.allow_edp_hotplug_detection) {
@@ -653,7 +664,7 @@ static bool construct_phy(struct dc_link *link,
}
/* Look for device tag that matches connector signal,
- * CRT for rgb, LCD for other supported signal tyes
+ * CRT for rgb, LCD for other supported signal types
*/
if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
link->device_tag.dev_id))
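The smartmux guard in construct_phy is a double negative; by De Morgan, !(!smart_mux_version || dc_edp_id_count == 0) rejects the link exactly when smartmux is enabled and this is not the first eDP. Spelled out as an equivalent predicate:

/* equivalent, more readable form of the eDP smartmux guard */
static int reject_secondary_edp(unsigned smart_mux_version, int edp_id_count)
{
	return smart_mux_version != 0 && edp_id_count != 0;
}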
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 29606fda029d..aecaf37eee35 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -86,6 +86,10 @@ static bool dp_active_dongle_validate_timing(
if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
return false;
break;
+ case PIXEL_ENCODING_UNDEFINED:
+		/* This pixel encoding is currently not supported */
+ ASSERT(false);
+ break;
default:
/* Invalid Pixel Encoding*/
return false;
@@ -104,6 +108,10 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_max_bpc < 12)
return false;
break;
+ case COLOR_DEPTH_UNDEFINED:
+	/* This color depth is currently not supported */
+ ASSERT(false);
+ break;
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
@@ -255,6 +263,14 @@ uint32_t dp_link_bandwidth_kbps(
return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
}
+static uint32_t dp_get_timing_bandwidth_kbps(
+ const struct dc_crtc_timing *timing,
+ const struct dc_link *link)
+{
+ return dc_bandwidth_in_kbps_from_timing(timing,
+ dc_link_get_highest_encoding_format(link));
+}
+
static bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -351,63 +367,81 @@ enum dc_status link_validate_mode_timing(
return DC_OK;
}
+static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ int i;
+ const struct dc_tunnel_settings *dp_tunnel_settings = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream && (context->res_ctx.pipe_ctx[i].stream == stream)) {
+ dp_tunnel_settings = &context->res_ctx.pipe_ctx[i].link_config.dp_tunnel_settings;
+ break;
+ }
+ }
+
+ return dp_tunnel_settings;
+}
+
/*
- * This function calculates the bandwidth required for the stream timing
- * and aggregates the stream bandwidth for the respective dpia link
- *
- * @stream: pointer to the dc_stream_state struct instance
- * @num_streams: number of streams to be validated
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * return: true if validation is succeeded
+ * return: dc_status
*/
-bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
+enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- int bw_needed[MAX_DPIA_NUM] = {0};
- struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
- int num_dpias = 0;
-
- for (unsigned int i = 0; i < num_streams; ++i) {
- if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
- /* new dpia sst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
+ struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 };
+ uint8_t link_count = 0;
+ enum dc_status result = DC_OK;
- dpia_link[num_dpias] = stream[i].link;
- bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
- num_dpias++;
- } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- uint8_t j = 0;
- /* check whether its a known dpia link */
- for (; j < num_dpias; ++j) {
- if (dpia_link[j] == stream[i].link)
- break;
- }
+ // Iterate through streams in the new context
+ for (uint8_t i = 0; (i < MAX_PIPES && i < new_ctx->stream_count); i++) {
+ const struct dc_stream_state *stream = new_ctx->streams[i];
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *dp_tunnel_settings;
+ uint32_t timing_bw;
+
+ if (stream == NULL)
+ continue;
+
+ link = stream->link;
+
+ if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT
+ || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ && link->hpd_status))
+ continue;
- if (j == num_dpias) {
- /* new dpia mst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
- else {
- dpia_link[j] = stream[i].link;
- num_dpias++;
- }
+ dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream);
+
+ if ((dp_tunnel_settings == NULL) || (dp_tunnel_settings->should_use_dp_bw_allocation == false))
+ continue;
+
+ timing_bw = dp_get_timing_bandwidth_kbps(&stream->timing, link);
+
+ // Find an existing entry for this 'link' in 'dpia_link_sets'
+ for (uint8_t j = 0; j < MAX_DPIA_NUM; j++) {
+ bool is_new_slot = false;
+
+ if (dpia_link_sets[j].link == NULL) {
+ is_new_slot = true;
+ link_count++;
+ dpia_link_sets[j].required_bw = 0;
+ dpia_link_sets[j].link = link;
}
- bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[j]));
+ if (is_new_slot || (dpia_link_sets[j].link == link)) {
+ dpia_link_sets[j].tunnel_settings = dp_tunnel_settings;
+ dpia_link_sets[j].required_bw += timing_bw;
+ break;
+ }
}
}
- /* Include dp overheads */
- for (uint8_t i = 0; i < num_dpias; ++i) {
- int dp_overhead = 0;
-
- dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
- bw_needed[i] += dp_overhead;
- }
+ if (link_count && link_dpia_validate_dp_tunnel_bandwidth(dpia_link_sets, link_count) == false)
+ result = DC_FAIL_DP_TUNNEL_BW_VALIDATE;
- return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
+ return result;
}
struct dp_audio_layout_config {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index bf398c49c3e8..9553c81053fe 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -30,9 +30,9 @@ enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
const struct dc_crtc_timing *timing);
-bool link_validate_dpia_bandwidth(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+enum dc_status link_validate_dp_tunnel_bandwidth(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
uint32_t dp_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
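link_validate_dp_tunnel_bandwidth buckets streams by DPIA link before validating the per-link sums: a NULL slot in dpia_link_sets is claimed for an unseen link, a matching slot accumulates. The bucketing core, reduced to a standalone helper (types and bound simplified):

#include <stddef.h>

#define MAX_DPIA_NUM 4	/* illustrative bound, not the driver's value */

struct link;
struct dpia_set { const struct link *link; unsigned required_bw_kbps; };

static void add_stream_bw(struct dpia_set *sets,
			  const struct link *l, unsigned bw_kbps)
{
	for (int j = 0; j < MAX_DPIA_NUM; j++) {
		if (sets[j].link == NULL || sets[j].link == l) {
			sets[j].link = l;	/* claim new slot or match */
			sets[j].required_bw_kbps += bw_kbps;
			break;
		}
	}
}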
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 21ee0d96c9d4..651926e547b9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -158,6 +158,14 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count)
+{
+	/* Calculate the offset of the LTTPR closest to the DPTX, which is the highest in the chain.
+	 * The offset is 0 for single-LTTPR cases, as the base LTTPR DPCD addresses target LTTPR 1.
+ */
+ return DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (lttpr_count - 1);
+}
+
uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
{
switch (bw) {
@@ -377,9 +385,15 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
bool dp_is_lttpr_present(struct dc_link *link)
{
/* Some sink devices report invalid LTTPR revision, so don't validate against that cap */
- return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ uint32_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ bool is_lttpr_present = (lttpr_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4);
+
+ if (lttpr_count > 0 && !is_lttpr_present)
+ DC_LOG_ERROR("LTTPR count is nonzero but invalid lane count reported. Assuming no LTTPR present.\n");
+
+ return is_lttpr_present;
}
/* in DP compliance test, DPR-120 may have
@@ -1374,6 +1388,21 @@ void dpcd_set_source_specific_data(struct dc_link *link)
struct dpcd_amd_signature amd_signature = {0};
struct dpcd_amd_device_id amd_device_id = {0};
+ if (link->is_dds) {
+ uint8_t dpcd_dp_edp_backlight_mode = 0;
+
+ /*
+		 * Write 0 to bits 0:1 of the DP_EDP_BACKLIGHT_MODE_SET register
+		 * if the platform is DDS
+ */
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
+ dpcd_dp_edp_backlight_mode &= ~0x3;
+
+ core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
+ }
+
amd_device_id.device_id_byte1 =
(uint8_t)(link->ctx->asic_id.chip_id);
amd_device_id.device_id_byte2 =
@@ -1529,6 +1558,10 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return false;
link->dpcd_sink_ext_caps.raw = dpcd_data;
+ if (link->is_dds && !link->dpcd_sink_ext_caps.bits.oled) {
+ link->dpcd_sink_ext_caps.raw = 0;
+ return false;
+ }
if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK)
return false;
@@ -1543,6 +1576,8 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
uint8_t lttpr_dpcd_data[10] = {0};
enum dc_status status;
bool is_lttpr_present;
+ uint32_t lttpr_count;
+ uint32_t closest_lttpr_offset;
/* Logic to determine LTTPR support*/
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
@@ -1594,20 +1629,22 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
/* If this chip cap is set, at least one retimer must exist in the chain
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ lttpr_count == 0) {
/* If you see this message consistently, either the host platform has FIXED_VS flag
* incorrectly configured or the sink device is returning an invalid count.
*/
DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.",
link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ lttpr_count = 1;
DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = dp_is_lttpr_present(link);
DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
@@ -1615,11 +1652,25 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
if (is_lttpr_present) {
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- core_link_read_dpcd(link, DP_LTTPR_IEEE_OUI, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
- CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), "LTTPR IEEE OUI: ");
+ // Identify closest LTTPR to determine if workarounds required for known embedded LTTPR
+ closest_lttpr_offset = dp_get_closest_lttpr_offset(lttpr_count);
- core_link_read_dpcd(link, DP_LTTPR_DEVICE_ID, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
- CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), "LTTPR Device ID: ");
+ core_link_read_dpcd(link, (DP_LTTPR_IEEE_OUI + closest_lttpr_offset),
+ link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
+ core_link_read_dpcd(link, (DP_LTTPR_DEVICE_ID + closest_lttpr_offset),
+ link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
+
+ if (lttpr_count > 1) {
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+ "Closest LTTPR To Host's IEEE OUI: ");
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+ "Closest LTTPR To Host's LTTPR Device ID: ");
+ } else {
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+ "LTTPR IEEE OUI: ");
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+ "LTTPR Device ID: ");
+ }
}
return status;
@@ -2013,11 +2064,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
/* Read DP tunneling information. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dpcd_get_tunneling_device_data(link);
- if (status != DC_OK)
- dm_error("%s: Read DP tunneling device data failed.\n", __func__);
- }
+ status = dpcd_get_tunneling_device_data(link);
+ if (status != DC_OK)
+ DC_LOG_DP2("%s: Read DP tunneling device data failed.\n", __func__);
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 0ce0af3ddbeb..940b147cc5d4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -48,6 +48,9 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
/* Convert PHY repeater count read from DPCD uint8_t. */
uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
+/* Calculate embedded LTTPR address offset for vendor-specific behaviour */
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count);
+
bool dp_is_sink_present(struct dc_link *link);
bool dp_is_lttpr_present(struct dc_link *link);
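dp_get_closest_lttpr_offset follows the DPCD layout in which each repeater owns a fixed-size register block and the repeater nearest the host is the highest-numbered one; DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE is 0x50 per the DP spec. A worked example:

#include <stdint.h>

#define REPEATER_BLOCK_SIZE 0x50u	/* DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE */

static uint32_t closest_lttpr_offset(uint8_t lttpr_count)
{
	/* one repeater: offset 0, base addresses already target LTTPR 1;
	 * three repeaters: 2 * 0x50 = 0xA0 past the base addresses */
	return REPEATER_BLOCK_SIZE * (uint32_t)(lttpr_count - 1);
}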
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 0d123e647652..9b2f1a7da1d1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -62,6 +62,39 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
+ dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling == false)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
+ dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) {
+ status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY,
+ dpcd_dp_tun_data, 2);
+
+ if (status != DC_OK)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw =
+ dpcd_dp_tun_data[USB4_DRIVER_BW_CAPABILITY - USB4_DRIVER_BW_CAPABILITY];
+ link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_TUNNEL_INFO - USB4_DRIVER_BW_CAPABILITY];
+ }
+
+ DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) "
+ "DPIA_BW_Alloc_support=%d "
+ "CM_BW_Alloc_support=%d ",
+ __func__, link->link_index,
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id,
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.bits.dpia_num,
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc,
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
+
status = core_link_read_dpcd(
link,
DP_USB4_ROUTER_TOPOLOGY_ID,
@@ -71,13 +104,6 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
- link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
- dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
- dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
- dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
-
for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
@@ -92,6 +118,7 @@ bool dpia_query_hpd_status(struct dc_link *link)
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.header.payload_bytes = sizeof(cmd.query_hpd.data);
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
@@ -119,3 +146,26 @@ bool dpia_query_hpd_status(struct dc_link *link)
return link->hpd_status;
}
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting)
+{
+ struct dc_link *link = stream->link;
+
+ memset(dp_tunnel_setting, 0, sizeof(*dp_tunnel_setting));
+
+ if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT) || (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
+ dp_tunnel_setting->should_enable_dp_tunneling =
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling;
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
+ dp_tunnel_setting->should_use_dp_bw_allocation = true;
+ dp_tunnel_setting->cm_id = link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id & 0x0F;
+ dp_tunnel_setting->group_id = link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.bits.group_id;
+ dp_tunnel_setting->estimated_bw = link->dpia_bw_alloc_config.estimated_bw;
+ dp_tunnel_setting->allocated_bw = link->dpia_bw_alloc_config.allocated_bw;
+ dp_tunnel_setting->bw_granularity = link->dpia_bw_alloc_config.bw_granularity;
+ }
+ }
+}
+
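dpcd_get_tunneling_device_data above uses the common DPCD idiom of fetching a contiguous register block in one AUX read and indexing fields as buf[REG - BASE]. A sketch with hypothetical addresses:

#include <stdint.h>

#define TUN_BASE 0xE000u	/* hypothetical block base */
#define TUN_CAPS 0xE000u
#define TUN_DPIA 0xE001u

static void parse_tunnel_block(const uint8_t *buf,
			       uint8_t *caps, uint8_t *dpia_info)
{
	*caps = buf[TUN_CAPS - TUN_BASE];	/* index 0 */
	*dpia_info = buf[TUN_DPIA - TUN_BASE];	/* index 1 */
}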
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index 363f45a1a964..a61edfc9ca7a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -38,4 +38,10 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
* Returns true if HPD high.
*/
bool dpia_query_hpd_status(struct dc_link *link);
+
+/* Decide the DP tunneling settings based on the DPCD capabilities
+ */
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
+
#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index a254ead2f7e8..819bf2d8ba53 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -35,6 +35,8 @@
#define Kbps_TO_Gbps (1000 * 1000)
+#define MST_TIME_SLOT_COUNT 64
+
// ------------------------------------------------------------------
// PRIVATE FUNCTIONS
// ------------------------------------------------------------------
@@ -46,9 +48,10 @@
*/
static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA
- && link->hpd_status
- && link->dpia_bw_alloc_config.bw_alloc_enabled);
+ return (link && link->hpd_status
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
static void reset_bw_alloc_struct(struct dc_link *link)
@@ -141,7 +144,7 @@ static int get_non_reduced_max_lane_count(struct dc_link *link)
 * granularity, Driver_ID, CM_Group, and populate the BW allocation structs
* for host router and dpia
*/
-static void init_usb4_bw_struct(struct dc_link *link)
+static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link)
{
reset_bw_alloc_struct(link);
@@ -159,78 +162,6 @@ static void init_usb4_bw_struct(struct dc_link *link)
link->dpia_bw_alloc_config.nrd_max_lane_count);
}
-static uint8_t get_lowest_dpia_index(struct dc_link *link)
-{
- const struct dc *dc_struct = link->dc;
- uint8_t idx = 0xFF;
- int i;
-
- for (i = 0; i < MAX_LINKS; ++i) {
-
- if (!dc_struct->links[i] ||
- dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
-
- if (idx > dc_struct->links[i]->link_index) {
- idx = dc_struct->links[i]->link_index;
- break;
- }
- }
-
- return idx;
-}
-
-/*
- * Get the maximum dp tunnel banwidth of host router
- *
- * @dc: pointer to the dc struct instance
- * @hr_index: host router index
- *
- * return: host router maximum dp tunnel bandwidth
- */
-static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
-{
- uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
- uint8_t hr_index_temp = 0;
- struct dc_link *link_dpia_primary, *link_dpia_secondary;
- int total_bw = 0;
-
- for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) {
-
- if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
-
- hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
-
- if (hr_index_temp == hr_index) {
- link_dpia_primary = dc->links[i];
- link_dpia_secondary = dc->links[i + 1];
-
- /**
- * If BW allocation enabled on both DPIAs, then
- * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
- * otherwise HR BW = Estimated(bw alloc enabled dpia)
- */
- if ((link_dpia_primary->hpd_status &&
- link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
- (link_dpia_secondary->hpd_status &&
- link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
- total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
- link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
- } else if (link_dpia_primary->hpd_status &&
- link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
- total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
- } else if (link_dpia_secondary->hpd_status &&
- link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
- total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
- }
- break;
- }
- }
-
- return total_bw;
-}
-
/*
* Cleanup function for when the dpia is unplugged to reset struct
* and perform any required clean up
@@ -250,93 +181,81 @@ static void dpia_bw_alloc_unplug(struct dc_link *link)
static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
{
- uint8_t requested_bw;
- uint32_t temp;
+ uint8_t request_reg_val;
+ uint32_t temp, request_bw;
- /* Error check whether request bw greater than allocated */
- if (req_bw > link->dpia_bw_alloc_config.estimated_bw) {
- DC_LOG_ERROR("%s: Request BW greater than estimated BW for link(%d)\n",
- __func__, link->link_index);
- req_bw = link->dpia_bw_alloc_config.estimated_bw;
+ if (link->dpia_bw_alloc_config.bw_granularity == 0) {
+ DC_LOG_ERROR("%s: Link[%d]: bw_granularity is zero!", __func__, link->link_index);
+ return;
}
temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
- requested_bw = temp / Kbps_TO_Gbps;
+ request_reg_val = temp / Kbps_TO_Gbps;
/* Always round up to account for any fractional remainder */
if (temp % Kbps_TO_Gbps)
- ++requested_bw;
+ ++request_reg_val;
- /* Error check whether requested and allocated are equal */
- req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
- DC_LOG_ERROR("%s: Request BW equals to allocated BW for link(%d)\n",
- __func__, link->link_index);
+ request_bw = request_reg_val * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+ if (request_bw > link->dpia_bw_alloc_config.estimated_bw) {
+ DC_LOG_ERROR("%s: Link[%d]: Request BW (%d --> %d) > Estimated BW (%d)... Set to Estimated BW!",
+ __func__, link->link_index,
+ req_bw, request_bw, link->dpia_bw_alloc_config.estimated_bw);
+ req_bw = link->dpia_bw_alloc_config.estimated_bw;
+
+ temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
+ request_reg_val = temp / Kbps_TO_Gbps;
+ if (temp % Kbps_TO_Gbps)
+ ++request_reg_val;
}
+ link->dpia_bw_alloc_config.allocated_bw = request_bw;
+ DC_LOG_DC("%s: Link[%d]: Request BW: %d", __func__, link->link_index, request_bw);
+
core_link_write_dpcd(link, REQUESTED_BW,
- &requested_bw,
+ &request_reg_val,
sizeof(uint8_t));
}
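The request path encodes bandwidth in REQUESTED_BW as units of (1 Gbps / bw_granularity), rounded up, then converts the register value back to kbps to record what was actually asked for. A standalone model of both directions (same arithmetic as request_reg_val / request_bw above):

#include <stdint.h>

#define KBPS_TO_GBPS (1000 * 1000)

/* kbps -> register units, rounded up */
static uint8_t bw_to_reg(uint32_t req_bw_kbps, uint32_t granularity)
{
	uint32_t t = req_bw_kbps * granularity;

	return (uint8_t)(t / KBPS_TO_GBPS + (t % KBPS_TO_GBPS ? 1 : 0));
}

/* register units -> kbps actually requested */
static uint32_t reg_to_bw(uint8_t reg_val, uint32_t granularity)
{
	return reg_val * (KBPS_TO_GBPS / granularity);
}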
// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
{
bool ret = false;
- uint8_t response = 0,
- bw_support_dpia = 0,
- bw_support_cm = 0;
+ uint8_t val;
- if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
- goto out;
+ if (link->hpd_status) {
+ val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
- if (core_link_read_dpcd(
- link,
- DP_TUNNELING_CAPABILITIES,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_dpia = (response >> 7) & 1;
-
- if (core_link_read_dpcd(
- link,
- USB4_DRIVER_BW_CAPABILITY,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_cm = (response >> 7) & 1;
-
- /* Send request acknowledgment to Turn ON DPTX support */
- if (bw_support_cm && bw_support_dpia) {
-
- response = 0x80;
- if (core_link_write_dpcd(
- link,
- DPTX_BW_ALLOCATION_MODE_CONTROL,
- &response,
- sizeof(uint8_t)) != DC_OK) {
- DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
- } else {
- // SUCCESS Enabled DPtx BW Allocation Mode Support
- DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
+ if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
+
+ retrieve_usb4_dp_bw_allocation_info(link);
+
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
- ret = true;
- init_usb4_bw_struct(link);
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ ret = true;
- /*
- * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
- * DPIA. CM release preallocation only when allocation is complete. Do zero alloc
- * to make the CM to release preallocation and update estimated BW correctly for
- * all DPIAs per host router
- */
- link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
- }
+ if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
+ /*
+ * During DP tunnel creation, the CM preallocates BW
+ * and reduces the estimated BW of other DPIAs.
+ * The CM releases the preallocation only when the allocation is complete.
+ * Perform a zero allocation to make the CM release the preallocation
+ * and correctly update the estimated BW for all DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ }
+		} else {
+			DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
+		}
}
-out:
return ret;
}
@@ -350,19 +269,17 @@ out:
*/
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
{
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+
if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
__func__, link->link_index);
} else if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
- link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
-
DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
} else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
- link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
-
DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
}
@@ -378,7 +295,8 @@ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
*/
void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
{
- if (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dpia_bw_alloc_config.bw_alloc_enabled) {
+ if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpia_bw_alloc_config.bw_alloc_enabled) {
//1. Hot Plug
if (link->hpd_status && peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
@@ -394,83 +312,130 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe
void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
{
- DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+
+ DC_LOG_DEBUG("%s: ENTER: link[%d] hpd(%d) Allocated_BW: %d Estimated_BW: %d Req_BW: %d",
__func__, link->link_index, link->hpd_status,
- link->dpia_bw_alloc_config.allocated_bw, req_bw);
+ link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.estimated_bw,
+ req_bw);
if (link_dp_is_bw_alloc_available(link))
link_dpia_send_bw_alloc_request(link, req_bw);
else
- DC_LOG_DEBUG("%s: Not able to send the BW Allocation request", __func__);
+ DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__);
+}
+
+uint32_t link_dpia_get_dp_overhead(const struct dc_link *link)
+{
+ uint32_t link_dp_overhead = 0;
+
+ if ((link->type == dc_connection_mst_branch) &&
+ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+		/* For 8b/10b encoding an MTP is 64 time slots long and slot 0
+		 * carries the MTP header (MTPH), so MST overhead is 1/64 of the
+		 * link bandwidth (excluding any other overhead).
+		 */
+ const struct dc_link_settings *link_cap = dc_link_get_link_cap(link);
+
+ if (link_cap) {
+ uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
+ (uint32_t)link_cap->lane_count *
+ LINK_RATE_REF_FREQ_IN_KHZ * 8;
+ link_dp_overhead = (link_bw_in_kbps / MST_TIME_SLOT_COUNT)
+ + ((link_bw_in_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0);
+ }
+ }
+
+ return link_dp_overhead;
}
-bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
+/*
+ * Aggregates the DPIA bandwidth usage for each USB4 router and then
+ * validates that the required bandwidth is within the router's capacity.
+ *
+ * @dpia_link_sets: pointer to an array of dc_validation_dpia_set entries
+ * @count: number of DPIA validation sets
+ *
+ * return: true if validation succeeds
+ */
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count)
{
- bool ret = true;
- int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
- uint8_t lowest_dpia_index, i, hr_index;
+ uint32_t granularity_Gbps;
+ const struct dc_link *link;
+ uint32_t link_bw_granularity;
+ uint32_t link_required_bw;
+ struct usb4_router_validation_set router_sets[MAX_HOST_ROUTERS_NUM] = { 0 };
+ uint8_t i;
+ bool is_success = true;
+ uint8_t router_count = 0;
+
+ if ((dpia_link_sets == NULL) || (count == 0))
+ return is_success;
+
+ // Iterate through each DP tunneling link (DPIA).
+ // Aggregate its bandwidth requirements onto the respective USB4 router.
+ for (i = 0; i < count; i++) {
+ link = dpia_link_sets[i].link;
+ link_required_bw = dpia_link_sets[i].required_bw;
+ const struct dc_tunnel_settings *dp_tunnel_settings = dpia_link_sets[i].tunnel_settings;
+
+ if ((link == NULL) || (dp_tunnel_settings == NULL) || dp_tunnel_settings->bw_granularity == 0)
+ break;
- if (!num_dpias || num_dpias > MAX_DPIA_NUM)
- return ret;
+ if (link->type == dc_connection_mst_branch)
+ link_required_bw += link_dpia_get_dp_overhead(link);
- lowest_dpia_index = get_lowest_dpia_index(link[0]);
+ granularity_Gbps = (Kbps_TO_Gbps / dp_tunnel_settings->bw_granularity);
+ link_bw_granularity = (link_required_bw / granularity_Gbps) * granularity_Gbps +
+ ((link_required_bw % granularity_Gbps) ? granularity_Gbps : 0);
- /* get total Host Router BW with granularity for the given modes */
- for (i = 0; i < num_dpias; ++i) {
- int granularity_Gbps = 0;
- int bw_granularity = 0;
+ // Find or add the USB4 router associated with the current DPIA link
+ for (uint8_t j = 0; j < MAX_HOST_ROUTERS_NUM; j++) {
+ if (router_sets[j].is_valid == false) {
+ router_sets[j].is_valid = true;
+ router_sets[j].cm_id = dp_tunnel_settings->cm_id;
+ router_count++;
+ }
- if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
- continue;
+ if (router_sets[j].cm_id == dp_tunnel_settings->cm_id) {
+ uint32_t remaining_bw =
+ dp_tunnel_settings->estimated_bw - dp_tunnel_settings->allocated_bw;
- if (link[i]->link_index < lowest_dpia_index)
- continue;
+ router_sets[j].allocated_bw += dp_tunnel_settings->allocated_bw;
- granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
- bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
- ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
+ if (remaining_bw > router_sets[j].remaining_bw)
+ router_sets[j].remaining_bw = remaining_bw;
- hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
- bw_needed_per_hr[hr_index] += bw_granularity;
- }
+ // Get the max estimated BW within the same CM_ID
+ if (dp_tunnel_settings->estimated_bw > router_sets[j].estimated_bw)
+ router_sets[j].estimated_bw = dp_tunnel_settings->estimated_bw;
- /* validate against each Host Router max BW */
- for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
- if (bw_needed_per_hr[hr_index]) {
- host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
- if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
- ret = false;
+ router_sets[j].required_bw += link_bw_granularity;
+ router_sets[j].dpia_count++;
break;
}
}
}
- return ret;
-}
+ // Validate bandwidth for each unique router found.
+ for (i = 0; i < router_count; i++) {
+ uint32_t total_bw = 0;
-int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
-{
- int dp_overhead = 0, link_mst_overhead = 0;
+ if (router_sets[i].is_valid == false)
+ break;
- if (!link_dp_is_bw_alloc_available(link))
- return dp_overhead;
+ // Determine the total available bandwidth for the current router based on aggregated data
+ if ((router_sets[i].dpia_count == 1) || (router_sets[i].allocated_bw == 0))
+ total_bw = router_sets[i].estimated_bw;
+ else
+ total_bw = router_sets[i].allocated_bw + router_sets[i].remaining_bw;
- /* if its mst link, add MTPH overhead */
- if ((link->type == dc_connection_mst_branch) &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
- * MST overhead is 1/64 of link bandwidth (excluding any overhead)
- */
- const struct dc_link_settings *link_cap =
- dc_link_get_link_cap(link);
- uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
- (uint32_t)link_cap->lane_count *
- LINK_RATE_REF_FREQ_IN_KHZ * 8;
- link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
+ if (router_sets[i].required_bw > total_bw) {
+ is_success = false;
+ break;
+ }
}
- /* add all the overheads */
- dp_overhead = link_mst_overhead;
-
- return dp_overhead;
+ return is_success;
}
+
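
A hedged usage sketch of the new validation entry point; the field names follow the accesses in the function above, while the variable names, bandwidth figures, and caller action are illustrative:

	struct dc_validation_dpia_set sets[2] = {
		{ .link = link0, .tunnel_settings = &tun0, .required_bw = 8100000 },
		{ .link = link1, .tunnel_settings = &tun1, .required_bw = 4050000 },
	};

	if (!link_dpia_validate_dp_tunnel_bandwidth(sets, 2))
		reject_mode_set(); /* hypothetical: aggregated demand exceeds a router's capacity */

Note that the function itself adds link_dpia_get_dp_overhead() on top of required_bw for MST links, so callers pass the raw stream bandwidth.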
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 6df9b946b00f..41efcb3e44e2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -28,10 +28,6 @@
#include "link.h"
-/* Number of Host Routers per motherboard is 2 */
-#define MAX_HR_NUM 2
-/* Number of DPIA per host router is 2 */
-#define MAX_DPIA_NUM (MAX_HR_NUM * 2)
/*
* Host Router BW type
@@ -42,14 +38,24 @@ enum bw_type {
HOST_ROUTER_BW_INVALID,
};
+struct usb4_router_validation_set {
+ bool is_valid;
+ uint8_t cm_id;
+ uint8_t dpia_count;
+ uint32_t required_bw;
+ uint32_t allocated_bw;
+ uint32_t estimated_bw;
+ uint32_t remaining_bw;
+};
+
/*
- * Enable BW Allocation Mode Support from the DP-Tx side
+ * Enable USB4 DP BW allocation mode
*
* @link: pointer to the dc_link struct instance
*
 * return: true on success, false on failure
*/
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link);
/*
* Allocates only what the stream needs for bw, so if:
@@ -74,25 +80,13 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
/*
- * Handle the validation of total BW here and confirm that the bw used by each
- * DPIA doesn't exceed available BW for each host router (HR)
- *
- * @link[]: array of link pointer to all possible DPIA links
- * @bw_needed[]: bw needed for each DPIA link based on timing
- * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM
- *
- * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
- */
-bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
-
-/*
* Obtain all the DP overheads in dp tunneling for the dpia link
*
* @link: pointer to the dc_link struct instance
*
* return: DP overheads in DP tunneling
*/
-int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+uint32_t link_dpia_get_dp_overhead(const struct dc_link *link);
/*
* Handle DP BW allocation status register
@@ -104,4 +98,15 @@ int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
*/
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status);
+/*
+ * Aggregates the DPIA bandwidth usage for each USB4 router and then
+ * validates that the required bandwidth is within the router's capacity.
+ *
+ * @dpia_link_sets: pointer to an array of dc_validation_dpia_set entries
+ * @count: number of DPIA validation sets
+ *
+ * return: true if validation succeeds
+ */
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count);
+
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 5be00e4ce10b..693477413347 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -229,6 +229,10 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
+ /* Increment desync error counter if a desync error is detected */
+ if (replay_configuration.bits.DESYNC_ERROR_STATUS)
+ link->replay_settings.replay_desync_error_fail_count++;
+
if (link->replay_settings.config.force_disable_desync_error_check)
return;
@@ -240,9 +244,6 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_configuration.raw,
sizeof(replay_configuration.raw));
- /* Update desync error counter */
- link->replay_settings.replay_desync_error_fail_count++;
-
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -351,7 +352,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->raw,
DP_SINK_STATUS - DP_SINK_COUNT + 1);
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
retval = core_link_read_dpcd(
link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
&irq_data->bytes.link_service_irq_esi0.raw, 1);
@@ -520,7 +521,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
dp_trace_link_loss_increment(link);
}
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ)
dp_handle_tunneling_irq(link);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index ef358afdfb65..2dc1a660e504 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -785,7 +785,6 @@ void override_training_settings(
lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
-
}
enum dc_dp_training_pattern decide_cr_training_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 5a5d48fadbf2..66d0fb1b9b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -142,6 +142,14 @@ void decide_8b_10b_training_settings(
lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
+	/* Some embedded LTTPRs rely on receiving TPS2 before link training to
+	 * interoperate reliably with sensitive VGA dongles. This allows these
+	 * LTTPRs to minimize freq/phase and skew variation during the lock and
+	 * deskew sequences.
+	 */
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) ==
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2) {
+ lt_settings->lttpr_early_tps2 = true;
+ }
}
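
The chip_caps test above is a masked-equality capability check: all bits outside EXT_CHIP_MASK are cleared first, then the remaining field is compared against one specific capability value. A generic sketch (the helper name is hypothetical):

	static inline bool ext_chip_caps_match(uint32_t caps, uint32_t mask, uint32_t value)
	{
		return (caps & mask) == value; /* exact match within the masked field */
	}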
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -173,6 +181,42 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
return LTTPR_MODE_NON_LTTPR;
}
+static void set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t lttpr_count)
+{
+ /* Vendor-specific LTTPR early TPS2 sequence:
+ * 1. Output TPS2
+ * 2. Wait 400us
+ * 3. Set link settings as usual
+ * 4. Write TPS1 to DP_TRAINING_PATTERN_SET_PHY_REPEATERx targeting LTTPR closest to host
+ * 5. Wait 1ms
+ * 6. Begin link training as usual
+	 */
+
+ uint32_t closest_lttpr_address_offset = dp_get_closest_lttpr_offset(lttpr_count);
+
+ union dpcd_training_pattern dpcd_pattern = {0};
+
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = 1;
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE = 1;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS2. Wait 400us.\n", __func__);
+
+ dp_set_hw_training_pattern(link, link_res, DP_TRAINING_PATTERN_SEQUENCE_2, DPRX);
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+
+ udelay(400);
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + closest_lttpr_address_offset, &dpcd_pattern.raw, 1);
+
+ udelay(1000);
+}
+
enum link_training_result perform_8b_10b_clock_recovery_sequence(
struct dc_link *link,
const struct link_resource *link_res,
@@ -383,7 +427,7 @@ enum link_training_result dp_perform_8b_10b_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t repeater_cnt;
+ uint8_t repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
uint8_t repeater_id;
uint8_t lane = 0;
@@ -391,14 +435,16 @@ enum link_training_result dp_perform_8b_10b_link_training(
start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
/* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, lt_settings);
+ if (lt_settings->lttpr_early_tps2)
+ set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(link, link_res, lt_settings, repeater_cnt);
+ else
+ dpcd_set_link_settings(link, lt_settings);
if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
*/
- repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
repeater_id--) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 1e4adbc764ea..e7927b8f5ba3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -161,6 +161,9 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds && !link->dpcd_caps.panel_luminance_control)
+ return true;
+
// use internal backlight control if dmub capabilities are not present
if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX &&
!link->dc->caps.dmub_caps.aux_backlight_support) {
@@ -173,6 +176,15 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
target_luminance = (struct target_luminance_value *)&backlight_millinits;
+	// Make sure we disable AMD ABC first.
+ core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_enable, sizeof(uint8_t));
+ if (backlight_enable) {
+ backlight_enable = 0;
+ core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_enable, 1);
+ }
+
core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
&backlight_enable, sizeof(uint8_t));
@@ -193,10 +205,22 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
uint8_t backlight_control = isHDR ? 1 : 0;
+ uint8_t backlight_enable = 0;
+
// OLEDs have no PWM, they can only use AUX
if (link->dpcd_sink_ext_caps.bits.oled == 1)
backlight_control = 1;
+	// Make sure we disable VESA ABC first.
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable, sizeof(uint8_t));
+
+ if (backlight_enable & DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE) {
+ backlight_enable &= ~DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+ core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable, sizeof(backlight_enable));
+ }
+
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *)(&dpcd_backlight_set),
sizeof(dpcd_backlight_set)) != DC_OK)
@@ -222,6 +246,8 @@ bool edp_get_backlight_level_nits(struct dc_link *link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds)
+ return false;
if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
dpcd_backlight_get.raw,
sizeof(union dpcd_source_backlight_get)))
@@ -248,6 +274,8 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds)
+ return true;
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
&backlight_enable, 1) != DC_OK)
return false;
@@ -524,7 +552,7 @@ bool edp_set_backlight_level(const struct dc_link *link,
struct dc *dc = link->ctx->dc;
uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16;
uint32_t frame_ramp = backlight_level_params->frame_ramp;
- DC_LOGGER_INIT(link->ctx->logger);
+
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
@@ -916,7 +944,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
// TODO: Handle mux change case if force_static is set
// If force_static is set, just change the replay_allow_active state directly
if (replay != NULL && link->replay_settings.replay_feature_enabled)
- replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
+ replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link);
link->replay_settings.replay_allow_active = *allow_active;
}
@@ -1022,6 +1050,9 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
&alpm_config.raw,
sizeof(alpm_config.raw));
}
+
+ link->replay_settings.config.replay_video_conferencing_optimization_enabled = false;
+
return true;
}
@@ -1130,11 +1161,11 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
struct abm *abm = NULL;
for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx.stream;
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx->stream;
if (stream && stream->link == link) {
- abm = pipe_ctx.stream_res.abm;
+ abm = pipe_ctx->stream_res.abm;
break;
}
}
@@ -1170,6 +1201,16 @@ int edp_get_target_backlight_pwm(const struct dc_link *link)
return (int) abm->funcs->get_target_backlight(abm);
}
+bool is_smartmux_suported(struct dc_link *link)
+{
+ if (link->dc->caps.is_apu)
+ return false;
+ if (!link->dc->config.smart_mux_version)
+ return false;
+
+ return true;
+}
+
static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
struct link_resource *link_res, bool enable)
{
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index bcfa6ac5d4e7..4a475d5b9dde 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -30,6 +30,7 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
+bool is_smartmux_suported(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
int edp_get_backlight_level(const struct dc_link *link);
bool edp_get_backlight_level_nits(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
index 1e2e66508192..5402c3529f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
@@ -68,5 +68,5 @@ MPC_DCN401 = dcn401_mpc.o
AMD_DAL_MPC_DCN401 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn401/,$(MPC_DCN401))
AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN401)
-endif
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index a0e9e9f0441a..6f0e017a8ae2 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -30,7 +30,6 @@
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc.h"
-#include "dcn401/dcn401_mpc.h"
#define REG(reg)\
mpc30->mpc_regs->reg
@@ -370,275 +369,279 @@ void mpc32_program_shaper_luta_settings(
MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-}
-
-
-void mpc32_program_shaper_lutb_settings(
- struct mpc *mpc,
- const struct pwl_params *params,
- uint32_t mpcc_id)
-{
- const struct gamma_curve *curve;
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
-
- curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+}
+
+
+void mpc32_program_shaper_lutb_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id)
+{
+ const struct gamma_curve *curve;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
}
@@ -875,7 +878,7 @@ void mpc32_set3dlut_ram10(
}
-static void mpc32_set_3dlut_mode(
+void mpc32_set_3dlut_mode(
struct mpc *mpc,
enum dc_lut_mode mode,
bool is_color_channel_12bits,
@@ -1018,8 +1021,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
- .set_movable_cm_location = mpc401_set_movable_cm_location,
- .populate_lut = mpc401_populate_lut,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
index 9622518826c9..8c9b20bcca85 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
@@ -391,4 +391,12 @@ void mpc32_select_3dlut_ram(
enum dc_lut_mode mode,
bool is_color_channel_12bits,
uint32_t mpcc_id);
+
+void mpc32_set_3dlut_mode(
+ struct mpc *mpc,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ bool is_lut_size17x17x17,
+ uint32_t mpcc_id);
+
#endif //__DC_MPCC_DCN32_H__
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index ad67197557ca..f3fb3fe13757 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -47,16 +47,6 @@ void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp
REG_SET(MPCC_MCM_3DLUT_FAST_LOAD_SELECT[mpcc_id], 0, MPCC_MCM_3DLUT_FL_SEL, hubp_idx);
}
-void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_GET_3(MPCC_MCM_3DLUT_FAST_LOAD_STATUS[mpcc_id],
- MPCC_MCM_3DLUT_FL_DONE, done,
- MPCC_MCM_3DLUT_FL_SOFT_UNDERFLOW, soft_underflow,
- MPCC_MCM_3DLUT_FL_HARD_UNDERFLOW, hard_underflow);
-}
-
void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
@@ -304,7 +294,7 @@ void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id)
REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1);
}
-static void program_gamut_remap(
+void mpc_program_gamut_remap(
struct mpc *mpc,
unsigned int mpcc_id,
const uint16_t *regval,
@@ -436,7 +426,7 @@ void mpc401_set_gamut_remap(
if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
/* Bypass / Disable if type is bypass or hw */
- program_gamut_remap(mpc, mpcc_id, NULL,
+ mpc_program_gamut_remap(mpc, mpcc_id, NULL,
adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0);
} else {
struct fixed31_32 arr_matrix[12];
@@ -470,12 +460,12 @@ void mpc401_set_gamut_remap(
else
mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2;
- program_gamut_remap(mpc, mpcc_id, arr_reg_val,
+ mpc_program_gamut_remap(mpc, mpcc_id, arr_reg_val,
adjust->mpcc_gamut_remap_block_id, mode_select);
}
}
-static void read_gamut_remap(struct mpc *mpc,
+void mpc_read_gamut_remap(struct mpc *mpc,
int mpcc_id,
uint16_t *regval,
enum mpcc_gamut_remap_id gamut_remap_block_id,
@@ -571,9 +561,9 @@ void mpc401_get_gamut_remap(struct mpc *mpc,
struct mpc_grph_gamut_adjustment *adjust)
{
uint16_t arr_reg_val[12] = {0};
- uint32_t mode_select;
+ uint32_t mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0;
- read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
+ mpc_read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
@@ -618,7 +608,6 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.set_bg_color = mpc1_set_bg_color,
.set_movable_cm_location = mpc401_set_movable_cm_location,
.update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select,
- .get_3dlut_fast_load_status = mpc401_get_3dlut_fast_load_status,
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index ce6fbcf14d7a..eb0c68d0b0c7 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -241,23 +241,22 @@ void mpc401_update_3dlut_fast_load_select(
int mpcc_id,
int hubp_idx);
-void mpc401_get_3dlut_fast_load_status(
+void mpc_program_gamut_remap(
struct mpc *mpc,
+ unsigned int mpcc_id,
+ const uint16_t *regval,
+ enum mpcc_gamut_remap_id gamut_remap_block_id,
+ enum mpcc_gamut_remap_mode_select mode_select);
+
+void mpc_read_gamut_remap(struct mpc *mpc,
int mpcc_id,
- uint32_t *done,
- uint32_t *soft_underflow,
- uint32_t *hard_underflow);
+ uint16_t *regval,
+ enum mpcc_gamut_remap_id gamut_remap_block_id,
+ uint32_t *mode_select);
void mpc401_update_3dlut_fast_load_select(
struct mpc *mpc,
int mpcc_id,
int hubp_idx);
-void mpc401_get_3dlut_fast_load_status(
- struct mpc *mpc,
- int mpcc_id,
- uint32_t *done,
- uint32_t *soft_underflow,
- uint32_t *hard_underflow);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index 81857ce6d68d..e7a90a437fff 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
@@ -502,7 +502,7 @@ void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *ref
REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate);
}
-static struct timing_generator_funcs dcn20_tg_funcs = {
+static const struct timing_generator_funcs dcn20_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
index f2415eebdc09..772a8bfb949c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
@@ -129,7 +129,7 @@ static void optc201_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 1;
}
-static struct timing_generator_funcs dcn201_tg_funcs = {
+static const struct timing_generator_funcs dcn201_tg_funcs = {
.validate_timing = optc201_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
index 78b58a449fa4..ee4665aa49e9 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
@@ -357,7 +357,7 @@ void optc3_tg_init(struct timing_generator *optc)
optc1_clear_optc_underflow(optc);
}
-static struct timing_generator_funcs dcn30_tg_funcs = {
+static const struct timing_generator_funcs dcn30_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
index 65e9089b7f31..38f85bc2681a 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
@@ -109,7 +109,7 @@ void optc301_setup_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_CLEAR, 1);
}
-static struct timing_generator_funcs dcn30_tg_funcs = {
+static const struct timing_generator_funcs dcn30_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index ef536f37b4ed..4f1830ba619f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -315,7 +315,7 @@ void optc31_read_otg_state(struct timing_generator *optc,
s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
}
-static struct timing_generator_funcs dcn31_tg_funcs = {
+static const struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 0e603bad0d12..4a2caca37255 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
@@ -192,7 +192,7 @@ static void optc314_set_h_timing_div_manual_mode(struct timing_generator *optc,
}
-static struct timing_generator_funcs dcn314_tg_funcs = {
+static const struct timing_generator_funcs dcn314_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 2cdd19ba634b..b2b226bcd871 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -297,7 +297,7 @@ static void optc32_set_drr(
optc32_setup_manual_trigger(optc);
}
-static struct timing_generator_funcs dcn32_tg_funcs = {
+static const struct timing_generator_funcs dcn32_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index b86fe2b094f8..72bff94cb57d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -428,7 +428,7 @@ static void optc35_set_long_vtotal(
}
}
-static struct timing_generator_funcs dcn35_tg_funcs = {
+static const struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -507,6 +507,7 @@ void dcn35_timing_generator_init(struct optc *optc1)
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
+ optc1->max_frame_count = 0xFFFFFF;
dcn35_timing_generator_set_fgcg(
optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 382ac18e7854..ff79c38287df 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -459,7 +459,7 @@ bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked)
return true;
}
-static struct timing_generator_funcs dcn401_tg_funcs = {
+static const struct timing_generator_funcs dcn401_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
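
The optc hunks above all apply the same one-line hardening: each timing_generator_funcs table becomes const, so the function-pointer table is placed in .rodata and cannot be overwritten at runtime. A minimal sketch of the pattern (my_tg_funcs is a placeholder name):

	/* before: writable .data -- a stray write could redirect every hook */
	static struct timing_generator_funcs my_tg_funcs = { .program_timing = optc1_program_timing };

	/* after: read-only .rodata -- the compiler rejects writes through it */
	static const struct timing_generator_funcs my_tg_funcs = { .program_timing = optc1_program_timing };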
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index f2ba76c1e0c0..782316348941 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -31,6 +31,7 @@
#include <linux/kgdb.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
#include <asm/byteorder.h>
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index b8cddef6b3d2..5b42da8b79c2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+RESOURCE_DCE60 = dce60_resource.o
+
+AMD_DAL_RESOURCE_DCE60 = $(addprefix $(AMDDALPATH)/dc/resource/dce60/,$(RESOURCE_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE60)
+endif
+
+###############################################################################
+
+RESOURCE_DCE80 = dce80_resource.o
+
+AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
+
+###############################################################################
+
RESOURCE_DCE100 = dce100_resource.o
AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100))
@@ -57,14 +75,6 @@ AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOUR
AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120)
-###############################################################################
-
-RESOURCE_DCE80 = dce80_resource.o
-
-AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index e698543ec937..3a51be63f020 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -836,10 +836,10 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce100_validate_bandwidth(
+static enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
@@ -858,7 +858,7 @@ static bool dce100_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce100_validate_surface_sets(
@@ -1069,7 +1069,7 @@ static bool dce100_resource_construct(
pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
- dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.i2c_speed_in_khz_hdcp = 40;
dc->caps.max_cursor_size = 128;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dual_link_dvi = true;
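
This is the first of many hunks converting validate_bandwidth from bool to enum dc_status, so callers learn why validation failed rather than just that it failed. A hedged sketch of the caller-side effect (the surrounding caller is hypothetical):

	enum dc_status status;

	status = dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_ONLY);
	if (status != DC_OK)
		return status;	/* e.g. DC_FAIL_BANDWIDTH_VALIDATE or DC_FAIL_HW_CURSOR_SUPPORT */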
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index 035c6cfdaee5..cccde5a6f3cd 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -960,10 +960,10 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce110_validate_bandwidth(
+static enum dc_status dce110_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool result = false;
@@ -1031,7 +1031,7 @@ static bool dce110_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 480a50967385..164ba796f64c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -883,10 +883,10 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool result = false;
@@ -952,7 +952,7 @@ bool dce112_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
enum dc_status resource_map_phy_clock_resources(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
index 1f57ebc6f9b4..3efc4c55d2d2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
@@ -42,10 +42,10 @@ enum dc_status dce112_validate_with_context(
struct dc_state *context,
struct dc_state *old_context);
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
enum dc_status dce112_add_stream_to_ctx(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 889f314cac65..53b60044653f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -48,7 +48,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
-#include "dce60/dce60_hw_sequencer.h"
+#include "dce60/dce60_hwseq.h"
#include "dce100/dce100_resource.h"
#include "dce/dce_panel_cntl.h"
@@ -373,7 +373,7 @@ static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
.num_stream_encoder = 6,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 6,
};
@@ -389,7 +389,7 @@ static const struct resource_caps res_cap_64 = {
.num_timing_generator = 2,
.num_audio = 2,
.num_stream_encoder = 2,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 2,
};
@@ -863,10 +863,10 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce60_validate_bandwidth(
+static enum dc_status dce60_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
@@ -885,7 +885,7 @@ static bool dce60_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce60_validate_surface_sets(
@@ -973,21 +973,24 @@ static bool dce60_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
@@ -1365,21 +1368,24 @@ static bool dce64_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
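
Net effect of the two dce6x hunks above: PLL0 is reserved for the DP clock source (per the new comment), and PLL1/PLL2 become the two general-purpose sources, which is why num_pll grows from 2 to 3. Condensed, the non-external-clock branch now reads:

	pool->base.dp_clock_source =
		dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
	pool->base.clock_sources[0] =
		dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
	pool->base.clock_sources[1] =
		dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
	pool->base.clk_src_count = 2;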
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
index 5d653a76b0b0..5d653a76b0b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 3d5113f010bb..3e8b0ac11d90 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -869,10 +869,10 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce80_validate_bandwidth(
+static enum dc_status dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
@@ -891,7 +891,7 @@ static bool dce80_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce80_validate_surface_sets(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index e92f14d50adb..652c05c35494 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -23,6 +23,7 @@
*
*/
+#include "core_status.h"
#include "dm_services.h"
#include "dc.h"
@@ -1125,18 +1126,18 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
-static bool dcn10_validate_bandwidth(
+static enum dc_status dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
DC_FP_START();
- voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
+ voltage_supported = dcn_validate_bandwidth(dc, context, validate_mode);
DC_FP_END();
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
@@ -1245,6 +1246,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && pool->stream_enc[i]->id ==
+ link->dpia_preferred_eng_id)
+ return pool->stream_enc[i];
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index e4eca3e32c1b..f9cbdad3ef37 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2007,7 +2007,7 @@ bool dcn20_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@@ -2021,7 +2021,7 @@ bool dcn20_fast_validate_bw(
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
@@ -2124,22 +2124,22 @@ validate_out:
return out;
}
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
display_e2e_pipe_params_st *pipes;
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
- voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, validate_mode, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
@@ -2736,6 +2736,8 @@ static bool dcn20_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
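
dc->caps.max_odm_combine_factor, added here and in every resource construct below (2 on DCN2.x, 4 from DCN3.0 on), caps how many pipes ODM may gang to drive a single stream. A hypothetical bounds check a consumer of this cap might perform (desired_odm_slices is illustrative, not from this patch):

	if (desired_odm_slices > dc->caps.max_odm_combine_factor)
		return DC_FAIL_BANDWIDTH_VALIDATE;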
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
index 4cee3fa11a7f..e997d35a8b86 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
@@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params(
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode);
void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context);
@@ -158,7 +158,7 @@ bool dcn20_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 43fa2cb117f3..e4a1338d21e0 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -1285,6 +1285,8 @@ static bool dcn201_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 4bd5c2278596..918742a42ded 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -769,7 +769,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@@ -783,7 +783,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
@@ -923,22 +923,22 @@ validate_out:
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
-static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
display_e2e_pipe_params_st *pipes;
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
- voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, validate_mode, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_NOT_SUPPORTED;
}
static void dcn21_destroy_resource_pool(struct resource_pool **pool)
@@ -1684,6 +1684,8 @@ static bool dcn21_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
return true;
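
As the comment above notes, the DML math uses the FPU, which kernel code may only touch between DC_FP_START()/DC_FP_END(); the pipes array is heap-allocated to keep DML's large scratch state off the kernel stack. The shared shape of these validate hooks, as a sketch (my_validate_fp stands in for the per-DCN FPU helper):

	display_e2e_pipe_params_st *pipes;
	bool ok;

	pipes = kcalloc(dc->res_pool->pipe_count, sizeof(*pipes), GFP_KERNEL);
	if (!pipes)
		return DC_FAIL_BANDWIDTH_VALIDATE;

	DC_FP_START();		/* FPU state saved; float math is legal in this region */
	ok = my_validate_fp(dc, context, validate_mode, pipes);	/* hypothetical FP helper */
	DC_FP_END();		/* FPU state restored before any return */

	kfree(pipes);
	return ok ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;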
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
index f7ecc002c2f7..a017fd9854d1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
@@ -51,6 +51,6 @@ bool dcn21_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif /* _DCN21_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index f01ced015072..895349d9ca07 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1319,13 +1319,13 @@ static struct clock_source *dcn30_clock_source_create(
int dcn30_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
DC_FP_START();
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1627,7 +1627,7 @@ noinline bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate,
+ enum dc_validate_mode validate_mode,
bool allow_self_refresh_only)
{
bool out = false;
@@ -1646,7 +1646,7 @@ noinline bool dcn30_internal_validate_bw(
context->bw_ctx.dml.vba.VoltageLevel = 0;
context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!pipe_cnt) {
out = true;
@@ -1655,7 +1655,7 @@ noinline bool dcn30_internal_validate_bw(
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (!fast_validate || !allow_self_refresh_only) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING || !allow_self_refresh_only) {
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
@@ -1669,7 +1669,7 @@ noinline bool dcn30_internal_validate_bw(
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
if (allow_self_refresh_only &&
- (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
/*
* If mode is unsupported or there's still no p-state support
@@ -1678,7 +1678,7 @@ noinline bool dcn30_internal_validate_bw(
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
*/
- context->bw_ctx.dml.validate_max_state = fast_validate;
+ context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh;
@@ -1865,7 +1865,7 @@ noinline bool dcn30_internal_validate_bw(
}
if (repopulate_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
@@ -1891,8 +1891,6 @@ static int get_refresh_rate(struct dc_state *context)
/* check if refresh rate at least 120hz */
timing = &context->streams[0]->timing;
- if (timing == NULL)
- return 0;
h_v_total = timing->h_total * timing->v_total;
if (h_v_total == 0)
@@ -2037,9 +2035,9 @@ void dcn30_calculate_wm_and_dlg(
DC_FP_END();
}
-bool dcn30_validate_bandwidth(struct dc *dc,
+enum dc_status dcn30_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -2057,7 +2055,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
goto validate_fail;
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true);
DC_FP_END();
if (pipe_cnt == 0)
@@ -2068,7 +2066,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -2094,7 +2092,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -2588,6 +2586,8 @@ static bool dcn30_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
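
The validate_mode values used throughout this series split the old fast_validate bool into distinct intents; only DC_VALIDATE_MODE_AND_PROGRAMMING runs the full watermark/DLG pass. The definition below is inferred from the call sites in this patch, not copied from a header:

	enum dc_validate_mode {
		DC_VALIDATE_MODE_ONLY,			/* mode-support check only (old fast_validate = true) */
		DC_VALIDATE_MODE_AND_STATE_INDEX,	/* also select a clock/power state index */
		DC_VALIDATE_MODE_AND_PROGRAMMING,	/* full validation incl. watermarks (old fast_validate = false) */
	};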
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
index 8e6b8b7368fd..2c967fe55712 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
@@ -56,15 +56,15 @@ unsigned int dcn30_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
-bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate);
+enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode);
bool dcn30_internal_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate,
+ enum dc_validate_mode validate_mode,
bool allow_self_refresh_only);
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
@@ -78,7 +78,7 @@ void dcn30_populate_dml_writeback_from_context(
int dcn30_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dcn30_acquire_post_bldn_3dlut(
struct resource_context *res_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 121a86a59833..82a205a7c25c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -1706,6 +1706,8 @@ static bool dcn301_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 012c5fd52cb1..3345068a878c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -1481,6 +1481,8 @@ static bool dcn302_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index a8d0b4686f9a..3479e1eab4cd 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -1414,6 +1414,8 @@ static bool dcn303_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index dddddbfef85f..3ed7f50554e2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1616,14 +1616,14 @@ static bool is_dual_plane(enum surface_pixel_format format)
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.gpuvm = 1;
@@ -1641,7 +1641,7 @@ int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1649,7 +1649,7 @@ int dcn31_populate_dml_pipes_from_context(
bool upscaled = false;
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1758,9 +1758,9 @@ dcn31_set_mcif_arb_params(struct dc *dc,
DC_FP_END();
}
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1778,19 +1778,19 @@ bool dcn31_validate_bandwidth(struct dc *dc,
goto validate_fail;
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ // Fall back to DC_VALIDATE_MODE_AND_PROGRAMMING (i.e. drop DC_VALIDATE_MODE_ONLY/_AND_STATE_INDEX) so calculate_wm_and_dlg can set the min dcfclk
if (pipe_cnt == 0)
- fast_validate = false;
+ validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1813,7 +1813,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
@@ -1850,7 +1850,9 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static struct clock_source *dcn30_clock_source_create(
@@ -1954,6 +1956,9 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.disable_hbr_audio_dp2 = true;
@@ -2199,6 +2204,8 @@ static bool dcn31_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp;
@@ -2228,3 +2235,35 @@ struct resource_pool *dcn31_create_resource_pool(
kfree(pool);
return NULL;
}
+
+enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output)
+{
+ struct dc_state *state = link->dc->current_state;
+ int i;
+
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ link->dc->hwss.calculate_pix_rate_divider((struct dc *)link->dc, state, state->streams[i]);
+
+ for (i = 0; i < pipe_count; i++) {
+ link->dc->res_pool->funcs->build_pipe_pix_clk_params(&pipes[i]);
+
+ // Setup audio
+ if (pipes[i].stream_res.audio != NULL)
+ build_audio_output(state, &pipes[i], &audio_output[i]);
+ }
+#else
+ /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching,
+ * but the above will not compile on architectures without an FPU.
+ */
+ DC_LOG_WARNING("%s: DP1<-->DP2 link retraining will not work on this DCN on non-FPU platforms", __func__);
+ ASSERT(0);
+#endif
+
+ return DC_OK;
+}
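
dcn31_update_dc_state_for_encoder_switch recomputes pixel-rate dividers, per-pipe pixel-clock parameters, and audio outputs so a link can retrain across a DP1<-->DP2 encoder change; the DCN31x resource tables wire it in alongside dcn20_build_pipe_pix_clk_params. A hypothetical call site during retraining (the real caller lives in the link-training path):

	status = link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link,
			&link_settings, pipe_count, pipes, audio_outputs);
	if (status != DC_OK)
		return status;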
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index 551ad912f7be..c32c85ef0ba4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -37,9 +37,9 @@ struct dcn31_resource_pool {
struct resource_pool base;
};
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -48,7 +48,7 @@ void dcn31_calculate_wm_and_dlg(
int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void
dcn31_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
@@ -66,6 +66,12 @@ struct resource_pool *dcn31_create_resource_pool(
unsigned int dcn31_get_det_buffer_size(
const struct dc_state *context);
+enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output);
+
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 26becc4cb804..663c49cce4aa 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -926,6 +926,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.seamless_boot_odm_combine = true,
.enable_legacy_fast_update = true,
.using_dml2 = false,
+ .disable_dsc_power_gate = true,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1667,12 +1668,12 @@ static struct clock_source *dcn31_clock_source_create(
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt;
DC_FP_START();
- pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
DC_FP_END();
return pipe_cnt;
@@ -1694,9 +1695,9 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1715,19 +1716,19 @@ bool dcn314_validate_bandwidth(struct dc *dc,
DC_FP_START();
// do not support self refresh only
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, false);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ // Fall back to DC_VALIDATE_MODE_AND_PROGRAMMING (i.e. drop DC_VALIDATE_MODE_ONLY/_AND_STATE_INDEX) so calculate_wm_and_dlg can set the min dcfclk
if (pipe_cnt == 0)
- fast_validate = false;
+ validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1750,7 +1751,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static struct resource_funcs dcn314_res_pool_funcs = {
@@ -1779,7 +1780,9 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static struct clock_source *dcn30_clock_source_create(
@@ -1885,6 +1888,9 @@ static bool dcn314_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -2114,6 +2120,8 @@ static bool dcn314_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
index 49ffe71018df..ac9bb7f097d5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
@@ -39,9 +39,9 @@ struct dcn314_resource_pool {
struct resource_pool base;
};
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
struct resource_pool *dcn314_create_resource_pool(
const struct dc_init_data *init_data,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 6c2bb3f63be1..82cc78c291d8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1664,7 +1664,7 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
static int dcn315_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1674,7 +1674,7 @@ static int dcn315_populate_dml_pipes_from_context(
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1844,7 +1844,9 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
.get_power_profile = dcn315_get_power_profile,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn315_resource_construct(
@@ -2140,6 +2142,8 @@ static bool dcn315_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 568094827212..636110e48d01 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -1610,7 +1610,7 @@ static bool is_dual_plane(enum surface_pixel_format format)
static int dcn316_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1618,7 +1618,7 @@ static int dcn316_populate_dml_pipes_from_context(
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1720,7 +1720,9 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn316_resource_construct(
@@ -2008,6 +2010,8 @@ static bool dcn316_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 2a59cc61ed8c..9917b366f00c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -24,6 +24,7 @@
*
*/
+#include "dc_types.h"
#include "dm_services.h"
#include "dc.h"
@@ -1741,7 +1742,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
}
}
-static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate)
+static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1766,7 +1767,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
goto validate_fail;
DC_FP_START();
- out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode);
DC_FP_END();
if (pipe_cnt == 0)
@@ -1777,7 +1778,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1806,25 +1807,62 @@ validate_out:
return out;
}
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
else
- out = dml1_validate(dc, context, fast_validate);
- return out;
+ status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ }
+ }
+
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ else
+ status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1840,7 +1878,7 @@ int dcn32_populate_dml_pipes_from_context(
int num_subvp_none = 0;
int odm_slice_count;
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
/* For single display subvp, look for subvp main so if we have phantom
* pipe, we can set odm policy to match main pipe
@@ -1922,7 +1960,7 @@ int dcn32_populate_dml_pipes_from_context(
/* Only populate DML input with subvp info for full updates.
* This is just a workaround -- needs a proper fix.
*/
- if (!fast_validate) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
switch (dc_state_get_pipe_subvp_type(context, pipe)) {
case SUBVP_MAIN:
pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport;
@@ -2023,25 +2061,31 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
DC_FP_START();
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ bool limit_cur_to_buf;
+
+ limit_cur_to_buf = dc_state_get_stream_subvp_cursor_limit(stream, state) &&
+ !stream->hw_cursor_req;
+
+ return limit_cur_to_buf ? dc->caps.max_buffered_cursor_size : dc->caps.max_cursor_size;
+}
+
static struct resource_funcs dcn32_res_pool_funcs = {
.destroy = dcn32_destroy_resource_pool,
.link_enc_create = dcn32_link_encoder_create,
@@ -2067,7 +2111,8 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2114,8 +2159,6 @@ static bool dcn32_resource_construct(
#define REG_STRUCT dccg_regs
dccg_regs_init();
- DC_FP_START();
-
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn32;
@@ -2153,6 +2196,7 @@ static bool dcn32_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
@@ -2207,7 +2251,7 @@ static bool dcn32_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -2226,6 +2270,7 @@ static bool dcn32_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -2455,6 +2500,8 @@ static bool dcn32_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2469,7 +2516,6 @@ static bool dcn32_resource_construct(
}
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
@@ -2501,14 +2547,14 @@ static bool dcn32_resource_construct(
if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;
- DC_FP_END();
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
return true;
create_fail:
- DC_FP_END();
-
dcn32_resource_destruct(pool);
return false;
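
The dcn32 validate path above adds a retry around subvp and the HW cursor: per-stream limits are cleared, validation runs, and if subvp ends up active while a visible stream's cursor attributes can no longer be honored, the stream is flagged and validation reruns with subvp effectively disabled for it. Condensed (the helper names here are shorthand for the calls shown above, not real functions):

	status = validate(dc, context, mode);			/* dml2_validate or dml1_validate */
	if (mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK &&
	    dc_state_is_subvp_in_use(context) && cursor_no_longer_fits(context)) {
		mark_streams_cursor_subvp_limited(context);	/* per offending stream */
		status = validate(dc, context, mode);		/* retry steers DML away from subvp */
	}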
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 1aa4ced29291..82f966cf4ed2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -98,14 +98,14 @@ void dcn32_add_phantom_pipes(struct dc *dc,
unsigned int pipe_cnt,
unsigned int index);
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn32_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
@@ -188,6 +188,10 @@ void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes);
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 38d76434683e..061c0907d802 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1580,21 +1580,15 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
DC_FP_START();
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
@@ -1624,7 +1618,8 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1709,6 +1704,7 @@ static bool dcn321_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
@@ -1759,8 +1755,8 @@ static bool dcn321_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
- dc->caps.color.dpp.ogam_ram = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 0;
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
@@ -1778,6 +1774,7 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -2002,6 +1999,8 @@ static bool dcn321_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2016,7 +2015,6 @@ static bool dcn321_resource_construct(
}
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
@@ -2044,6 +2042,10 @@ static bool dcn321_resource_construct(
dc->dml2_options.max_segments_per_hubp = 18;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index ffd2b816cd02..8475c6eec547 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1732,24 +1732,24 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn35_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
- return out;
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state)
@@ -1786,7 +1786,9 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn35_resource_construct(
@@ -1874,7 +1876,7 @@ static bool dcn35_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1893,6 +1895,13 @@ static bool dcn35_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
@@ -1903,7 +1912,7 @@ static bool dcn35_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
/* Sequential ONO is based on ASIC. */
- if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
dc->caps.sequential_ono = true;
/* Use pipe context based otg sync logic */
@@ -2148,12 +2157,13 @@ static bool dcn35_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 98f5bc1b929e..0971c0f74186 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1712,24 +1712,24 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn351_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
- return out;
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static struct resource_funcs dcn351_res_pool_funcs = {
@@ -1758,7 +1758,9 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn351_resource_construct(
@@ -1846,7 +1848,7 @@ static bool dcn351_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1865,6 +1867,13 @@ static bool dcn351_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
@@ -2119,13 +2128,14 @@ static bool dcn351_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index b6468573dc33..8bae7fcedc22 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -1713,24 +1713,24 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn35_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
- return out;
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1759,7 +1759,9 @@ static struct resource_funcs dcn36_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn36_resource_construct(
@@ -1847,7 +1849,7 @@ static bool dcn36_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1866,6 +1868,13 @@ static bool dcn36_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
@@ -1876,7 +1885,7 @@ static bool dcn36_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
/* Sequential ONO is based on ASIC. */
- if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
dc->caps.sequential_ono = true;
/* Use pipe context based otg sync logic */
@@ -2121,12 +2130,13 @@ static bool dcn36_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 7436dfbdf927..b3988e38d0a6 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -70,7 +70,6 @@
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "dml/dcn401/dcn401_fpu.h"
#include "dc_state_priv.h"
@@ -1608,10 +1607,6 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
/* re-calculate the available MALL size if required */
if (bw_params->num_channels > 0) {
dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
@@ -1622,15 +1617,11 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
DC_FP_START();
- dcn401_update_bw_bounding_box_fpu(dc, bw_params);
-
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
@@ -1642,16 +1633,52 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta
return DC_OK;
}
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status = DC_OK;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
+
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
- return out;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ }
+ }
+
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
void dcn401_prepare_mcache_programming(struct dc *dc,
@@ -1770,7 +1797,8 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
.get_power_profile = dcn401_get_power_profile,
- .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1846,8 +1874,9 @@ static bool dcn401_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 95;
dc->caps.i2c_speed_in_khz_hdcp = 95; /*1.4 w/a applied by default*/
- /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
+ /* used to set cursor pitch, so it must be a power of 2 (HW actually supports 78x78) */
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64;
dc->caps.cursor_not_scaled = true;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
@@ -1900,8 +1929,8 @@ static bool dcn401_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
- dc->caps.color.dpp.ogam_ram = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 0;
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
@@ -1919,8 +1948,30 @@ static bool dcn401_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
dc->config.use_spl = true;
dc->config.prefer_easf = true;
+
+ dc->config.dcn_sharpness_range.sdr_rgb_min = 0;
+ dc->config.dcn_sharpness_range.sdr_rgb_max = 1750;
+ dc->config.dcn_sharpness_range.sdr_rgb_mid = 750;
+ dc->config.dcn_sharpness_range.sdr_yuv_min = 0;
+ dc->config.dcn_sharpness_range.sdr_yuv_max = 3500;
+ dc->config.dcn_sharpness_range.sdr_yuv_mid = 1500;
+ dc->config.dcn_sharpness_range.hdr_rgb_min = 0;
+ dc->config.dcn_sharpness_range.hdr_rgb_max = 2750;
+ dc->config.dcn_sharpness_range.hdr_rgb_mid = 1500;
+
+ dc->config.dcn_override_sharpness_range.sdr_rgb_min = 0;
+ dc->config.dcn_override_sharpness_range.sdr_rgb_max = 3250;
+ dc->config.dcn_override_sharpness_range.sdr_rgb_mid = 1250;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_min = 0;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_max = 3500;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_mid = 1500;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_min = 0;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_max = 2750;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500;
+
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */
@@ -2139,6 +2190,8 @@ static bool dcn401_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2157,7 +2210,6 @@ static bool dcn401_resource_construct(
dc->config.sdpif_request_limit_words_per_umc = 16;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.map_dc_pipes_with_callbacks = true;
@@ -2190,6 +2242,10 @@ static bool dcn401_resource_construct(
/* SPL */
dc->caps.scl_caps.sharpener_support = true;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 4c259745d519..2ae6831c31ef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -22,9 +22,9 @@ struct resource_pool *dcn401_create_resource_pool(
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state);
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 28348734d900..55b929ca7982 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -196,7 +196,12 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
struct spl_rect mpc_rec;
- if (use_recout_width_aligned) {
+ if (spl_in->basic_in.custom_width != 0) {
+ mpc_rec.width = spl_in->basic_in.custom_width;
+ mpc_rec.x = spl_in->basic_in.custom_x;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ } else if (use_recout_width_aligned) {
mpc_rec.width = recout_width_align;
if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) {
mpc_rec.width = plane_clip_rec->width % recout_width_align;
@@ -219,7 +224,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
/* extra pixels in the division remainder need to go to pipes after
* the extra pixel index minus one(epimo) defined here as:
*/
- if (mpc_slice_idx > epimo) {
+ if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0) {
mpc_rec.x += mpc_slice_idx - epimo - 1;
mpc_rec.width += 1;
}
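The epimo arithmetic above is easier to follow with concrete numbers. A minimal standalone sketch, not part of the patch; the even floor-divided base width is an assumption about the non-aligned path:

#include <stdio.h>

int main(void)
{
	int width = 1003, mpc_slice_count = 4;
	int base = width / mpc_slice_count;                        /* floor: 250 */
	int epimo = mpc_slice_count - width % mpc_slice_count - 1; /* 4 - 3 - 1 = 0 */

	/* slices after index epimo each absorb one remainder pixel */
	for (int idx = 0; idx < mpc_slice_count; idx++)
		printf("slice %d: width %d\n", idx, base + (idx > epimo ? 1 : 0));

	return 0; /* prints 250, 251, 251, 251 -- summing back to 1003 */
}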
@@ -252,10 +257,10 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i
odm_rec.x = odm_slice_width * odm_slice_idx;
odm_rec.width = is_last_odm_slice ?
- /* last slice width is the reminder of h_active */
- h_active - odm_slice_width * (odm_slice_count - 1) :
- /* odm slice width is the floor of h_active / count */
- odm_slice_width;
+ /* last slice width is the remainder of h_active */
+ h_active - odm_slice_width * (odm_slice_count - 1) :
+ /* odm slice width is the floor of h_active / count */
+ odm_slice_width;
odm_rec.y = 0;
odm_rec.height = v_active;
@@ -776,7 +781,7 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
* Do not bypass UV at 1:1 for cositing to be applied
*/
if (!enable_isharp) {
- if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one && !spl_in->basic_out.always_scale)
return SCL_MODE_SCALING_420_LUMA_BYPASS;
}
@@ -884,7 +889,9 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
/* Calculate number of tap with adaptive scaling off */
static void spl_get_taps_non_adaptive_scaler(
- struct spl_scratch *spl_scratch, const struct spl_taps *in_taps)
+ struct spl_scratch *spl_scratch,
+ const struct spl_taps *in_taps,
+ bool is_subsampled)
{
bool check_max_downscale = false;
@@ -944,13 +951,14 @@ static void spl_get_taps_non_adaptive_scaler(
spl_fixpt_from_fraction(6, 1));
SPL_ASSERT(check_max_downscale);
+
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
spl_scratch->scl_data.taps.h_taps = 1;
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled)
spl_scratch->scl_data.taps.v_taps_c = 1;
}
@@ -965,13 +973,13 @@ static bool spl_get_optimal_number_of_taps(
unsigned int max_taps_y, max_taps_c;
unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- bool skip_easf = false;
+ bool skip_easf = false;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
spl_scratch->scl_data.viewport.width > max_downscale_src_width) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -980,7 +988,7 @@ static bool spl_get_optimal_number_of_taps(
/* Disable adaptive scaler and sharpener when integer scaling is enabled */
if (spl_in->scaling_quality.integer_scaling) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -995,8 +1003,9 @@ static bool spl_get_optimal_number_of_taps(
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
* taps = 4 for upscaling
*/
- if (skip_easf)
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ if (skip_easf) {
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
+ }
else {
if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
@@ -1122,7 +1131,6 @@ static bool spl_get_optimal_number_of_taps(
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) {
spl_scratch->scl_data.taps.h_taps = 1;
spl_scratch->scl_data.taps.v_taps = 1;
-
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
@@ -1147,6 +1155,7 @@ static bool spl_get_optimal_number_of_taps(
if ((!*enable_easf_v) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
spl_scratch->scl_data.taps.v_taps_c = 1;
+
}
}
return true;
@@ -1297,7 +1306,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_v) {
dscl_prog_data->easf_v_en = true;
dscl_prog_data->easf_v_ring = 0;
- dscl_prog_data->easf_v_sharp_factor = 0;
+ dscl_prog_data->easf_v_sharp_factor = 1;
dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode
/* 2-bit, BF3 chroma mode correction calculation mode */
@@ -1461,7 +1470,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
- dscl_prog_data->easf_h_sharp_factor = 0;
+ dscl_prog_data->easf_h_sharp_factor = 1;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =
@@ -1898,3 +1907,4 @@ bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out
spl_set_taps_data(dscl_prog_data, data);
return res;
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 1c3949b24611..23d254dea18f 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -460,6 +460,8 @@ struct basic_in {
enum spl_color_space color_space; // Color Space
unsigned int max_luminance; // Max Luminance TODO: Is determined in dc_hw_sequencer.c is_sdr
bool film_grain_applied; // Film Grain Applied // TODO: To check from where to get this?
+ int custom_width; // Width for non-standard segmentation - used when != 0
+ int custom_x; // Start x for non-standard segmentation - used when custom_width != 0
};
// Basic output information
@@ -480,6 +482,10 @@ enum sharpness_setting {
SHARPNESS_ZERO,
SHARPNESS_CUSTOM
};
+enum sharpness_range_source {
+ SHARPNESS_RANGE_DCN = 0,
+ SHARPNESS_RANGE_DCN_OVERRIDE
+};
struct spl_sharpness_range {
int sdr_rgb_min;
int sdr_rgb_max;
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
index 52d97918a3bd..ebf0287417e0 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
@@ -29,8 +29,6 @@ static inline unsigned long long spl_complete_integer_division_u64(
{
unsigned long long result;
- SPL_ASSERT(divisor);
-
result = spl_div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -196,8 +194,6 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
* Good idea to use Newton's method
*/
- SPL_ASSERT(arg.value);
-
return spl_fixpt_from_fraction(
spl_fixpt_one.value,
arg.value);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 4e0efff92dca..0bafb6710761 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -51,8 +51,8 @@
* for the cache windows.
*
* The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
- * for command submission. Commands can be queued via dmub_srv_cmd_queue()
- * and executed via dmub_srv_cmd_execute().
+ * for command submission. Commands can be queued via dmub_srv_fb_cmd_queue()
+ * and executed via dmub_srv_fb_cmd_execute().
*
* If the queue is full the dmub_srv_wait_for_idle() call can be used to
* wait until the queue has been cleared.
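A minimal sketch of the renamed flow, using only the entry points declared in this header; the 100 ms timeout and single-retry policy are illustrative, not the driver's actual policy:

static enum dmub_status submit_fb_cmd(struct dmub_srv *dmub,
				      const union dmub_rb_cmd *cmd)
{
	enum dmub_status status;

	status = dmub_srv_fb_cmd_queue(dmub, cmd);
	if (status == DMUB_STATUS_QUEUE_FULL) {
		/* drain the ring, then retry once */
		status = dmub_srv_wait_for_idle(dmub, 100000);
		if (status != DMUB_STATUS_OK)
			return status;
		status = dmub_srv_fb_cmd_queue(dmub, cmd);
	}
	if (status != DMUB_STATUS_OK)
		return status;

	/* kick the DMCUB to start processing the queued work */
	return dmub_srv_fb_cmd_execute(dmub);
}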
@@ -129,7 +129,9 @@ enum dmub_window_id {
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_SCRATCH_MEM,
+ DMUB_WINDOW_IB_MEM,
DMUB_WINDOW_SHARED_STATE,
+ DMUB_WINDOW_LSDMA_BUFFER,
DMUB_WINDOW_TOTAL,
};
@@ -142,6 +144,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
DMUB_NOTIFICATION_DPIA_NOTIFICATION,
DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
+ DMUB_NOTIFICATION_FUSED_IO,
DMUB_NOTIFICATION_MAX
};
@@ -170,6 +173,13 @@ enum dmub_srv_power_state_type {
DMUB_POWER_STATE_D3 = 8
};
+/* enum dmub_inbox_cmd_interface_type - defines the default interface for host->dmub commands */
+enum dmub_inbox_cmd_interface_type {
+ DMUB_CMD_INTERFACE_DEFAULT = 0,
+ DMUB_CMD_INTERFACE_FB = 1,
+ DMUB_CMD_INTERFACE_REG = 2,
+};
+
/**
* struct dmub_region - dmub hw memory region
* @base: base address for region, must be 256 byte aligned
@@ -347,6 +357,22 @@ struct dmub_diagnostic_data {
uint8_t is_traceport_en : 1;
uint8_t is_cw0_enabled : 1;
uint8_t is_cw6_enabled : 1;
+ uint8_t is_pwait : 1;
+};
+
+struct dmub_srv_inbox {
+ /* generic status */
+ uint64_t num_submitted;
+ uint64_t num_reported;
+ union {
+ /* frame buffer mailbox status */
+ struct dmub_rb rb;
+ /* register mailbox status */
+ struct {
+ bool is_pending;
+ bool is_multi_pending;
+ };
+ };
};
/**
@@ -422,6 +448,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub);
+ uint32_t (*emul_get_inbox1_wptr)(struct dmub_srv *dmub);
+
void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
bool (*is_supported)(struct dmub_srv *dmub);
@@ -462,18 +490,21 @@ struct dmub_srv_hw_funcs {
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
void (*subvp_save_surf_addr)(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
+
void (*send_reg_inbox0_cmd_msg)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
uint32_t (*read_reg_inbox0_rsp_int_status)(struct dmub_srv *dmub);
void (*read_reg_inbox0_cmd_rsp)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void (*write_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*clear_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
+
uint32_t (*read_reg_outbox0_rdy_int_status)(struct dmub_srv *dmub);
void (*write_reg_outbox0_rdy_int_ack)(struct dmub_srv *dmub);
void (*read_reg_outbox0_msg)(struct dmub_srv *dmub, uint32_t *msg);
void (*write_reg_outbox0_rsp)(struct dmub_srv *dmub, uint32_t *rsp);
uint32_t (*read_reg_outbox0_rsp_int_status)(struct dmub_srv *dmub);
- void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
void (*enable_reg_outbox0_rdy_int)(struct dmub_srv *dmub, bool enable);
};
@@ -493,6 +524,7 @@ struct dmub_srv_create_params {
enum dmub_asic asic;
uint32_t fw_version;
bool is_virtual;
+ enum dmub_inbox_cmd_interface_type inbox_type;
};
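With the new field, the creator picks the default host-to-DMUB interface up front. A hedged sketch, assuming the field names above; DMUB_ASIC_NONE is a placeholder and the rest of the init sequence is elided:

struct dmub_srv_create_params params = {
	.asic = DMUB_ASIC_NONE,               /* placeholder; real code selects the ASIC */
	.fw_version = 0,
	.is_virtual = false,
	.inbox_type = DMUB_CMD_INTERFACE_REG, /* or DMUB_CMD_INTERFACE_FB / _DEFAULT */
};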
/**
@@ -510,6 +542,7 @@ struct dmub_srv {
uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
+ struct dmub_fb ib_mem_gart;
volatile struct dmub_shared_state_feature_block *shared_state;
volatile const struct dmub_fw_state *fw_state;
@@ -521,8 +554,9 @@ struct dmub_srv {
const struct dmub_srv_dcn401_regs *regs_dcn401;
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
- struct dmub_rb inbox1_rb;
+ struct dmub_srv_inbox inbox1;
uint32_t inbox1_last_wptr;
+ struct dmub_srv_inbox reg_inbox0;
/**
* outbox1_rb is accessed without locks (dal & dc)
* and to be used only in dmub_srv_stat_get_notification()
@@ -542,9 +576,11 @@ struct dmub_srv {
struct dmub_fw_meta_info meta_info;
struct dmub_feature_caps feature_caps;
struct dmub_visual_confirm_color visual_confirm_color;
+ enum dmub_inbox_cmd_interface_type inbox_type;
enum dmub_srv_power_state_type power_state;
struct dmub_diagnostic_data debug;
+ struct dmub_fb lsdma_rb_fb;
};
/**
@@ -566,22 +602,11 @@ struct dmub_notification {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
enum set_config_status sc_status;
- /**
- * DPIA notification command.
- */
- struct dmub_rb_cmd_dpia_notification dpia_notification;
struct dmub_rb_cmd_hpd_sense_notify_data hpd_sense_notify;
+ struct dmub_cmd_fused_request fused_request;
};
};
-/* enum dmub_ips_mode - IPS mode identifier */
-enum dmub_ips_mode {
- DMUB_IPS_MODE_IPS1_MAX = 0,
- DMUB_IPS_MODE_IPS2,
- DMUB_IPS_MODE_IPS1_RCG,
- DMUB_IPS_MODE_IPS1_ONO2_ON
-};
-
/**
* DMUB firmware version helper macro - useful for checking if the version
* of a firmware to know if feature or functionality is supported or present.
@@ -699,19 +724,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
/**
- * dmub_srv_sync_inbox1() - sync sw state with hw state
- * @dmub: the dmub service
- *
- * Sync sw state with hw state when resume from S0i3
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
-
-/**
- * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * dmub_srv_fb_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service
* @cmd: the command to queue
*
@@ -723,11 +736,11 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
* DMUB_STATUS_QUEUE_FULL - no remaining room in queue
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd);
/**
- * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
+ * dmub_srv_fb_cmd_execute() - Executes a queued sequence to the dmub
* @dmub: the dmub service
*
* Begins execution of queued commands on the dmub.
@@ -736,7 +749,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
* DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub);
/**
* dmub_srv_wait_for_hw_pwr_up() - Waits for firmware hardware power up is completed
@@ -795,6 +808,23 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
uint32_t timeout_us);
/**
+ * dmub_srv_wait_for_pending() - Re-entrant wait for messages currently pending
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the commands queued prior to this call are complete.
+ * If interfaces remain busy due to additional work being submitted
+ * concurrently, this function will not continue to wait.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
* dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
* @dmub: the dmub service
* @timeout_us: the maximum number of microseconds to wait
@@ -892,9 +922,6 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
union dmub_fw_boot_options *option);
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd);
-
enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
bool skip);
@@ -959,26 +986,6 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
/**
- * dmub_srv_send_reg_inbox0_cmd() - send a dmub command and wait for the command
- * being processed by DMUB.
- * @dmub: The dmub service
- * @cmd: The dmub command being sent. If with_replay is true, the function will
- * update cmd with replied data.
- * @with_reply: true if DMUB reply needs to be copied back to cmd. false if the
- * cmd doesn't need to be replied.
- * @timeout_us: timeout in microseconds.
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_TIMEOUT - DMUB fails to process the command within the timeout
- * interval.
- */
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us);
-
-/**
* dmub_srv_set_power_state() - Track DC power state in dmub_srv
* @dmub: The dmub service
* @power_state: DC power state setting
@@ -990,4 +997,71 @@ enum dmub_status dmub_srv_send_reg_inbox0_cmd(
*/
void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state);
+/**
+ * dmub_srv_reg_cmd_execute() - Executes provided command to the dmub
+ * @dmub: the dmub service
+ * @cmd: the command packet to be executed
+ *
+ * Executes a single command for the dmub.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd);
+
+
+/**
+ * dmub_srv_cmd_get_response() - Copies return data for command into buffer
+ * @dmub: the dmub service
+ * @cmd_rsp: response buffer
+ *
+ * Copies return data for command into buffer
+ */
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp);
+
+/**
+ * dmub_srv_sync_inboxes() - Sync inbox state
+ * @dmub: the dmub service
+ *
+ * Sync inbox state
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_wait_for_inbox_free() - Waits for space in the DMUB inbox to free up
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ * @num_free_required: number of free entries required
+ *
+ * Waits until at least the requested number of inbox entries is free.
+ * The maximum wait time is given in microseconds to prevent spinning
+ * forever.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required);
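A hedged sketch of the back-pressure pattern this helper enables: reserve room for a small batch before queuing it. The helper name and the 30 ms timeout are illustrative:

static enum dmub_status queue_batch(struct dmub_srv *dmub,
				    const union dmub_rb_cmd *cmds,
				    uint32_t count)
{
	enum dmub_status status;
	uint32_t i;

	/* wait until the whole batch fits, instead of failing mid-batch */
	status = dmub_srv_wait_for_inbox_free(dmub, 30000, count);
	if (status != DMUB_STATUS_OK)
		return status;

	for (i = 0; i < count; i++) {
		status = dmub_srv_fb_cmd_queue(dmub, &cmds[i]);
		if (status != DMUB_STATUS_OK)
			return status;
	}

	return dmub_srv_fb_cmd_execute(dmub);
}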
+
+/**
+ * dmub_srv_update_inbox_status() - Updates pending status for inbox & reg inbox0
+ * @dmub: the dmub service
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_HW_FAILURE - issue with HW programming
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub);
+
#endif /* _DMUB_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 1f5f4e3e49d4..c587b3441e07 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -36,6 +36,9 @@
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
+#ifdef __forceinline
+#undef __forceinline
+#endif
#define __forceinline inline
/**
@@ -101,6 +104,14 @@
*/
#define DMUB_MAX_FPO_STREAMS 4
+/* Define to ensure that the "common" members always appear in the same
+ * order in different structs for back compat purposes
+ */
+#define COMMON_STREAM_STATIC_SUB_STATE \
+ struct dmub_fams2_cmd_legacy_stream_static_state legacy; \
+ struct dmub_fams2_cmd_subvp_stream_static_state subvp; \
+ struct dmub_fams2_cmd_drr_stream_static_state drr;
+
/* Maximum number of streams on any ASIC. */
#define DMUB_MAX_STREAMS 6
@@ -288,6 +299,31 @@ union dmub_addr {
} u; /*<< Low/high bit access */
uint64_t quad_part; /*<< 64 bit address */
};
+
+/* Flattened structure containing SOC BB parameters stored in the VBIOS
+ * It is not practical to store the entire bounding box in VBIOS since the bounding box struct can gain new parameters.
+ * This also prevents alignment issues when new parameters are added to the SoC BB.
+ * The following parameters should be added since these values can't be obtained elsewhere:
+ * -dml2_soc_power_management_parameters
+ * -dml2_soc_vmin_clock_limits
+ */
+struct dmub_soc_bb_params {
+ uint32_t dram_clk_change_blackout_ns;
+ uint32_t dram_clk_change_read_only_ns;
+ uint32_t dram_clk_change_write_only_ns;
+ uint32_t fclk_change_blackout_ns;
+ uint32_t g7_ppt_blackout_ns;
+ uint32_t stutter_enter_plus_exit_latency_ns;
+ uint32_t stutter_exit_latency_ns;
+ uint32_t z8_stutter_enter_plus_exit_latency_ns;
+ uint32_t z8_stutter_exit_latency_ns;
+ uint32_t z8_min_idle_time_ns;
+ uint32_t type_b_dram_clk_change_blackout_ns;
+ uint32_t type_b_ppt_blackout_ns;
+ uint32_t vmin_limit_dispclk_khz;
+ uint32_t vmin_limit_dcfclk_khz;
+ uint32_t g7_temperature_read_blackout_ns;
+};
#pragma pack(pop)
/**
@@ -547,6 +583,11 @@ union replay_hw_flags {
* @is_alpm_initialized: Indicates whether ALPM is initialized
*/
uint32_t is_alpm_initialized : 1;
+
+ /**
+ * @alpm_mode: Indicates ALPM mode selected
+ */
+ uint32_t alpm_mode : 2;
} bitfields;
uint32_t u32All;
@@ -739,6 +780,21 @@ enum dmub_ips_disable_type {
DMUB_IPS_DISABLE_IPS2_Z10 = 4,
DMUB_IPS_DISABLE_DYNAMIC = 5,
DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6,
+ DMUB_IPS_DISABLE_Z8_RETENTION = 7,
+};
+
+enum dmub_ips_rcg_disable_type {
+ DMUB_IPS_RCG_ENABLE = 0,
+ DMUB_IPS0_RCG_DISABLE = 1,
+ DMUB_IPS1_RCG_DISABLE = 2,
+ DMUB_IPS_RCG_DISABLE = 3
+};
+
+enum dmub_ips_in_vpb_disable_type {
+ DMUB_IPS_VPB_RCG_ONLY = 0, // Legacy behaviour
+ DMUB_IPS_VPB_DISABLE_ALL = 1,
+ DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG = 2,
+ DMUB_IPS_VPB_ENABLE_ALL = 3 // Enable IPS1 Z8, IPS1 and RCG
};
#define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -746,6 +802,17 @@ enum dmub_ips_disable_type {
#define DMUB_IPS1_COMMIT_MASK 0x00000004
#define DMUB_IPS2_COMMIT_MASK 0x00000008
+enum dmub_ips_comand_type {
+ /**
+ * Start/stop IPS residency measurements for a given IPS mode
+ */
+ DMUB_CMD__IPS_RESIDENCY_CNTL = 0,
+ /**
+ * Query IPS residency information for a given IPS mode
+ */
+ DMUB_CMD__IPS_QUERY_RESIDENCY_INFO = 1,
+};
+
/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH14
*/
@@ -815,13 +882,14 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_fw - Firmware signals for IPS.
*/
-union dmub_shared_state_ips_fw_signals {
+ union dmub_shared_state_ips_fw_signals {
struct {
- uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
+ uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */
uint32_t detection_required : 1; /**< 1 if detection is required */
- uint32_t reserved_bits : 28; /**< Reversed */
+ uint32_t ips1z8_commit: 1; /**< 1 if in IPS1 Z8 Retention */
+ uint32_t reserved_bits : 27; /**< Reserved */
} bits;
uint32_t all;
};
@@ -829,14 +897,19 @@ union dmub_shared_state_ips_fw_signals {
/**
* struct dmub_shared_state_ips_signals - Firmware signals for IPS.
*/
-union dmub_shared_state_ips_driver_signals {
+ union dmub_shared_state_ips_driver_signals {
struct {
uint32_t allow_pg : 1; /**< 1 if PG is allowed */
uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */
uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */
- uint32_t reserved_bits : 27; /**< Reversed bits */
+ uint32_t allow_ips0_rcg : 1; /**< 1 if IPS0 RCG is allowed */
+ uint32_t allow_ips1_rcg : 1; /**< 1 if IPS1 RCG is allowed */
+ uint32_t allow_ips1z8 : 1; /**< 1 if IPS1 Z8 Retention is allowed */
+ uint32_t allow_dynamic_ips1 : 1; /**< 1 if IPS1 is allowed in dynamic use cases such as VPB */
+ uint32_t allow_dynamic_ips1_z8: 1; /**< 1 if IPS1 z8 ret is allowed in dynamic use cases such as VPB */
+ uint32_t reserved_bits : 22; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -865,7 +938,9 @@ struct dmub_shared_state_ips_fw {
uint32_t ips1_exit_count; /**< Exit counter for IPS1 */
uint32_t ips2_entry_count; /**< Entry counter for IPS2 */
uint32_t ips2_exit_count; /**< Exit counter for IPS2 */
- uint32_t reserved[55]; /**< Reversed, to be updated when adding new fields. */
+ uint32_t ips1_z8ret_entry_count; /**< Entry counter for IPS1 Z8 Retention */
+ uint32_t ips1_z8ret_exit_count; /**< Exit counter for IPS1 Z8 Retention */
+ uint32_t reserved[53]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
@@ -1253,6 +1328,10 @@ enum dmub_gpint_command {
* DESC: Setup debug configs.
*/
DMUB_GPINT__SETUP_DEBUG_MODE = 136,
+ /**
+ * DESC: Initiates IPS wake sequence.
+ */
+ DMUB_GPINT__IPS_DEBUG_WAKE = 137,
};
/**
@@ -1482,6 +1561,16 @@ enum dmub_cmd_type {
*/
DMUB_CMD__FUSED_IO = 89,
+ /**
+ * Command type used for all LSDMA commands.
+ */
+ DMUB_CMD__LSDMA = 90,
+
+ /**
+ * Command type use for all IPS commands.
+ */
+ DMUB_CMD__IPS = 91,
+
DMUB_CMD__VBIOS = 128,
};
@@ -1892,6 +1981,121 @@ struct dmub_rb_cmd_fams2_flip {
struct dmub_fams2_flip_info flip_info;
};
+struct dmub_cmd_lsdma_data {
+ union {
+ struct lsdma_init_data {
+ union dmub_addr gpu_addr_base;
+ uint32_t ring_size;
+ } init_data;
+ struct lsdma_tiled_copy_data {
+ uint32_t src_addr_lo;
+ uint32_t src_addr_hi;
+ uint32_t dst_addr_lo;
+ uint32_t dst_addr_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t src_width : 16;
+ uint32_t src_height : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t dst_width : 16;
+ uint32_t dst_height : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_swizzle_mode : 5;
+ uint32_t src_mip_max : 5;
+ uint32_t src_mip_id : 5;
+ uint32_t dst_mip_max : 5;
+ uint32_t dst_swizzle_mode : 5;
+ uint32_t dst_mip_id : 5;
+ uint32_t tmz : 1;
+ uint32_t dcc : 1;
+
+ uint32_t data_format : 6;
+ uint32_t padding1 : 4;
+ uint32_t dst_element_size : 3;
+ uint32_t num_type : 3;
+ uint32_t src_element_size : 3;
+ uint32_t write_compress : 2;
+ uint32_t cache_policy_dst : 2;
+ uint32_t cache_policy_src : 2;
+ uint32_t read_compress : 2;
+ uint32_t src_dim : 2;
+ uint32_t dst_dim : 2;
+ uint32_t max_uncom : 1;
+
+ uint32_t max_com : 2;
+ uint32_t padding : 30;
+ } tiled_copy_data;
+ struct lsdma_linear_copy_data {
+ uint32_t count : 30;
+ uint32_t cache_policy_dst : 2;
+
+ uint32_t tmz : 1;
+ uint32_t cache_policy_src : 2;
+ uint32_t padding : 29;
+
+ uint32_t src_lo;
+ uint32_t src_hi;
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+ } linear_copy_data;
+ struct lsdma_reg_write_data {
+ uint32_t reg_addr;
+ uint32_t reg_data;
+ } reg_write_data;
+ struct lsdma_pio_copy_data {
+ union {
+ struct {
+ uint32_t byte_count : 26;
+ uint32_t src_loc : 1;
+ uint32_t dst_loc : 1;
+ uint32_t src_addr_inc : 1;
+ uint32_t dst_addr_inc : 1;
+ uint32_t overlap_disable : 1;
+ uint32_t constant_fill : 1;
+ } fields;
+ uint32_t raw;
+ } packet;
+ uint32_t src_lo;
+ uint32_t src_hi;
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+ } pio_copy_data;
+ struct lsdma_pio_constfill_data {
+ union {
+ struct {
+ uint32_t byte_count : 26;
+ uint32_t src_loc : 1;
+ uint32_t dst_loc : 1;
+ uint32_t src_addr_inc : 1;
+ uint32_t dst_addr_inc : 1;
+ uint32_t overlap_disable : 1;
+ uint32_t constant_fill : 1;
+ } fields;
+ uint32_t raw;
+ } packet;
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+ uint32_t data;
+ } pio_constfill_data;
+
+ uint32_t all[14];
+ } u;
+
+};
+
+struct dmub_rb_cmd_lsdma {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_lsdma_data lsdma_data;
+};
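As a hedged illustration of how the linear-copy payload might be filled: the header field names (type/sub_type) follow the common dmub_cmd_header layout, and the count encoding (bytes vs. bytes - 1) is an assumption, not confirmed by this patch:

static void fill_lsdma_linear_copy(struct dmub_rb_cmd_lsdma *cmd,
				   uint64_t src, uint64_t dst, uint32_t bytes)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->header.type = DMUB_CMD__LSDMA;
	cmd->header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;

	/* count encoding is assumed to be raw bytes here */
	cmd->lsdma_data.u.linear_copy_data.count = bytes;
	cmd->lsdma_data.u.linear_copy_data.tmz = 0;
	cmd->lsdma_data.u.linear_copy_data.src_lo = lower_32_bits(src);
	cmd->lsdma_data.u.linear_copy_data.src_hi = upper_32_bits(src);
	cmd->lsdma_data.u.linear_copy_data.dst_lo = lower_32_bits(dst);
	cmd->lsdma_data.u.linear_copy_data.dst_hi = upper_32_bits(dst);
}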
+
struct dmub_optc_state_v2 {
uint32_t v_total_min;
uint32_t v_total_max;
@@ -1923,6 +2127,28 @@ enum fams2_stream_type {
FAMS2_STREAM_TYPE_SUBVP = 4,
};
+struct dmub_rect16 {
+ /**
+ * Dirty rect x offset.
+ */
+ uint16_t x;
+
+ /**
+ * Dirty rect y offset.
+ */
+ uint16_t y;
+
+ /**
+ * Dirty rect width.
+ */
+ uint16_t width;
+
+ /**
+ * Dirty rect height.
+ */
+ uint16_t height;
+};
+
/* static stream state */
struct dmub_fams2_legacy_stream_static_state {
uint8_t vactive_det_fill_delay_otg_vlines;
@@ -1995,11 +2221,13 @@ union dmub_fams2_stream_static_sub_state {
}; //v0
union dmub_fams2_cmd_stream_static_sub_state {
- struct dmub_fams2_cmd_legacy_stream_static_state legacy;
- struct dmub_fams2_cmd_subvp_stream_static_state subvp;
- struct dmub_fams2_cmd_drr_stream_static_state drr;
+ COMMON_STREAM_STATIC_SUB_STATE
}; //v1
+union dmub_fams2_stream_static_sub_state_v2 {
+ COMMON_STREAM_STATIC_SUB_STATE
+}; //v2
+
struct dmub_fams2_stream_static_state {
enum fams2_stream_type type;
uint32_t otg_vline_time_ns;
@@ -2065,7 +2293,7 @@ struct dmub_fams2_cmd_stream_static_base_state {
struct dmub_fams2_stream_static_state_v1 {
struct dmub_fams2_cmd_stream_static_base_state base;
- union dmub_fams2_cmd_stream_static_sub_state sub_state;
+ union dmub_fams2_stream_static_sub_state_v2 sub_state;
}; //v1
/**
@@ -2113,6 +2341,11 @@ union dmub_cmd_fams2_config {
} stream_v1; //v1
};
+struct dmub_fams2_config_v2 {
+ struct dmub_cmd_fams2_global_config global;
+ struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1
+};
+
/**
* DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy)
*/
@@ -2122,6 +2355,22 @@ struct dmub_rb_cmd_fams2 {
};
/**
+ * Indirect buffer descriptor
+ */
+struct dmub_ib_data {
+ union dmub_addr src; // location of indirect buffer in memory
+ uint16_t size; // indirect buffer size in bytes
+};
+
+/**
+ * DMUB rb command definition for commands passed over indirect buffer
+ */
+struct dmub_rb_cmd_ib {
+ struct dmub_cmd_header header;
+ struct dmub_ib_data ib_data;
+};
+
+/**
* enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/
enum dmub_cmd_idle_opt_type {
@@ -2144,6 +2393,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware notify power state.
*/
DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
+
+ /**
+ * DCN notify to release HW.
+ */
+ DMUB_CMD__IDLE_OPT_RELEASE_HW = 4,
};
/**
@@ -2289,7 +2543,8 @@ struct dmub_dig_transmitter_control_data_v1_7 {
uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */
uint8_t reserved1; /**< For future use */
- uint8_t reserved2[3]; /**< For future use */
+ uint8_t skip_phy_ssc_reduction;
+ uint8_t reserved2[2]; /**< For future use */
uint32_t reserved3[11]; /**< For future use */
};
@@ -2636,7 +2891,11 @@ enum dp_hpd_type {
/**
* DP HPD short pulse
*/
- DP_IRQ
+ DP_IRQ = 1,
+ /**
+ * Failure to acquire DP HPD state
+ */
+ DP_NONE_HPD = 2
};
/**
@@ -2903,6 +3162,7 @@ enum dmub_cmd_fams_type {
DMUB_CMD__FAMS2_CONFIG = 4,
DMUB_CMD__FAMS2_DRR_UPDATE = 5,
DMUB_CMD__FAMS2_FLIP = 6,
+ DMUB_CMD__FAMS2_IB_CONFIG = 7,
};
/**
@@ -3609,6 +3869,12 @@ struct dmub_rb_cmd_psr_set_power_opt {
struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
};
+enum dmub_alpm_mode {
+ ALPM_AUXWAKE = 0,
+ ALPM_AUXLESS = 1,
+ ALPM_UNSUPPORTED = 2,
+};
+
/**
* Definition of Replay Residency GPINT command.
* Bit[0] - Residency mode for Revision 0
@@ -3742,6 +4008,15 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
};
+struct dmub_alpm_auxless_data {
+ uint16_t lfps_setup_ns;
+ uint16_t lfps_period_ns;
+ uint16_t lfps_silence_ns;
+ uint16_t lfps_t1_t2_override_us;
+ short lfps_t1_t2_offset_us;
+ uint8_t lttpr_count;
+};
+
/**
* Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -3773,6 +4048,14 @@ struct dmub_cmd_replay_copy_settings_data {
*/
uint8_t digbe_inst;
/**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
* AUX HW instance.
*/
uint8_t aux_inst;
@@ -3812,6 +4095,15 @@ struct dmub_cmd_replay_copy_settings_data {
* Use FSM state for Replay power up/down
*/
uint8_t use_phy_fsm;
+ /**
+ * Use for AUX-less ALPM LFPS wake operation
+ */
+ struct dmub_alpm_auxless_data auxless_alpm_data;
+
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -3867,6 +4159,18 @@ struct dmub_rb_cmd_replay_enable_data {
* This does not support HDMI/DP2 for now.
*/
uint8_t phy_rate;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -4360,6 +4664,42 @@ enum dmub_cmd_abm_type {
* Get the current ACE curve.
*/
DMUB_CMD__ABM_GET_ACE_CURVE = 10,
+
+ /**
+ * Get current histogram data
+ */
+ DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11,
+};
+
+/**
+ * LSDMA command sub-types.
+ */
+enum dmub_cmd_lsdma_type {
+ /**
+ * Initialize parameters for LSDMA.
+ * Ring buffer is mapped to the ring buffer
+ */
+ DMUB_CMD__LSDMA_INIT_CONFIG = 0,
+ /**
+ * LSDMA copies data from source to destination linearly
+ */
+ DMUB_CMD__LSDMA_LINEAR_COPY = 1,
+ /**
+ * Send the tiled-to-tiled copy command
+ */
+ DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 2,
+ /**
+ * Send the poll reg write command
+ */
+ DMUB_CMD__LSDMA_POLL_REG_WRITE = 3,
+ /**
+ * Send the pio copy command
+ */
+ DMUB_CMD__LSDMA_PIO_COPY = 4,
+ /**
+ * Send the pio constfill command
+ */
+ DMUB_CMD__LSDMA_PIO_CONSTFILL = 5,
};
struct abm_ace_curve {
@@ -4954,6 +5294,20 @@ enum dmub_abm_ace_curve_type {
};
/**
+ * enum dmub_abm_histogram_type - Histogram type.
+ */
+enum dmub_abm_histogram_type {
+ /**
+ * ACE curve as defined by the SW layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW = 0,
+ /**
+ * ACE curve as defined by the SW to HW translation interface layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW_IF = 1,
+};
+
+/**
* Definition of a DMUB_CMD__ABM_GET_ACE_CURVE command.
*/
struct dmub_rb_cmd_abm_get_ace_curve {
@@ -4989,6 +5343,41 @@ struct dmub_rb_cmd_abm_get_ace_curve {
};
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM command.
+ */
+struct dmub_rb_cmd_abm_get_histogram {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Address where Histogram should be copied.
+ */
+ union dmub_addr dest;
+
+ /**
+ * Type of Histogram being queried.
+ */
+ enum dmub_abm_histogram_type histogram_type;
+
+ /**
+ * Indirect buffer length.
+ */
+ uint16_t bytes;
+
+ /**
+ * eDP panel instance.
+ */
+ uint8_t panel_inst;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad;
+};
+
+/**
* Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
*/
struct dmub_rb_cmd_abm_save_restore {
@@ -5389,7 +5778,8 @@ struct dmub_cmd_fused_request {
struct dmub_cmd_fused_request_location_i2c {
uint8_t is_aux : 1; // False
uint8_t ddc_line : 3;
- uint8_t _reserved0 : 4;
+ uint8_t over_aux : 1;
+ uint8_t _reserved0 : 3;
uint8_t address;
uint8_t offset;
uint8_t length;
@@ -5517,6 +5907,59 @@ struct dmub_rb_cmd_assr_enable {
};
/**
+ * Current definition of "ips_mode" from driver
+ */
+enum ips_residency_mode {
+ IPS_RESIDENCY__IPS1_MAX,
+ IPS_RESIDENCY__IPS2,
+ IPS_RESIDENCY__IPS1_RCG,
+ IPS_RESIDENCY__IPS1_ONO2_ON,
+};
+
+#define NUM_IPS_HISTOGRAM_BUCKETS 16
+
+/**
+ * IPS residency statistics to be sent to driver - subset of struct dmub_ips_residency_stats
+ */
+struct dmub_ips_residency_info {
+ uint32_t residency_millipercent;
+ uint32_t entry_counter;
+ uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS];
+ uint64_t total_time_us;
+ uint64_t total_inactive_time_us;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__IPS_RESIDENCY_CNTL command.
+ */
+struct dmub_cmd_ips_residency_cntl_data {
+ uint8_t panel_inst;
+ uint8_t start_measurement;
+ uint8_t padding[2]; // align to 4-byte boundary
+};
+
+struct dmub_rb_cmd_ips_residency_cntl {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_ips_residency_cntl_data cntl_data;
+};
+
+/**
+ * Data passed from FW to driver in a DMUB_CMD__IPS_QUERY_RESIDENCY_INFO command.
+ */
+struct dmub_cmd_ips_query_residency_info_data {
+ union dmub_addr dest;
+ uint32_t size;
+ uint32_t ips_mode;
+ uint8_t panel_inst;
+ uint8_t padding[3]; // align to 4-byte boundary
+};
+
+struct dmub_rb_cmd_ips_query_residency_info {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_ips_query_residency_info_data info_data;
+};
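A hedged sketch of the two-step residency flow these structs imply: start a measurement, then later query it into a GPU-visible buffer. The helper and panel/mode choices are illustrative; the struct and union members are the ones added in this patch:

static void build_ips_residency_cmds(union dmub_rb_cmd *cntl,
				     union dmub_rb_cmd *query,
				     uint64_t dest_gpu_addr)
{
	memset(cntl, 0, sizeof(*cntl));
	cntl->ips_residency_cntl.header.type = DMUB_CMD__IPS;
	cntl->ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
	cntl->ips_residency_cntl.cntl_data.panel_inst = 0;
	cntl->ips_residency_cntl.cntl_data.start_measurement = 1;

	memset(query, 0, sizeof(*query));
	query->ips_query_residency_info.header.type = DMUB_CMD__IPS;
	query->ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
	query->ips_query_residency_info.info_data.dest.quad_part = dest_gpu_addr;
	query->ips_query_residency_info.info_data.size = sizeof(struct dmub_ips_residency_info);
	query->ips_query_residency_info.info_data.ips_mode = IPS_RESIDENCY__IPS2;
	query->ips_query_residency_info.info_data.panel_inst = 0;
}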
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -5687,6 +6130,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_get_ace_curve abm_get_ace_curve;
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM command.
+ */
+ struct dmub_rb_cmd_abm_get_histogram abm_get_histogram;
+
+ /**
* Definition of a DMUB_CMD__ABM_SET_EVENT command.
*/
struct dmub_rb_cmd_abm_set_event abm_set_event;
@@ -5817,13 +6265,25 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/
struct dmub_rb_cmd_assr_enable assr_enable;
+
struct dmub_rb_cmd_fams2 fams2_config;
+ struct dmub_rb_cmd_ib ib_fams2_config;
+
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;
struct dmub_rb_cmd_fused_io fused_io;
+
+ /**
+ * Definition of a DMUB_CMD__LSDMA command.
+ */
+ struct dmub_rb_cmd_lsdma lsdma;
+
+ struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl;
+
+ struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info;
};
/**
@@ -5934,6 +6394,9 @@ static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb)
else
data_count = rb->capacity - (rb->rptr - rb->wrpt);
+ /* Account for the one entry that is always left unusable, so a full ring can be told from an empty one */
+ data_count += DMUB_RB_CMD_SIZE;
+
return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE;
}
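A minimal standalone mirror of the free-space math above, showing why one DMUB_RB_CMD_SIZE entry is reserved: it keeps rptr == wrpt unambiguous (always "empty", never "full"):

#include <assert.h>
#include <stdint.h>

#define CMD_SIZE 64u /* stand-in for DMUB_RB_CMD_SIZE */

static uint32_t rb_num_free(uint32_t capacity, uint32_t rptr, uint32_t wrpt)
{
	uint32_t data_count = (wrpt >= rptr) ? wrpt - rptr
					     : capacity - (rptr - wrpt);

	data_count += CMD_SIZE; /* the always-unusable entry */
	return (capacity - data_count) / CMD_SIZE;
}

int main(void)
{
	/* 4-entry ring: at most 3 entries are usable. */
	assert(rb_num_free(4 * CMD_SIZE, 0, 0) == 3);
	/* wrpt one entry behind rptr (wrapped) -> ring is full, 0 free. */
	assert(rb_num_free(4 * CMD_SIZE, CMD_SIZE, 0) == 0);
	return 0;
}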
@@ -5953,6 +6416,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
else
data_count = rb->capacity - (rb->rptr - rb->wrpt);
+ /* Full when everything but the one always-unusable entry is occupied */
return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index a308bd604677..3f38db752b84 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -416,7 +416,7 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub)
void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
@@ -466,6 +466,9 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 72a0f078cd1a..2228d62adc7e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -92,19 +92,15 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
- if (in_reset == 0) {
+ if (in_reset == 0 && is_enabled != 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- */
-
for (i = 0; i < timeout; ++i) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
@@ -130,11 +126,9 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
-
if (is_enabled) {
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ udelay(1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
}
@@ -160,11 +154,7 @@ void dmub_dcn35_reset_release(struct dmub_srv *dmub)
LONO_SOCCLK_GATE_DISABLE, 1,
LONO_DMCUBCLK_GATE_DISABLE, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
- udelay(1);
REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- udelay(1);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0);
}
@@ -464,7 +454,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub)
void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
@@ -515,6 +505,9 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index e67f7c4784eb..b31adbd0d685 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -66,24 +66,20 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
const uint32_t timeout_us = 1 * 1000 * 1000; //1s
const uint32_t poll_delay_us = 1; //1us
uint32_t i = 0;
- uint32_t in_reset, scratch, pwait_mode;
+ uint32_t enabled, in_reset, scratch, pwait_mode;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL,
+ DMCUB_ENABLE, &enabled);
+ REG_GET(DMCUB_CNTL2,
+ DMCUB_SOFT_RESET, &in_reset);
- if (in_reset == 0) {
+ if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
- break;
-
- udelay(poll_delay_us);
- }
-
for (; i < timeout_us; i++) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
@@ -417,7 +413,7 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub)
void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
@@ -468,6 +464,9 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
@@ -517,28 +516,69 @@ void dmub_dcn401_send_reg_inbox0_cmd_msg(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd)
{
uint32_t *dwords = (uint32_t *)cmd;
-
+ int32_t payload_size_bytes = cmd->cmd_common.header.payload_bytes;
+ uint32_t msg_index;
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[0]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[1]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[2]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[3]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[4]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[5]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[6]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[7]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[8]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[9]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[10]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[11]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[12]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[13]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[14]);
+ /* write the payload dwords, bounded by the payload size */
+ for (msg_index = 0; msg_index < 15; msg_index++) {
+ if (payload_size_bytes <= msg_index * 4) {
+ break;
+ }
+
+ switch (msg_index) {
+ case 0:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[msg_index + 1]);
+ break;
+ case 1:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[msg_index + 1]);
+ break;
+ case 2:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[msg_index + 1]);
+ break;
+ case 3:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[msg_index + 1]);
+ break;
+ case 4:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[msg_index + 1]);
+ break;
+ case 5:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[msg_index + 1]);
+ break;
+ case 6:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[msg_index + 1]);
+ break;
+ case 7:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[msg_index + 1]);
+ break;
+ case 8:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[msg_index + 1]);
+ break;
+ case 9:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[msg_index + 1]);
+ break;
+ case 10:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[msg_index + 1]);
+ break;
+ case 11:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[msg_index + 1]);
+ break;
+ case 12:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[msg_index + 1]);
+ break;
+ case 13:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[msg_index + 1]);
+ break;
+ case 14:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[msg_index + 1]);
+ break;
+ }
+ }
+
/* writing to INBOX RDY register will trigger DMUB REG INBOX0 RDY
* interrupt.
*/
- REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[15]);
+ REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[0]);
}
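The rework above moves the command header into dwords[0] and writes it last, to the RDY register, so the doorbell write and the header travel together; only the dwords actually covered by payload_bytes hit the MSG registers. A standalone sketch of the resulting layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t payload_bytes = 10; /* e.g. a 10-byte payload */
	unsigned int msg_index;

	for (msg_index = 0; msg_index < 15; msg_index++) {
		if (payload_bytes <= (int32_t)(msg_index * 4))
			break;
		/* MSGn carries payload dword n+1 */
		printf("DMCUB_REG_INBOX0_MSG%u <- dwords[%u]\n",
		       msg_index, msg_index + 1);
	}
	/* header goes last, triggering the REG INBOX0 RDY interrupt */
	printf("DMCUB_REG_INBOX0_RDY  <- dwords[0] (header)\n");
	return 0;
}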
uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub)
@@ -556,30 +596,39 @@ void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- dwords[0] = REG_READ(DMCUB_REG_INBOX0_MSG0);
- dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG1);
- dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG2);
- dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG3);
- dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG4);
- dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG5);
- dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG6);
- dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG7);
- dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG8);
- dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG9);
- dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG10);
- dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG11);
- dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG12);
- dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG13);
- dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG14);
- dwords[15] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[0] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG0);
+ dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG1);
+ dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG2);
+ dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG3);
+ dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG4);
+ dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG5);
+ dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG6);
+ dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG7);
+ dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG8);
+ dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG9);
+ dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG10);
+ dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG11);
+ dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG12);
+ dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG13);
+ dwords[15] = REG_READ(DMCUB_REG_INBOX0_MSG14);
}
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 1);
+}
+
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
+{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 0);
}
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
+{
+ REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
+}
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK, 1);
@@ -604,11 +653,6 @@ uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub)
return status;
}
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
-{
- REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
-}
-
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN, enable ? 1:0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
index c35be52676f6..88c3a44d67d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
@@ -277,11 +277,13 @@ uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_outbox0_msg(struct dmub_srv *dmub, uint32_t *msg);
void dmub_dcn401_write_reg_outbox0_rsp(struct dmub_srv *dmub, uint32_t *msg);
uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub);
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable);
uint32_t dmub_dcn401_read_reg_outbox0_rdy_int_status(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index ae8133816b43..b17a19400c06 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -65,6 +65,12 @@
/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (1024)
+/* Default indirect buffer size. */
+#define DMUB_IB_MEM_SIZE (1280)
+
+/* Default LSDMA ring buffer size. */
+#define DMUB_LSDMA_RB_SIZE (64 * 1024)
+
/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */
@@ -157,6 +163,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
+ /* default to specifying no inbox type */
+ enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT;
+
switch (asic) {
case DMUB_ASIC_DCN20:
case DMUB_ASIC_DCN21:
@@ -395,10 +404,15 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn401_get_current_time;
funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data;
+
funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg;
funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status;
funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp;
funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack;
+ funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack;
+ funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
+ default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now
+
funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack;
funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg;
funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp;
@@ -411,6 +425,20 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
return false;
}
+ /* set default inbox type if not overridden */
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) {
+ if (default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) {
+ /* use default inbox type as specified by DCN rev */
+ dmub->inbox_type = default_inbox_type;
+ } else if (funcs->send_reg_inbox0_cmd_msg) {
+ /* prefer reg as default inbox type if present */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_REG;
+ } else {
+ /* use fb as fallback */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_FB;
+ }
+ }
+
return true;
}
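A standalone sketch of the selection precedence implemented above: an explicit creation-time inbox_type wins, then the per-DCN default, then REG when the register-inbox funcs exist, with FB as the final fallback:

#include <stdbool.h>
#include <stdio.h>

enum inbox_type { IF_DEFAULT, IF_REG, IF_FB };

static enum inbox_type pick_inbox(enum inbox_type requested,
				  enum inbox_type dcn_default,
				  bool has_reg_inbox)
{
	if (requested != IF_DEFAULT)       /* creation-time override */
		return requested;
	if (dcn_default != IF_DEFAULT)     /* per-DCN default */
		return dcn_default;
	return has_reg_inbox ? IF_REG : IF_FB;
}

int main(void)
{
	/* DCN4.01 case: REG funcs exist but the DCN default pins FB for now. */
	printf("%d\n", pick_inbox(IF_DEFAULT, IF_FB, true)); /* -> IF_FB */
	return 0;
}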
@@ -426,6 +454,7 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
dmub->asic = params->asic;
dmub->fw_version = params->fw_version;
dmub->is_virtual = params->is_virtual;
+ dmub->inbox_type = params->inbox_type;
/* Setup asic dependent hardware funcs. */
if (!dmub_srv_hw_setup(dmub, params->asic)) {
@@ -536,7 +565,9 @@ enum dmub_status
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE;
window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
+ window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
@@ -622,6 +653,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM];
struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
@@ -632,7 +664,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_INVALID;
if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
- !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
+ !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) {
ASSERT(0);
return DMUB_STATUS_INVALID;
}
@@ -695,7 +727,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
inbox1.base = cw4.region.base;
inbox1.top = cw4.region.base + DMUB_RB_SIZE;
outbox1.base = inbox1.top;
- outbox1.top = cw4.region.top;
+ outbox1.top = inbox1.top + DMUB_RB_SIZE;
cw5.offset.quad_part = tracebuff_fb->gpu_addr;
cw5.region.base = DMUB_CW5_BASE;
@@ -718,6 +750,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->scratch_mem_fb = *scratch_mem_fb;
+ dmub->ib_mem_gart = *ib_mem_gart;
+
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
@@ -737,7 +771,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
rb_params.ctx = dmub;
rb_params.base_address = mail_fb->cpu_addr;
rb_params.capacity = DMUB_RB_SIZE;
- dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+ dmub_rb_init(&dmub->inbox1.rb, &rb_params);
// Initialize outbox1 ring buffer
rb_params.ctx = dmub;
@@ -768,27 +802,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
-{
- if (!dmub->sw_init)
- return DMUB_STATUS_INVALID;
-
- if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
- uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
-
- if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
- return DMUB_STATUS_HW_FAILURE;
- } else {
- dmub->inbox1_rb.rptr = rptr;
- dmub->inbox1_rb.wrpt = wptr;
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
- }
- }
-
- return DMUB_STATUS_OK;
-}
-
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
@@ -799,8 +812,13 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
/* mailboxes have been reset in hw, so reset the sw state as well */
dmub->inbox1_last_wptr = 0;
- dmub->inbox1_rb.wrpt = 0;
- dmub->inbox1_rb.rptr = 0;
+ dmub->inbox1.rb.wrpt = 0;
+ dmub->inbox1.rb.rptr = 0;
+ dmub->inbox1.num_reported = 0;
+ dmub->inbox1.num_submitted = 0;
+ dmub->reg_inbox0.num_reported = 0;
+ dmub->reg_inbox0.num_submitted = 0;
+ dmub->reg_inbox0.is_pending = 0;
dmub->outbox0_rb.wrpt = 0;
dmub->outbox0_rb.rptr = 0;
dmub->outbox1_rb.wrpt = 0;
@@ -811,7 +829,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd)
{
if (!dmub->hw_init)
@@ -820,18 +838,20 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (dmub->power_state != DMUB_POWER_STATE_D0)
return DMUB_STATUS_POWER_STATE_D3;
- if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
- dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
+ if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity ||
+ dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
}
- if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) {
+ dmub->inbox1.num_submitted++;
return DMUB_STATUS_OK;
+ }
return DMUB_STATUS_QUEUE_FULL;
}
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub)
{
struct dmub_rb flush_rb;
@@ -846,13 +866,13 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
* been flushed to framebuffer memory. Otherwise DMCUB might
* read back stale, fully invalid or partially invalid data.
*/
- flush_rb = dmub->inbox1_rb;
+ flush_rb = dmub->inbox1.rb;
flush_rb.rptr = dmub->inbox1_last_wptr;
dmub_rb_flush_pending(&flush_rb);
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt);
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
return DMUB_STATUS_OK;
}
@@ -910,26 +930,84 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub)
+{
+ if (dmub->reg_inbox0.is_pending) {
+ dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (!dmub->reg_inbox0.is_pending) {
+ /* ack the rsp interrupt */
+ if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack)
+ dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+
+ /* only update the reported count if commands aren't being batched */
+ if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) {
+ dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted;
+ }
+ }
+ }
+}
+
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+ struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0;
+ struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1;
+ const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0;
+ const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1;
+
+ if (!dmub->hw_init ||
+ !dmub->hw_funcs.get_inbox1_wptr)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i <= timeout_us; i += polling_interval_us) {
+ scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+ scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending &&
+ dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ /* check current HW state first, but use command submission vs reported as a fallback */
+ if ((dmub_rb_empty(&scratch_inbox1.rb) ||
+ inbox1->num_reported >= scratch_inbox1.num_submitted) &&
+ (!scratch_reg_inbox0.is_pending ||
+ reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted))
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
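A minimal standalone mirror of the completion predicate used in the loop above (names are illustrative): each inbox is drained when its hardware state says so, or when the reported count has caught up with the submitted count as a fallback.

#include <stdbool.h>
#include <stdint.h>

static bool inboxes_drained(bool inbox1_rb_empty,
			    uint32_t inbox1_reported, uint32_t inbox1_submitted,
			    bool reg_inbox0_pending,
			    uint32_t reg_reported, uint32_t reg_submitted)
{
	bool inbox1_done = inbox1_rb_empty ||
			   inbox1_reported >= inbox1_submitted;
	bool reg_done = !reg_inbox0_pending ||
			reg_reported >= reg_submitted;

	return inbox1_done && reg_done;
}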
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
- uint32_t i, rptr;
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
- for (i = 0; i <= timeout_us; ++i) {
- rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
-
- if (rptr > dmub->inbox1_rb.capacity)
- return DMUB_STATUS_HW_FAILURE;
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
- dmub->inbox1_rb.rptr = rptr;
+ if (status != DMUB_STATUS_OK)
+ return status;
- if (dmub_rb_empty(&dmub->inbox1_rb))
+ /* check for idle */
+ if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending)
return DMUB_STATUS_OK;
- udelay(1);
+ udelay(polling_interval_us);
}
return DMUB_STATUS_TIMEOUT;
@@ -1040,35 +1118,6 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd)
-{
- enum dmub_status status = DMUB_STATUS_OK;
-
- // Queue command
- status = dmub_srv_cmd_queue(dmub, cmd);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Execute command
- status = dmub_srv_cmd_execute(dmub);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Wait for DMUB to process command
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Copy data back from ring buffer into command
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd);
-
- return status;
-}
-
static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
void *entry)
{
@@ -1160,47 +1209,162 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_
}
}
+void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+{
+ if (!dmub || !dmub->hw_init)
+ return;
+
+ dmub->power_state = dmub_srv_power_state;
+}
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us)
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
{
- uint32_t rsp_ready = 0;
- uint32_t i;
+ uint32_t num_pending = 0;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+ if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg ||
+ !dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported)
+ num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported;
+ else
+ /* num_submitted wrapped */
+ num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY -
+ (dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted);
+
+ if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY)
+ return DMUB_STATUS_QUEUE_FULL;
+
+ /* clear last rsp ack and send message */
+ dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub);
dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready)
- break;
- udelay(1);
+ dmub->reg_inbox0.num_submitted++;
+ dmub->reg_inbox0.is_pending = true;
+ dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending;
+
+ return DMUB_STATUS_OK;
+}
+
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp)
+{
+ if (dmub) {
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG &&
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp) {
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp);
+ } else {
+ dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp);
+ }
}
- if (rsp_ready == 0)
- return DMUB_STATUS_TIMEOUT;
+}
- if (with_reply)
- dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd);
+static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->sw_init)
+ return DMUB_STATUS_INVALID;
- dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+ dmub->reg_inbox0.is_pending = 0;
+ dmub->reg_inbox0.is_multi_pending = 0;
- /* wait for rsp int status is cleared to initial state before exit */
- for (; i <= timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready == 0)
- break;
- udelay(1);
+ return DMUB_STATUS_OK;
+}
+
+static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) {
+ return DMUB_STATUS_HW_FAILURE;
+ } else {
+ dmub->inbox1.rb.rptr = rptr;
+ dmub->inbox1.rb.wrpt = wptr;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
+ }
}
- ASSERT(rsp_ready == 0);
return DMUB_STATUS_OK;
}
-void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub)
{
- if (!dmub || !dmub->hw_init)
- return;
+ enum dmub_status status;
- dmub->power_state = dmub_srv_power_state;
+ status = dmub_srv_sync_reg_inbox0(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ status = dmub_srv_sync_inbox1(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required)
+{
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
+
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ /* check for space in inbox1 */
+ if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required)
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
+{
+ uint32_t rptr;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ /* update inbox1 state */
+ rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ if (dmub->inbox1.rb.rptr > rptr) {
+ /* rb wrapped */
+ dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ } else {
+ dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ }
+ dmub->inbox1.rb.rptr = rptr;
+
+ /* update reg_inbox0 */
+ dmub_srv_update_reg_inbox0_status(dmub);
+
+ return DMUB_STATUS_OK;
}
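A standalone worked example of the rptr accounting above, including the wrapped case where the distance must be measured through the end of the ring:

#include <stdint.h>
#include <stdio.h>

#define CMD_SIZE 64u /* stand-in for DMUB_RB_CMD_SIZE */

static uint32_t cmds_consumed(uint32_t capacity, uint32_t cached_rptr,
			      uint32_t hw_rptr)
{
	if (cached_rptr > hw_rptr) /* rb wrapped */
		return (hw_rptr + capacity - cached_rptr) / CMD_SIZE;
	return (hw_rptr - cached_rptr) / CMD_SIZE;
}

int main(void)
{
	/* 8-entry ring; cached rptr at entry 6, hw rptr wrapped to entry 1:
	 * entries 6, 7 and 0 were consumed -> 3 commands reported. */
	printf("%u\n", (unsigned int)cmds_consumed(8 * CMD_SIZE,
						   6 * CMD_SIZE,
						   1 * CMD_SIZE));
	return 0;
}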
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index cce887cefc01..567c5b1aeb7a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -95,23 +95,6 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
case DMUB_OUT_CMD__DPIA_NOTIFICATION:
notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
notify->link_index = cmd.dpia_notification.payload.header.instance;
-
- if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) {
-
- notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw;
- notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw;
-
- if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed)
- notify->result = DPIA_BW_REQ_FAILED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded)
- notify->result = DPIA_BW_REQ_SUCCESS;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed)
- notify->result = DPIA_EST_BW_CHANGED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed)
- notify->result = DPIA_BW_ALLOC_CAPS_CHANGED;
- }
break;
case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
@@ -119,6 +102,10 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
&cmd.hpd_sense_notify.data,
sizeof(cmd.hpd_sense_notify.data));
break;
+ case DMUB_OUT_CMD__FUSED_IO:
+ notify->type = DMUB_NOTIFICATION_FUSED_IO;
+ dmub_memcpy(&notify->fused_request, &cmd.fused_io.request, sizeof(cmd.fused_io.request));
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
index 7e3240e73c1f..63813009a3a6 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -86,6 +86,9 @@ enum dc_irq_source dal_irq_get_source(
enum dc_irq_source dal_irq_get_rx_source(
const struct gpio *irq);
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq);
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config);
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 813463ffe15c..cc467031651d 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -424,7 +424,7 @@ struct integrated_info {
/*
* DFS-bypass flag
*/
-/* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */
+/* Copy of SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS from atombios.h */
enum {
DFS_BYPASS_ENABLE = 0x10
};
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 1867aac57cf2..da74ed66c8f9 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -89,6 +89,8 @@ struct link_training_settings {
bool enhanced_framing;
enum lttpr_mode lttpr_mode;
+ bool lttpr_early_tps2;
+
/* disallow different lanes to have different lane settings */
bool disallow_per_lane_settings;
/* dpcd lane settings will always use the same hw lane settings
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 3ba9b62ba70b..71efd2770c99 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -147,7 +147,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
((unsigned int)(div64_u64((1000000000ULL * 1000000),
refresh_in_uhz)));
- if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) {
+ if (refresh_in_uhz <= stream->timing.min_refresh_in_uhz) {
/* When the target refresh rate is the minimum panel refresh rate,
* round down the vtotal value to avoid stretching vblank over
* panel's vtotal boundary.
@@ -155,6 +155,14 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
v_total = div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000000);
+ } else if (refresh_in_uhz >= stream->timing.max_refresh_in_uhz) {
+ /* When the target refresh rate is the maximum panel refresh rate,
+ * round up the vtotal value to prevent an off-by-one error that would
+ * push v_total_min below the panel's lower bound
+ */
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
+ stream->timing.h_total) + (1000000 - 1), 1000000);
} else {
v_total = div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
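The new max-refresh branch is ordinary ceiling division: adding (1000000 - 1) to the numerator before dividing by 1000000 rounds any remainder up instead of truncating it. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	/* plain division truncates; the round-up form bumps any remainder */
	printf("%llu vs %llu\n",
	       (unsigned long long)(1000001ull / 1000000ull),          /* 1 */
	       (unsigned long long)div_round_up(1000001ull, 1000000ull)); /* 2 */
	return 0;
}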
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 55c7d873175f..a37634942b07 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -386,6 +386,7 @@ enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 1d41dd58f6bc..bb8ae80b37f8 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -452,21 +452,12 @@ out:
return status;
}
-static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
- event_ctx->unexpected_event = 1;
- goto out;
- }
-
- if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
- &input->lc_init_prepare, &status,
- hdcp, "lc_init_prepare"))
- goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
&input->lc_init_write, &status,
hdcp, "lc_init_write"))
@@ -482,6 +473,48 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
&input->l_prime_read, &status,
hdcp, "l_prime_read"))
goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw,
+ &input->l_prime_read, &status,
+ hdcp, "l_prime_read"))
+ goto out;
+
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
+ && hdcp->config.ddc.funcs.atomic_write_poll_read_aux
+ && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
+ &input->lc_init_prepare, &status,
+ hdcp, "lc_init_prepare"))
+ goto out;
+
+ status = (use_fw ? locality_check_fw : locality_check_sw)(hdcp, event_ctx, input);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
&input->l_prime_validation, &status,
hdcp, "l_prime_validation"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index c5f6c11de7e5..89ffb89e1932 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -184,17 +184,28 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
break;
- case H2_A2_LOCALITY_CHECK:
+ case H2_A2_LOCALITY_CHECK: {
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
+ && !adjust->hdcp2.force_sw_locality_check;
+
+ /*
+ * 1A-05: consider disconnection after LC init a failure
+ * 1A-13-1: consider invalid l' a failure
+ * 1A-13-2: consider l' timeout a failure
+ */
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_available_poll != PASS ||
- input->l_prime_read != PASS) {
- /*
- * 1A-05: consider disconnection after LC init a failure
- * 1A-13-1: consider invalid l' a failure
- * 1A-13-2: consider l' timeout a failure
- */
+ (!use_fw && input->lc_init_write != PASS) ||
+ (!use_fw && input->l_prime_available_poll != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->l_prime_read != PASS) {
+ if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
+ adjust->hdcp2.force_sw_locality_check = true;
+ callback_in_ms(0, output);
+ break;
+ }
+
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
@@ -205,6 +216,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
+ }
case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
@@ -498,14 +510,25 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
break;
- case D2_A2_LOCALITY_CHECK:
+ case D2_A2_LOCALITY_CHECK: {
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_aux
+ && !adjust->hdcp2.force_sw_locality_check;
+
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_read != PASS) {
+ (!use_fw && input->lc_init_write != PASS)) {
/* 1A-12: consider invalid l' a failure */
fail_and_restart_in_ms(0, &status, output);
break;
+ } else if (input->l_prime_read != PASS) {
+ if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
+ adjust->hdcp2.force_sw_locality_check = true;
+ callback_in_ms(0, output);
+ break;
+ }
+
+ fail_and_restart_in_ms(0, &status, output);
+ break;
} else if (input->l_prime_validation != PASS) {
callback_in_ms(0, output);
increment_stay_counter(hdcp);
@@ -514,6 +537,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
+ }
case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index 6e064e6ae949..2e6408579194 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -688,3 +688,76 @@ enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_INVALID_OPERATION;
}
+
+static bool write_stall_read_lc_fw_aux(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+
+ struct mod_hdcp_atomic_op_aux write = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT],
+ hdcp2->lc_init + 1,
+ sizeof(hdcp2->lc_init) - 1,
+ };
+ struct mod_hdcp_atomic_op_aux stall = { 0, NULL, 0, };
+ struct mod_hdcp_atomic_op_aux read = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime + 1,
+ sizeof(hdcp2->lc_l_prime) - 1,
+ };
+
+ hdcp2->lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_aux(
+ hdcp->config.ddc.handle,
+ &write,
+ &stall,
+ &read,
+ 16 * 1000,
+ 0
+ );
+}
+
+static bool write_poll_read_lc_fw_i2c(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+ uint8_t expected_rxstatus[2] = { sizeof(hdcp2->lc_l_prime) };
+
+ hdcp->buf[0] = hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT];
+ memmove(&hdcp->buf[1], hdcp2->lc_init, sizeof(hdcp2->lc_init));
+
+ struct mod_hdcp_atomic_op_i2c write = {
+ HDCP_I2C_ADDR,
+ 0,
+ hdcp->buf,
+ sizeof(hdcp2->lc_init) + 1,
+ };
+ struct mod_hdcp_atomic_op_i2c poll = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS],
+ expected_rxstatus,
+ sizeof(expected_rxstatus),
+ };
+ struct mod_hdcp_atomic_op_i2c read = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime,
+ sizeof(hdcp2->lc_l_prime),
+ };
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_i2c(
+ hdcp->config.ddc.handle,
+ &write,
+ &poll,
+ &read,
+ 20 * 1000,
+ 6
+ );
+}
+
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp)
+{
+ const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp);
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 8c137d7c032e..e58e7b93810b 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -368,6 +368,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
struct mod_hdcp_display *display = get_first_active_display(hdcp);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index a4d344a4db9e..c42468bb70ac 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -133,9 +133,22 @@ enum mod_hdcp_display_disable_option {
MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION,
};
+struct mod_hdcp_atomic_op_i2c {
+ uint8_t address;
+ uint8_t offset;
+ uint8_t *data;
+ uint32_t size;
+};
+
+struct mod_hdcp_atomic_op_aux {
+ uint32_t address;
+ uint8_t *data;
+ uint32_t size;
+};
+
struct mod_hdcp_ddc {
void *handle;
- struct {
+ struct mod_hdcp_ddc_funcs {
bool (*read_i2c)(void *handle,
uint32_t address,
uint8_t offset,
@@ -153,6 +166,22 @@ struct mod_hdcp_ddc {
uint32_t address,
const uint8_t *data,
uint32_t size);
+ bool (*atomic_write_poll_read_i2c)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
+ bool (*atomic_write_poll_read_aux)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
} funcs;
};
@@ -185,7 +214,8 @@ struct mod_hdcp_link_adjustment_hdcp2 {
uint8_t force_type : 2;
uint8_t force_no_stored_km : 1;
uint8_t increase_h_prime_timeout: 1;
- uint8_t reserved : 3;
+ uint8_t force_sw_locality_check : 1;
+ uint8_t reserved : 2;
};
struct mod_hdcp_link_adjustment {
@@ -272,6 +302,10 @@ struct mod_hdcp_display_query {
struct mod_hdcp_config {
struct mod_hdcp_psp psp;
struct mod_hdcp_ddc ddc;
+ struct {
+ uint8_t lc_enable_sw_fallback : 1;
+ uint8_t reserved : 7;
+ } debug;
uint8_t index;
};
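Pulling the new knobs together, a minimal standalone sketch of the locality-check path selection as read from the hunks above (struct and function names here are illustrative, not part of the module API): the FW path is used when an atomic DDC op is wired up and no fallback has been forced, and an l_prime read failure flips force_sw_locality_check when the debug fallback bit is set.

#include <stdbool.h>

struct lc_state {
	bool has_atomic_ddc;          /* atomic_write_poll_read_* present */
	bool lc_enable_sw_fallback;   /* config.debug bit */
	bool force_sw_locality_check; /* adjust.hdcp2 bit */
};

static bool lc_use_fw(const struct lc_state *s)
{
	return s->has_atomic_ddc && !s->force_sw_locality_check;
}

/* Returns true if the check should be retried via the SW path after a FW
 * l_prime read failure, false if authentication should restart. */
static bool lc_on_lprime_fail(struct lc_state *s)
{
	if (lc_use_fw(s) && s->lc_enable_sw_fallback) {
		s->force_sw_locality_check = true;
		return true;
	}
	return false;
}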
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 758a8aa31fbe..391209a3bf29 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -79,4 +79,6 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
bool fill_custom_backlight_caps(unsigned int config_no,
struct dm_acpi_atif_backlight_caps *caps);
void reset_replay_dsync_error_count(struct dc_link *link);
+void change_replay_to_psr(struct dc_link *link);
+void change_psr_to_replay(struct dc_link *link);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 4c95b885d1d0..bfb446736ca8 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -351,9 +351,10 @@ enum DC_DEBUG_MASK {
DC_DISABLE_HDMI_CEC = 0x10000,
/**
- * @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver.
+ * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted
+ * Memory Clock Switching (FAMS) feature in amdgpu driver.
*/
- DC_DISABLE_SUBVP = 0x20000,
+ DC_DISABLE_SUBVP_FAMS = 0x20000,
/**
* @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves
*/
@@ -366,10 +367,15 @@ enum DC_DEBUG_MASK {
DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000,
/**
- * @DC_HDCP_LC_ENABLE_SW_FALLBACK If set, upon HDCP Locality Check FW
+ * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW
* path failure, retry using legacy SW path.
*/
DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
+
+ /**
+ * @DC_SKIP_DETECTION_LT: If set, skip detection link training
+ */
+ DC_SKIP_DETECTION_LT = 0x200000,
};
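These values are single-bit flags OR-ed into one debug mask. A minimal standalone sketch of how such a mask is tested; the assumption that the mask is commonly supplied via the amdgpu.dcdebugmask module option is noted here for illustration and is not established by this hunk:

#include <stdbool.h>
#include <stdint.h>

#define DC_DISABLE_SUBVP_FAMS 0x20000u
#define DC_SKIP_DETECTION_LT  0x200000u

static bool dc_flag_set(uint32_t dc_debug_mask, uint32_t flag)
{
	return (dc_debug_mask & flag) != 0;
}

/* e.g. dc_flag_set(0x220000, DC_SKIP_DETECTION_LT) -> true:
 * both SubVP/FAMS disable and detection-LT skip are requested. */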
enum amd_dpm_forced_level;
@@ -390,6 +396,7 @@ enum amd_dpm_forced_level;
* (such as allocating any required memory)
* @suspend: handles IP specific hw/sw changes for suspend
* @resume: handles IP specific hw/sw changes for resume
+ * @complete: handles IP specific changes after resume
* @is_idle: returns current IP block idle status
* @wait_for_idle: poll for idle
* @check_soft_reset: check soft reset the IP block
@@ -421,6 +428,7 @@ struct amd_ip_funcs {
int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
int (*suspend)(struct amdgpu_ip_block *ip_block);
int (*resume)(struct amdgpu_ip_block *ip_block);
+ void (*complete)(struct amdgpu_ip_block *ip_block);
bool (*is_idle)(struct amdgpu_ip_block *ip_block);
int (*wait_for_idle)(struct amdgpu_ip_block *ip_block);
bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
index bd8085ec54ed..2d6a598a6c25 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -5242,6 +5242,8 @@
#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0x0000000c
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x00000003L
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x00000000
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE_MASK 0x00000300L
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT 0x00000008
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE_MASK 0x00000030L
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT 0x00000004
#define DENORM_CONTROL__DENORM_MODE_MASK 0x00000007L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
index 15e5a65cf492..70ee6be94a9b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
@@ -9776,6 +9776,14 @@
#define regDIG0_DIG_BE_CNTL_BASE_IDX 2
#define regDIG0_DIG_BE_EN_CNTL 0x20bd
#define regDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG0_HDCP_INT_CONTROL 0x20c0
+#define regDIG0_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG0_HDCP_LINK0_STATUS 0x20c1
+#define regDIG0_HDCP_LINK0_STATUS_BASE_IDX 2
+#define regDIG0_HDCP_I2C_CONTROL_0 0x20c2
+#define regDIG0_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG0_HDCP_I2C_CONTROL_1 0x20c3
+#define regDIG0_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG0_TMDS_CNTL 0x20e4
#define regDIG0_TMDS_CNTL_BASE_IDX 2
#define regDIG0_TMDS_CONTROL_CHAR 0x20e5
@@ -10081,6 +10089,12 @@
#define regDIG1_DIG_BE_CNTL_BASE_IDX 2
#define regDIG1_DIG_BE_EN_CNTL 0x21e1
#define regDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG1_HDCP_INT_CONTROL 0x21e4
+#define regDIG1_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG1_HDCP_I2C_CONTROL_0 0x21e6
+#define regDIG1_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG1_HDCP_I2C_CONTROL_1 0x21e7
+#define regDIG1_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG1_TMDS_CNTL 0x2208
#define regDIG1_TMDS_CNTL_BASE_IDX 2
#define regDIG1_TMDS_CONTROL_CHAR 0x2209
@@ -10386,6 +10400,12 @@
#define regDIG2_DIG_BE_CNTL_BASE_IDX 2
#define regDIG2_DIG_BE_EN_CNTL 0x2305
#define regDIG2_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG2_HDCP_INT_CONTROL 0x2308
+#define regDIG2_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG2_HDCP_I2C_CONTROL_0 0x230a
+#define regDIG2_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG2_HDCP_I2C_CONTROL_1 0x230b
+#define regDIG2_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG2_TMDS_CNTL 0x232c
#define regDIG2_TMDS_CNTL_BASE_IDX 2
#define regDIG2_TMDS_CONTROL_CHAR 0x232d
@@ -10691,6 +10711,12 @@
#define regDIG3_DIG_BE_CNTL_BASE_IDX 2
#define regDIG3_DIG_BE_EN_CNTL 0x2429
#define regDIG3_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG3_HDCP_INT_CONTROL 0x242c
+#define regDIG3_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG3_HDCP_I2C_CONTROL_0 0x242e
+#define regDIG3_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG3_HDCP_I2C_CONTROL_1 0x242f
+#define regDIG3_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG3_TMDS_CNTL 0x2450
#define regDIG3_TMDS_CNTL_BASE_IDX 2
#define regDIG3_TMDS_CONTROL_CHAR 0x2451
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
index 5d9d5fea6e06..e3d841b2e9af 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
@@ -2847,6 +2847,14 @@
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x1
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x2
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x4
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x5
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x6
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x8
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x9
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xa
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0xb
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0xc
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0xd
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xe
@@ -2871,6 +2879,14 @@
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000002L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000004L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000010L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000020L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000040L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000100L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000200L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000400L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000800L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00001000L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00002000L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00004000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
index c75aee25619e..6f44345277af 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
@@ -1779,6 +1779,8 @@
#define mmRLC_TTOP_D 0x3105
#define mmRLC_CLEAR_STATE_RESTORE_BASE 0x30C8
#define mmRLC_PG_AO_CU_MASK 0x310B
+#define mmSPI_STATIC_THREAD_MGMT_1 0x2438
+#define mmSPI_STATIC_THREAD_MGMT_2 0x2439
#define mmSPI_STATIC_THREAD_MGMT_3 0x243A
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
index edc8a793a95d..4dd386b98748 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
@@ -234,6 +234,26 @@
#define mmIH_RB_WPTR_ADDR_HI 0x0F84
#define mmIH_RB_WPTR_ADDR_LO 0x0F85
#define mmIH_STATUS 0x0F88
+
+#define mmDMA_GFX_RB_CNTL 0x3400
+#define mmDMA_GFX_RB_BASE 0x3401
+#define mmDMA_GFX_RB_RPTR 0x3402
+#define mmDMA_GFX_RB_WPTR 0x3403
+#define mmDMA_GFX_RB_RPTR_ADDR_HI 0x3407
+#define mmDMA_GFX_RB_RPTR_ADDR_LO 0x3408
+#define mmDMA_GFX_IB_CNTL 0x3409
+#define mmDMA_GFX_IB_RPTR 0x340a
+#define mmDMA_CNTL 0x340b
+#define mmDMA_STATUS_REG 0x340D
+#define mmDMA_TILING_CONFIG 0x342E
+#define mmDMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
+#define mmDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
+#define mmDMA_POWER_CNTL 0x342F
+#define mmDMA_CLK_CTRL 0x3430
+#define mmDMA_PG 0x3435
+#define mmDMA_PGFSM_CONFIG 0x3436
+#define mmDMA_PGFSM_WRITE 0x3437
+
#define mmSEM_MAILBOX 0x0F9B
#define mmSEM_MAILBOX_CLIENTCONFIG 0x0F9A
#define mmSEM_MAILBOX_CONTROL 0x0F9C
@@ -269,7 +289,4 @@
#define mmVCE_CONFIG 0x0F94
#define mmXDMA_MSTR_MEM_OVERFLOW_CNTL 0x03F8
-/* from the old sid.h */
-#define mmDMA_TILING_CONFIG 0x342E
-
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
index 1c540fe136cb..9f7fc2428b69 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
@@ -823,6 +823,43 @@
#define LX3__RESERVED__SHIFT 0x00000000
#define RINGOSC_MASK__MASK_MASK 0x0000ffffL
#define RINGOSC_MASK__MASK__SHIFT 0x00000000
+
+#define DMA_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define DMA_CNTL__TRAP_ENABLE__SHIFT 0x00000000
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE_MASK 0x00000002L
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE__SHIFT 0x00000001
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x00000002
+#define DMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define DMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x00000003
+#define DMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x0000001C
+#define DMA_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_RB_CNTL__RB_SIZE__SHIFT 0x00000001
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x00000009
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0x0000000C
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0x0000000D
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x00000010
+#define DMA_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK 0x80000000L
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE__SHIFT 0x0000001F
+
+#define DMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define DMA_STATUS_REG__IDLE__SHIFT 0x00000000
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x00000008
+#define DMA_PG__PG_CNTL_ENABLE_MASK 0x00000001L
+#define DMA_PG__PG_CNTL_ENABLE__SHIFT 0x00000000
+
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x00000000
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
@@ -1015,6 +1052,10 @@
#define SRBM_STATUS2__VCE_BUSY__SHIFT 0x00000007
#define SRBM_STATUS2__VCE_RQ_PENDING_MASK 0x00000008L
#define SRBM_STATUS2__VCE_RQ_PENDING__SHIFT 0x00000003
+#define SRBM_STATUS2__DMA_BUSY_MASK 0x00000020L
+#define SRBM_STATUS2__DMA_BUSY__SHIFT 0x00000005
+#define SRBM_STATUS2__DMA1_BUSY_MASK 0x00000040L
+#define SRBM_STATUS2__DMA1_BUSY__SHIFT 0x00000006
#define SRBM_STATUS2__XDMA_BUSY_MASK 0x00000100L
#define SRBM_STATUS2__XDMA_BUSY__SHIFT 0x00000008
#define SRBM_STATUS2__XSP_BUSY_MASK 0x00000010L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
index 6b10be61efc3..bdef1f743df7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
@@ -41,7 +41,49 @@
#define ixLCAC_MC5_CNTL 0x012B
#define ixLCAC_MC5_OVR_SEL 0x012C
#define ixLCAC_MC5_OVR_VAL 0x012D
+
+#define mmCG_SPLL_FUNC_CNTL 0x0180
+#define mmCG_SPLL_FUNC_CNTL_2 0x0181
+#define mmCG_SPLL_FUNC_CNTL_3 0x0182
+#define mmCG_SPLL_FUNC_CNTL_4 0x0183
+#define mmCG_SPLL_STATUS 0x0185
+#define mmSPLL_CNTL_MODE 0x0186
+#define mmCG_SPLL_SPREAD_SPECTRUM 0x0188
+#define mmCG_SPLL_SPREAD_SPECTRUM_2 0x0189
+#define mmCG_SPLL_AUTOSCALE_CNTL 0x018B
+#define mmMPLL_BYPASSCLK_SEL 0x0197
+#define mmCG_CLKPIN_CNTL 0x0198
+#define mmCG_CLKPIN_CNTL_2 0x0199
+#define mmTHM_CLK_CNTL 0x019B
+#define mmMISC_CLK_CNTL 0x019C
+#define mmCG_THERMAL_CTRL 0x01C0
+#define mmCG_THERMAL_STATUS 0x01C1
+#define mmCG_THERMAL_INT 0x01C2
+#define mmCG_MULT_THERMAL_CTRL 0x01C4
+#define mmCG_MULT_THERMAL_STATUS 0x01C5
+#define mmCG_FDO_CTRL0 0x01D5
+#define mmCG_FDO_CTRL1 0x01D6
+#define mmCG_FDO_CTRL2 0x01D7
+#define mmCG_TACH_CTRL 0x01DC
+#define mmCG_TACH_STATUS 0x01DD
+#define mmGENERAL_PWRMGT 0x01E0
+#define mmCG_TPC 0x01E1
+#define mmSCLK_PWRMGT_CNTL 0x01E2
+#define mmTARGET_AND_CURRENT_PROFILE_INDEX 0x01E6
+#define mmCG_FTV 0x01EF
+#define mmCG_FFCT_0 0x01F0
+#define mmCG_BSP 0x01FF
+#define mmCG_AT 0x0200
+#define mmCG_GIT 0x0201
+#define mmCG_SSP 0x0203
+#define mmCG_DISPLAY_GAP_CNTL 0x020A
+#define mmCG_ULV_CONTROL 0x021E
+#define mmCG_ULV_PARAMETER 0x021F
+#define mmSMC_SCRATCH0 0x0221
+#define mmCG_CAC_CTRL 0x022E
+
#define ixSMC_PC_C 0x80000370
+
#define ixTHM_TMON0_DEBUG 0x03F0
#define ixTHM_TMON0_INT_DATA 0x0380
#define ixTHM_TMON0_RDIL0_DATA 0x0300
@@ -110,6 +152,7 @@
#define ixTHM_TMON1_RDIR7_DATA 0x0337
#define ixTHM_TMON1_RDIR8_DATA 0x0338
#define ixTHM_TMON1_RDIR9_DATA 0x0339
+
#define mmGPIOPAD_A 0x05E7
#define mmGPIOPAD_EN 0x05E8
#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x05F1
@@ -127,6 +170,7 @@
#define mmGPIOPAD_STRENGTH 0x05E5
#define mmGPIOPAD_SW_INT_STAT 0x05E4
#define mmGPIOPAD_Y 0x05E9
+
#define mmSMC_IND_ACCESS_CNTL 0x008A
#define mmSMC_IND_DATA_0 0x0081
#define mmSMC_IND_DATA 0x0081
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
index 7d3925b7266e..67d3c7e13a48 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
@@ -23,10 +23,142 @@
#ifndef SMU_6_0_SH_MASK_H
#define SMU_6_0_SH_MASK_H
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03ffffffL
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
-#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003f0L
+#define CG_AT__CG_R_MASK 0x0000FFFFL
+#define CG_AT__CG_R__SHIFT 0x00000000
+#define CG_AT__CG_L_MASK 0xFFFF0000L
+#define CG_AT__CG_L__SHIFT 0x00000010
+
+#define CG_BSP__BSP_MASK 0x0000FFFFL
+#define CG_BSP__BSP__SHIFT 0x00000000
+#define CG_BSP__BSU_MASK 0x000F0000L
+#define CG_BSP__BSU__SHIFT 0x00000010
+
+#define CG_CAC_CTRL__CAC_WINDOW_MASK 0x00FFFFFFL
+#define CG_CAC_CTRL__CAC_WINDOW__SHIFT 0x00000000
+
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x00000002L
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x00000001
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x00000004L
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x00000002
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x00000008L
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x00000003
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x00000100L
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x00000008
+
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK 0x00000003L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT 0x00000000
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK 0x0000000CL
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT 0x00000002
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x0003FFF0L
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x00000004
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x00700000L
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x00000014
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK 0x03000000L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT 0x00000018
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK 0x0C000000L
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT 0x0000001A
+
+#define CG_FFCT_0__UTC_0_MASK 0x000003FFL
+#define CG_FFCT_0__UTC_0__SHIFT 0x00000000
+#define CG_FFCT_0__DTC_0_MASK 0x000FFC00L
+#define CG_FFCT_0__DTC_0__SHIFT 0x0000000A
+
+#define CG_GIT__CG_GICST_MASK 0x0000FFFFL
+#define CG_GIT__CG_GICST__SHIFT 0x00000000
+#define CG_GIT__CG_GIPOT_MASK 0xFFFF0000L
+#define CG_GIT__CG_GIPOT__SHIFT 0x00000010
+
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x00000001L
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK 0x00000002L
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP__SHIFT 0x00000001
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x00000008L
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x00000003
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003F0L
#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x00000004
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x07F00000L
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x00000014
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x000001FFL
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x00800000L
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x00000017
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x04000000L
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x0000001A
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03FFFFFFL
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000L
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x0000001C
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x00000002L
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x00000001
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x00000001L
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x00000000
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK 0x0000FFF0L
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT 0x00000004
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK 0x03FFFFFFL
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT 0x00000000
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK 0x00000200L
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR__SHIFT 0x00000009
+
+#define CG_SSP__SST_MASK 0x0000FFFFL
+#define CG_SSP__SST__SHIFT 0x00000000
+#define CG_SSP__SSTU_MASK 0x000F0000L
+#define CG_SSP__SSTU__SHIFT 0x00000010
+
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x00000007L
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x00000000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x003FC000L
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0x0000000E
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x0001FE00L
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x00000009
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0x0000FF00L
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x00000008
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0x00FF0000L
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x00000010
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK 0x01000000L
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH__SHIFT 0x00000018
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK 0x02000000L
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW__SHIFT 0x00000019
+
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0x0FF00000L
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x00000014
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x00000000
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003FE00L
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x00000009
+
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0x000000FFL
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x00000000
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0x000000FFL
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x00000000
+#define CG_FDO_CTRL2__TMIN_MASK 0x000000FFL
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x00000000
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x00003800L
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0x0000000B
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xFE000000L
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x00000019
+
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x00000007L
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x00000000
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xFFFFFFF8L
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x00000003
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xFFFFFFFFL
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x00000000
+
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x00000001L
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x00000000
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x00000002L
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x00000001
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x00000004L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x00000002
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x00000008L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x00000003
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x00000040L
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x00000006
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x00000400L
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0x0000000A
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x00800000L
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x00000017
+
#define GPIOPAD_A__GPIO_A_MASK 0x7fffffffL
#define GPIOPAD_A__GPIO_A__SHIFT 0x00000000
#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffffL
@@ -195,6 +327,7 @@
#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x00000000
#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffffL
#define GPIOPAD_Y__GPIO_Y__SHIFT 0x00000000
+
#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x00000001L
#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x00000000
#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x0001fffeL
@@ -243,6 +376,37 @@
#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x00000000
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffffL
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x00000000
+
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0x0000FF00L
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x00000008
+
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x00000001L
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x00000000
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1_MASK 0x00000002L
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1__SHIFT 0x00000001
+#define SCLK_PWRMGT_CNTL__FIR_RESET_MASK 0x00000010L
+#define SCLK_PWRMGT_CNTL__FIR_RESET__SHIFT 0x00000004
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK 0x00000020L
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL__SHIFT 0x00000005
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK 0x00000040L
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE__SHIFT 0x00000006
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN_MASK 0x00000080L
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN__SHIFT 0x00000007
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON_MASK 0x00000100L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON__SHIFT 0x00000008
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF_MASK 0x00000200L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF__SHIFT 0x00000009
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF_MASK 0x00000400L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF__SHIFT 0x0000000A
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1_MASK 0x00000800L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1__SHIFT 0x0000000B
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2_MASK 0x00001000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2__SHIFT 0x0000000C
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3_MASK 0x00002000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3__SHIFT 0x0000000D
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x00004000L
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0x0000000E
+
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x00000001L
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x00000000
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x00000100L
@@ -285,6 +449,7 @@
#define SMC_RESP_1__SMC_RESP__SHIFT 0x00000000
#define SMC_RESP_2__SMC_RESP_MASK 0xffffffffL
#define SMC_RESP_2__SMC_RESP__SHIFT 0x00000000
+
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0x000ff000L
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0x0000000c
#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x00000010L
@@ -293,6 +458,8 @@
#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x00000003
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x00000002L
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x00000001
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK 0x0C000000L
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL__SHIFT 0x0000001A
#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000L
#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x0000001c
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x00000001L
@@ -303,10 +470,25 @@
#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x00000002
#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000L
#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x0000001d
+
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0x0f000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x00000018
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x0000001c
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK 0x000000F0L
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT 0x00000004
+
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0x000000FFL
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x00000000
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0x0000FF00L
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x00000008
+
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK 0x000000FFL
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT 0x00000000
+#define MISC_CLK_CNTL__ZCLK_SEL_MASK 0x0000FF00L
+#define MISC_CLK_CNTL__ZCLK_SEL__SHIFT 0x00000008
+
#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x0000001fL
#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x00000000
#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
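With per-field _MASK/__SHIFT pairs in place, the SI power-management code can use amdgpu's generic REG_GET_FIELD()/REG_SET_FIELD() helpers instead of the hand-rolled macros from the old sid.h. A hedged sketch of programming the digital thermal interrupt window (the plain RREG32/WREG32 access and the temperature locals are assumptions; the real call sites live in si_dpm.c, outside this hunk):

	u32 thermal_int = RREG32(mmCG_THERMAL_INT);

	/* thresholds are programmed in degrees C */
	thermal_int = REG_SET_FIELD(thermal_int, CG_THERMAL_INT,
				    DIG_THERM_INTH, high_temp / 1000);
	thermal_int = REG_SET_FIELD(thermal_int, CG_THERMAL_INT,
				    DIG_THERM_INTL, low_temp / 1000);
	WREG32(mmCG_THERMAL_INT, thermal_int);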
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
index 14574112c469..72a118b2af69 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
@@ -1067,7 +1067,13 @@
#define regVCN_FEATURES_BASE_IDX 1
#define regUVD_GPUIOV_STATUS 0x0055
#define regUVD_GPUIOV_STATUS_BASE_IDX 1
+#define regUVD_RAS_VCPU_VCODEC_STATUS 0x0057
+#define regUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1
#define regUVD_SCRATCH15 0x005c
+#define regUVD_RAS_JPEG0_STATUS 0x0059
+#define regUVD_RAS_JPEG0_STATUS_BASE_IDX 1
+#define regUVD_RAS_JPEG1_STATUS 0x005a
+#define regUVD_RAS_JPEG1_STATUS_BASE_IDX 1
#define regUVD_SCRATCH15_BASE_IDX 1
#define regUVD_VERSION 0x005d
#define regUVD_VERSION_BASE_IDX 1
@@ -1147,6 +1153,22 @@
#define regUVD_DPG_LMA_CTL2_BASE_IDX 1
+// addressBlock: uvd_mmsch_dec
+// base address: 0x20d2c
+#define regMMSCH_VF_VMID 0x054b
+#define regMMSCH_VF_VMID_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_LO 0x054c
+#define regMMSCH_VF_CTX_ADDR_LO_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_HI 0x054d
+#define regMMSCH_VF_CTX_ADDR_HI_BASE_IDX 1
+#define regMMSCH_VF_CTX_SIZE 0x054e
+#define regMMSCH_VF_CTX_SIZE_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_HOST 0x0552
+#define regMMSCH_VF_MAILBOX_HOST_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_RESP 0x0553
+#define regMMSCH_VF_MAILBOX_RESP_BASE_IDX 1
+
+
// addressBlock: uvd_vcn_umsch_dec
// base address: 0x21500
#define regVCN_UMSCH_MES_CNTL 0x0740
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
index 5c119a6b87fb..c78b09d6fbae 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
@@ -5714,6 +5714,22 @@
//UVD_GPUIOV_STATUS
#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0
#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L
+//UVD_RAS_VCPU_VCODEC_STATUS
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK 0x80000000L
+
+//UVD_RAS_JPEG0_STATUS
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF_MASK 0x80000000L
+//UVD_RAS_JPEG1_STATUS
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF_MASK 0x80000000L
//UVD_SCRATCH15
#define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0
#define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL
@@ -5929,6 +5945,29 @@
#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L
+// addressBlock: uvd_mmsch_dec
+//MMSCH_VF_VMID
+#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0
+#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5
+#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL
+#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L
+//MMSCH_VF_CTX_ADDR_LO
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L
+//MMSCH_VF_CTX_ADDR_HI
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL
+//MMSCH_VF_CTX_SIZE
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_HOST
+#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_RESP
+#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+
+
// addressBlock: uvd_vcn_umsch_dec
//VCN_UMSCH_MES_CNTL
#define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0
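The MMSCH_VF_CTX_ADDR_LO field added above starts at bit 6, implying the SR-IOV world-switch context descriptor must be 64-byte aligned, with the low register holding address bits [31:6]. A sketch in the style of the existing mmsch_v* init paths (inst, ctx_addr and ctx_size are placeholder locals; the actual MMSCH 5.x init sequence is outside this patch):

	/* ctx_addr must be 64-byte aligned: ADDR_LO only carries bits [31:6] */
	WREG32_SOC15(VCN, inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
	WREG32_SOC15(VCN, inst, regMMSCH_VF_CTX_SIZE, ctx_size);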
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index b78360a71bc9..b344acefc606 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -4308,7 +4308,7 @@ typedef struct _ATOM_DPCD_INFO
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
-// (in offset to start of memory address) is KB aligned instead of byte aligend.
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
// Note3:
/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged
constant across VGA or non VGA adapter,
@@ -6017,7 +6017,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
+#define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10
//ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML
#define SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE 0x00010000
@@ -6460,7 +6460,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9
// ulGPUCapInfo
#define SYS_INFO_V1_9_GPUCAPSINFO_DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_V1_9_GPUCAPSINFO_ENABEL_DFS_BYPASS 0x10
+#define SYS_INFO_V1_9_GPUCAPSINFO_ENABLE_DFS_BYPASS 0x10
//ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML
#define SYS_INFO_V1_9_GPUCAPSINFO_GNB_FAST_RESUME_CAPABLE 0x00010000
//ulGPUCapInfo[18]=1 indicate the IOMMU is not available
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0160d65f3f5e..5c86423c2e92 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -183,6 +183,7 @@ enum atom_dgpu_vram_type {
ATOM_DGPU_VRAM_TYPE_HBM2E = 0x61,
ATOM_DGPU_VRAM_TYPE_GDDR6 = 0x70,
ATOM_DGPU_VRAM_TYPE_HBM3 = 0x80,
+ ATOM_DGPU_VRAM_TYPE_HBM3E = 0x81,
};
enum atom_dp_vs_preemph_def{
@@ -1713,7 +1714,7 @@ enum atom_system_vbiosmisc_def{
// gpucapinfo
enum atom_system_gpucapinf_def{
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS = 0x10,
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS = 0x10,
};
//dpphy_override
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
index 3a4670bc4449..b98b7ae551b5 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
@@ -48,6 +48,7 @@
#define GFX_11_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error
#define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
#define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
+#define GFX_11_0_0__SRCID__SDMA_FENCE 67 // 0x43 User fence
#define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h
new file mode 100644
index 000000000000..467897ec2e65
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __IRQSRCS_GFX_12_0_0_H__
+#define __IRQSRCS_GFX_12_0_0_H__
+
+#define GFX_12_0_0__SRCID__UTCL2_FAULT 0 // UTCL2 has encountered a fault or retry scenario
+#define GFX_12_0_0__SRCID__UTCL2_DATA_POISONING 1 // UTCL2 for data poisoning
+#define GFX_12_0_0__SRCID__MEM_ACCES_MON 10 // 0x0A EA memory access monitor interrupt
+#define GFX_12_0_0__SRCID__SDMA_ATOMIC_RTN_DONE 48 // 0x30 SDMA atomic*_rtn ops complete
+#define GFX_12_0_0__SRCID__SDMA_TRAP 49 // 0x31 Trap
+#define GFX_12_0_0__SRCID__SDMA_SRBMWRITE 50 // 0x32 SRBM write Protection
+#define GFX_12_0_0__SRCID__SDMA_CTXEMPTY 51 // 0x33 Context Empty
+#define GFX_12_0_0__SRCID__SDMA_PREEMPT 52 // 0x34 SDMA New Run List
+#define GFX_12_0_0__SRCID__SDMA_IB_PREEMPT 53 // 0x35 SDMA mid-command buffer preempt interrupt
+#define GFX_12_0_0__SRCID__SDMA_DOORBELL_INVALID 54 // 0x36 Doorbell BE invalid
+#define GFX_12_0_0__SRCID__SDMA_QUEUE_HANG 55 // 0x37 Queue hang or Command timeout
+#define GFX_12_0_0__SRCID__SDMA_ATOMIC_TIMEOUT 56 // 0x38 SDMA atomic CMPSWAP loop timeout
+#define GFX_12_0_0__SRCID__SDMA_POLL_TIMEOUT 57 // 0x39 SRBM read poll timeout
+#define GFX_12_0_0__SRCID__SDMA_PAGE_TIMEOUT 58 // 0x3A Page retry timeout after UTCL2 return nack = 1
+#define GFX_12_0_0__SRCID__SDMA_PAGE_NULL 59 // 0x3B Page Null from UTCL2 when nack = 2
+#define GFX_12_0_0__SRCID__SDMA_PAGE_FAULT 60 // 0x3C Page Fault Error from UTCL2 when nack = 3
+#define GFX_12_0_0__SRCID__SDMA_VM_HOLE 61 // 0x3D MC or SEM address in VM hole
+#define GFX_12_0_0__SRCID__SDMA_ECC 62 // 0x3E ECC Error
+#define GFX_12_0_0__SRCID__SDMA_FROZEN 63 // 0x3F SDMA Frozen
+#define GFX_12_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error
+#define GFX_12_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
+#define GFX_12_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
+#define GFX_12_0_0__SRCID__SDMA_FENCE 70 // 0x46 User fence
+#define GFX_12_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)
+#define GFX_12_0_0__SRCID__CP_GENERIC_INT 177 // 0xB1 CP_GENERIC int
+#define GFX_12_0_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 // 0xB4 PM4 Pkt Rsvd Bits Error
+#define GFX_12_0_0__SRCID__CP_EOP_INTERRUPT 181 // 0xB5 End-of-Pipe Interrupt
+#define GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR 183 // 0xB7 Bad Opcode Error
+#define GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT 184 // 0xB8 Privileged Register Fault
+#define GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT 185 // 0xB9 Privileged Instr Fault
+#define GFX_12_0_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 // 0xBA Wait Memory Semaphore Fault (Sync Object Fault)
+#define GFX_12_0_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 // 0xBB Context Empty Interrupt
+#define GFX_12_0_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 // 0xBC Context Busy Interrupt
+#define GFX_12_0_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 // 0xC0 CP.ME Wait_Reg_Mem Poll Timeout
+#define GFX_12_0_0__SRCID__CP_SIG_INCOMPLETE 193 // 0xC1 "Surface Probe Fault Signal Incomplete"
+#define GFX_12_0_0__SRCID__CP_PREEMPT_ACK 194 // 0xC2 Preemption Acknowledge
+#define GFX_12_0_0__SRCID__CP_GPF 195 // 0xC3 General Protection Fault (GPF)
+#define GFX_12_0_0__SRCID__CP_GDS_ALLOC_ERROR 196 // 0xC4 GDS Alloc Error
+#define GFX_12_0_0__SRCID__CP_ECC_ERROR 197 // 0xC5 ECC Error
+#define GFX_12_0_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 // 0xC7 Compute query status
+#define GFX_12_0_0__SRCID__CP_VM_DOORBELL 200 // 0xC8 Unattached VM Doorbell Received
+#define GFX_12_0_0__SRCID__CP_FUE_ERROR 201 // 0xC9 ECC FUE Error
+#define GFX_12_0_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 // 0xCA Streaming Perf Monitor Interrupt
+#define GFX_12_0_0__SRCID__GRBM_RD_TIMEOUT_ERROR 232 // 0xE8 Read timeout error
+#define GFX_12_0_0__SRCID__GRBM_REG_GUI_IDLE 233 // 0xE9 Register GUI Idle
+#define GFX_12_0_0__SRCID__SQ_INTERRUPT_ID 239 // 0xEF SQ Interrupt (ttrace wrap, errors)
+
+#endif
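As with the existing gfx_11 header, these SRCIDs are consumed by amdgpu_irq_add_id() together with an IH client id when an IP block wires up its interrupt handlers. A hedged sketch of hooking the end-of-pipe interrupt (SOC21_IH_CLIENTID_GRBM_CP and adev->gfx.eop_irq mirror the gfx_v11 pattern; the real gfx_v12 call sites are not part of this patch):

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_12_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;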
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 21dc956b5f35..e2b1ea7467b0 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -108,6 +108,8 @@ enum pp_clock_type {
PP_VCLK1,
PP_DCLK,
PP_DCLK1,
+ PP_ISPICLK,
+ PP_ISPXCLK,
OD_SCLK,
OD_MCLK,
OD_VDDC_CURVE,
@@ -128,6 +130,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_CPU_CLK,
AMDGPU_PP_SENSOR_VDDNB,
AMDGPU_PP_SENSOR_VDDGFX,
+ AMDGPU_PP_SENSOR_VDDBOARD,
AMDGPU_PP_SENSOR_UVD_VCLK,
AMDGPU_PP_SENSOR_UVD_DCLK,
AMDGPU_PP_SENSOR_VCE_ECCLK,
@@ -493,6 +496,7 @@ struct amd_pm_funcs {
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
+ ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table);
ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size);
int (*set_watermarks_for_clock_ranges)(void *handle,
struct pp_smu_wm_range_sets *ranges);
@@ -1591,4 +1595,27 @@ struct amdgpu_pm_metrics {
uint8_t data[];
};
+struct amdgpu_partition_metrics_v1_0 {
+ struct metrics_table_header common_header;
+	/* Current clocks (MHz) */
+ uint16_t current_gfxclk[MAX_XCC];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+ uint16_t padding;
+
+ /* Utilization Instantaneous (%) */
+ uint32_t gfx_busy_inst[MAX_XCC];
+ uint16_t jpeg_busy[NUM_JPEG_ENG_V1];
+ uint16_t vcn_busy[NUM_VCN];
+ /* Utilization Accumulated (%) */
+ uint64_t gfx_busy_acc[MAX_XCC];
+ /* Total App Clock Counter Accumulated */
+ uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_thm_acc[MAX_XCC];
+ uint64_t gfx_low_utilization_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_total_acc[MAX_XCC];
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/include/v11_structs.h b/drivers/gpu/drm/amd/include/v11_structs.h
index f8008270f813..3728389fc3be 100644
--- a/drivers/gpu/drm/amd/include/v11_structs.h
+++ b/drivers/gpu/drm/amd/include/v11_structs.h
@@ -535,8 +535,8 @@ struct v11_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v11_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v11_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
diff --git a/drivers/gpu/drm/amd/include/v12_structs.h b/drivers/gpu/drm/amd/include/v12_structs.h
index 5eabab611b02..03a35f8a65b0 100644
--- a/drivers/gpu/drm/amd/include/v12_structs.h
+++ b/drivers/gpu/drm/amd/include/v12_structs.h
@@ -535,8 +535,8 @@ struct v12_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v12_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v12_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
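In both the v11 and v12 MQDs the repurposed words carry a split 64-bit user-fence address as a lo/hi pair. A minimal sketch of filling and recombining them (mqd and fence_gpu_addr are illustrative locals, not code from this patch):

	mqd->fence_address_lo = lower_32_bits(fence_gpu_addr);
	mqd->fence_address_hi = upper_32_bits(fence_gpu_addr);

	/* consumer side: rebuild the full 64-bit address */
	u64 fence_addr = ((u64)mqd->fence_address_hi << 32) |
			 mqd->fence_address_lo;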
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 3533d43ed1e7..71d986dd7a6e 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -98,6 +98,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
case AMD_IP_BLOCK_TYPE_VPE:
+ case AMD_IP_BLOCK_TYPE_ISP:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate, 0));
@@ -329,6 +330,34 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool support_link_reset = false;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_link_reset = smu_link_reset_is_support(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_link_reset;
+}
+
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_link_reset(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en)
@@ -780,6 +809,21 @@ int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
return ret;
}
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn(smu, inst_mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -809,22 +853,16 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
uint32_t max)
{
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret = 0;
-
- if (type != PP_SCLK)
- return -EINVAL;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
- mutex_lock(&adev->pm.mutex);
- ret = smu_set_soft_freq_range(smu,
- SMU_SCLK,
+ guard(mutex)(&adev->pm.mutex);
+
+ return smu_set_soft_freq_range(smu,
+ type,
min,
max);
- mutex_unlock(&adev->pm.mutex);
-
- return ret;
}
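The guard() scope-based-cleanup helper (<linux/cleanup.h>) acquires pm.mutex and releases it automatically when the enclosing scope ends, which is what allows returning the smu call's result directly. It is shorthand for the lock/call/unlock shape it replaces, roughly:

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu, type, min, max);
	mutex_unlock(&adev->pm.mutex);
	return ret;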
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
@@ -1654,6 +1692,28 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
}
}
+int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
+{
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu->od_enabled;
+ } else {
+ struct pp_hwmgr *hwmgr;
+
+	/*
+	 * dpm on some legacy ASICs doesn't carry the od_enabled member,
+	 * as its pp_handle is cast directly from adev.
+	 */
+ if (amdgpu_dpm_is_legacy_dpm(adev))
+ return false;
+
+ hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
+
+ return hwmgr->od_enabled;
+ }
+}
+
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const char *buf,
size_t size)
@@ -1976,3 +2036,35 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
return ret;
}
+
+/**
+ * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
+ * partition
+ * @adev: Pointer to the device.
+ * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific XCP, including details such as
+ * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, 0 if the callback is not
+ * implemented, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
+ void *table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_xcp_metrics)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
+ table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
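Per the kernel-doc above, consumers follow a two-call pattern: probe the required size with a NULL table, then fetch into an allocated buffer. A hedged sketch (kvzalloc/kvfree follow similar metrics paths; error handling trimmed):

	ssize_t size;
	void *table;

	size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);
	if (size <= 0)
		return size;

	table = kvzalloc(size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, table);
	/* on success the buffer holds a struct amdgpu_partition_metrics_v1_0 */
	kvfree(table);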
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 922def51685b..4b64851fdb42 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1398,6 +1398,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
if (ret)
return -EINVAL;
parameter_size++;
+ if (!tmp_str)
+ break;
while (isspace(*tmp_str))
tmp_str++;
}
@@ -1606,7 +1608,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
long throttling_logging_interval;
- unsigned long flags;
int ret = 0;
ret = kstrtol(buf, 0, &throttling_logging_interval);
@@ -1617,18 +1618,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
return -EINVAL;
if (throttling_logging_interval > 0) {
- raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
/*
* Reset the ratelimit timer internals.
* This can effectively restart the timer.
*/
- adev->throttling_logging_rs.interval =
- (throttling_logging_interval - 1) * HZ;
- adev->throttling_logging_rs.begin = 0;
- adev->throttling_logging_rs.printed = 0;
- adev->throttling_logging_rs.missed = 0;
- raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
+ ratelimit_state_reset_interval(&adev->throttling_logging_rs,
+ (throttling_logging_interval - 1) * HZ);
atomic_set(&adev->throttling_logging_enabled, 1);
} else {
atomic_set(&adev->throttling_logging_enabled, 0);
@@ -1897,7 +1892,7 @@ out:
static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
- if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -1908,7 +1903,7 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
{
uint32_t ss_power;
- if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
(void *)&ss_power))
@@ -2944,6 +2939,23 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
return sysfs_emit(buf, "%d\n", vddgfx);
}
+static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ u32 vddboard;
+ int r;
+
+ /* get the voltage */
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&vddboard);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%d\n", vddboard);
+}
+
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -2951,6 +2963,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
return sysfs_emit(buf, "vddgfx\n");
}
+static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "vddboard\n");
+}
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3294,6 +3312,8 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0)
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
@@ -3341,6 +3361,8 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_label.dev_attr.attr,
&sensor_dev_attr_power1_average.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
@@ -3492,6 +3514,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
+	/* only a few boards support vddboard */
+ if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
+ amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&tmp) == -EOPNOTSUPP)
+ return 0;
+
/* no mclk on APUs other than gc 9,4,3*/
if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
@@ -3618,6 +3647,9 @@ static int parse_input_od_command_lines(const char *buf,
return -EINVAL;
parameter_size++;
+ if (!tmp_str)
+ break;
+
while (isspace(*tmp_str))
tmp_str++;
}
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 4c0f7ad14816..768317ee1486 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -416,11 +416,13 @@ int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev);
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev);
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev);
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
@@ -522,6 +524,8 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
long *input, uint32_t size);
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
+ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
+ void *table);
/**
* @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
@@ -559,6 +563,7 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
void **addr,
size_t *size);
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev);
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const char *buf,
size_t size);
@@ -607,5 +612,6 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 59fae668dc3f..307ebf7e3226 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1242,7 +1242,7 @@ static void kv_dpm_enable_bapm(void *handle, bool enable)
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, enable);
if (ret)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
}
}
@@ -1266,40 +1266,40 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
ret = kv_process_firmware_header(adev);
if (ret) {
- DRM_ERROR("kv_process_firmware_header failed\n");
+ drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n");
return ret;
}
kv_init_fps_limits(adev);
kv_init_graphics_levels(adev);
ret = kv_program_bootup_state(adev);
if (ret) {
- DRM_ERROR("kv_program_bootup_state failed\n");
+ drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n");
return ret;
}
kv_calculate_dfs_bypass_settings(adev);
ret = kv_upload_dpm_settings(adev);
if (ret) {
- DRM_ERROR("kv_upload_dpm_settings failed\n");
+ drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n");
return ret;
}
ret = kv_populate_uvd_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_uvd_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n");
return ret;
}
ret = kv_populate_vce_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_vce_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n");
return ret;
}
ret = kv_populate_samu_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_samu_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n");
return ret;
}
ret = kv_populate_acp_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_acp_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n");
return ret;
}
kv_program_vc(adev);
@@ -1310,39 +1310,39 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
if (pi->enable_auto_thermal_throttling) {
ret = kv_enable_auto_thermal_throttling(adev);
if (ret) {
- DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n");
return ret;
}
}
ret = kv_enable_dpm_voltage_scaling(adev);
if (ret) {
- DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n");
return ret;
}
ret = kv_set_dpm_interval(adev);
if (ret) {
- DRM_ERROR("kv_set_dpm_interval failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n");
return ret;
}
ret = kv_set_dpm_boot_state(adev);
if (ret) {
- DRM_ERROR("kv_set_dpm_boot_state failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n");
return ret;
}
ret = kv_enable_ulv(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_ulv failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n");
return ret;
}
kv_start_dpm(adev);
ret = kv_enable_didt(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_didt failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_didt failed\n");
return ret;
}
ret = kv_enable_smc_cac(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_smc_cac failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n");
return ret;
}
@@ -1350,7 +1350,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
ret = amdgpu_kv_smc_bapm_enable(adev, false);
if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
@@ -1358,7 +1358,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
if (ret) {
- DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n");
return ret;
}
amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
@@ -1382,7 +1382,7 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
err = amdgpu_kv_smc_bapm_enable(adev, false);
if (err)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
@@ -1920,7 +1920,7 @@ static int kv_dpm_set_power_state(void *handle)
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
}
@@ -1931,7 +1931,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
+ drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
@@ -1947,7 +1947,7 @@ static int kv_dpm_set_power_state(void *handle)
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
+ drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
return ret;
}
kv_update_sclk_t(adev);
@@ -1960,7 +1960,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
+ drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
@@ -1972,7 +1972,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_set_enabled_levels(adev);
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
+ drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
return ret;
}
kv_update_acp_boot_level(adev);
@@ -2521,7 +2521,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
if (high_temp > max_temp)
high_temp = max_temp;
if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp);
return -EINVAL;
}
@@ -2563,7 +2563,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
data_offset);
if (crev != 8) {
- DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ drm_err(adev_to_drm(adev), "Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}
pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
@@ -2579,7 +2579,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
else
pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
- DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+ drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n");
}
if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
@@ -2594,7 +2594,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
}
if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS)
pi->caps_enable_dfs_bypass = true;
sumo_construct_sclk_voltage_mapping_table(adev,
@@ -2886,16 +2886,18 @@ kv_dpm_print_power_state(void *handle, void *request_ps)
struct kv_ps *ps = kv_get_ps(rps);
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+ amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+ drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n",
+ rps->vclk, rps->dclk);
for (i = 0; i < ps->num_levels; i++) {
struct kv_pl *pl = &ps->levels[i];
- printk("\t\tpower level %d sclk: %u vddc: %u\n",
- i, pl->sclk,
- kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
+ drm_dbg(adev_to_drm(adev),
+ "power level %d sclk: %u vddc: %u\n",
+ i, pl->sclk,
+ kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
}
- amdgpu_dpm_print_ps_status(adev, rps);
+ amdgpu_dpm_dbg_print_ps_status(adev, rps);
}
static void kv_dpm_fini(struct amdgpu_device *adev)
@@ -3013,13 +3015,13 @@ static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block)
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
- DRM_INFO("amdgpu: dpm initialized\n");
+ drm_info(adev_to_drm(adev), "dpm initialized\n");
return 0;
dpm_failed:
kv_dpm_fini(adev);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
+ drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index c7518b13e787..ea3ace882a10 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -47,7 +47,7 @@
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2)
{
const char *s;
@@ -66,71 +66,45 @@ void amdgpu_dpm_print_class_info(u32 class, u32 class2)
s = "performance";
break;
}
- printk("\tui class: %s\n", s);
- printk("\tinternal class:");
+ drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s);
if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
(class2 == 0))
- pr_cont(" none");
- else {
- if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- pr_cont(" boot");
- if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- pr_cont(" thermal");
- if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
- pr_cont(" limited_pwr");
- if (class & ATOM_PPLIB_CLASSIFICATION_REST)
- pr_cont(" rest");
- if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
- pr_cont(" forced");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- pr_cont(" 3d_perf");
- if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
- pr_cont(" ovrdrv");
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- pr_cont(" uvd");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
- pr_cont(" 3d_low");
- if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- pr_cont(" acpi");
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- pr_cont(" uvd_hd2");
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- pr_cont(" uvd_hd");
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- pr_cont(" uvd_sd");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
- pr_cont(" limited_pwr2");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- pr_cont(" ulv");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- pr_cont(" uvd_mvc");
- }
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tinternal class: none\n");
+ else
+ drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : "");
}
-void amdgpu_dpm_print_cap_info(u32 caps)
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps)
{
- printk("\tcaps:");
- if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- pr_cont(" single_disp");
- if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
- pr_cont(" video");
- if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
- pr_cont(" no_dc");
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n",
+ (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "",
+ (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "",
+ (caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : "");
}
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
- printk("\tstatus:");
- if (rps == adev->pm.dpm.current_ps)
- pr_cont(" c");
- if (rps == adev->pm.dpm.requested_ps)
- pr_cont(" r");
- if (rps == adev->pm.dpm.boot_ps)
- pr_cont(" b");
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n",
+ rps == adev->pm.dpm.current_ps ? " c" : "",
+ rps == adev->pm.dpm.requested_ps ? " r" : "",
+ rps == adev->pm.dpm.boot_ps ? " b" : "");
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -699,64 +673,64 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
}
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_NI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_CI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
- DRM_INFO("External GPIO thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
- DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
@@ -772,7 +746,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
}
} else {
- DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+ drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n",
controller->ucType,
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
@@ -943,9 +917,9 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
return -EINVAL;
if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
- printk("switching from power state:\n");
+ drm_dbg(adev_to_drm(adev), "switching from power state\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
+ drm_dbg(adev_to_drm(adev), "switching to power state\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
index 93bd3973330c..7120eef30509 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
@@ -23,10 +23,9 @@
#ifndef __LEGACY_DPM_H__
#define __LEGACY_DPM_H__
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps);
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2);
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps);
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps);
int amdgpu_get_platform_caps(struct amdgpu_device *adev);
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
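The si_dpm.c changes that follow replace the driver-private register
defines from sid.h with the generated offsets (mm-prefixed) and the
REG__FIELD_MASK / REG__FIELD__SHIFT pairs from the *_d.h / *_sh_mask.h
headers added above. Every conversion reduces to the same
read-modify-write idiom; a sketch, where mmREG and REG__FIELD stand in
for any of the converted registers (illustrative names only):

    /* Update one field of a register using generated mask/shift names. */
    u32 tmp = RREG32(mmREG);
    tmp &= ~REG__FIELD_MASK;
    tmp |= (val << REG__FIELD__SHIFT) & REG__FIELD_MASK;
    WREG32(mmREG, tmp);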
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 1c25f3023e93..52e732be59e3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -30,16 +30,32 @@
#include "amdgpu_atombios.h"
#include "amdgpu_dpm_internal.h"
#include "amd_pcie.h"
-#include "sid.h"
+#include "atom.h"
+#include "gfx_v6_0.h"
#include "r600_dpm.h"
+#include "sid.h"
#include "si_dpm.h"
-#include "atom.h"
#include "../include/pptable.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <legacy_dpm.h>
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
+#include"gmc/gmc_6_0_d.h"
+#include"gmc/gmc_6_0_sh_mask.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
@@ -2193,7 +2209,7 @@ static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
if (xclk == 0)
return 0;
- cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+ cac_window = RREG32(mmCG_CAC_CTRL) & CG_CAC_CTRL__CAC_WINDOW_MASK;
cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
wintime = (cac_window_size * 100) / xclk;
@@ -2489,19 +2505,19 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if (adev->pm.dpm.sq_ramping_threshold == 0)
return -EINVAL;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (SQ_POWER_THROTTLE__MAX_POWER_MASK >> SQ_POWER_THROTTLE__MAX_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (SQ_POWER_THROTTLE__MIN_POWER_MASK >> SQ_POWER_THROTTLE__MIN_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK >> SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK >> SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK >> SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -2510,14 +2526,17 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
enable_sq_ramping) {
- sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
- sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
- sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
- sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
- sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER << SQ_POWER_THROTTLE__MAX_POWER__SHIFT;
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MIN_POWER << SQ_POWER_THROTTLE__MIN_POWER__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA << SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_STI_SIZE << SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_LTI_RATIO << SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT;
} else {
- sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
- sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ sq_power_throttle |= SQ_POWER_THROTTLE__MAX_POWER_MASK |
+ SQ_POWER_THROTTLE__MIN_POWER_MASK;
+ sq_power_throttle2 |= SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
}
smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
@@ -2761,9 +2780,9 @@ static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
if (!cac_tables)
return -ENOMEM;
- reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
- reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
- WREG32(CG_CAC_CTRL, reg);
+ reg = RREG32(mmCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK;
+ reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT);
+ WREG32(mmCG_CAC_CTRL, reg);
si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
si_pi->dyn_powertune_data.dc_pwr_value =
@@ -2962,10 +2981,10 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev)
ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
if (ret)
break;
- p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
- fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
- clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
- clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+ p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK) >> CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
+ fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK) >> CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK) >> CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK) >> CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
fb_div &= ~0x00001FFF;
fb_div >>= 1;
@@ -3669,10 +3688,10 @@ static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
- tmp = RREG32(MC_ARB_RAMCFG);
- row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
- column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
- bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ row = ((tmp & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT) + 10;
+ column = ((tmp & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT) + 8;
+ bank = ((tmp & MC_ARB_RAMCFG__NOOFBANK_MASK) >> MC_ARB_RAMCFG__NOOFBANK__SHIFT) + 2;
density = (1 << (row + column - 20 + bank)) * width;
@@ -3756,11 +3775,11 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
}
if (want_thermal_protection) {
- WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, dpm_event_src << CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT, ~CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK);
if (pi->thermal_protection)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
} else {
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
}
@@ -3785,20 +3804,20 @@ static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
static void si_start_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_stop_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
}
@@ -3838,7 +3857,7 @@ static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power
static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg, u32 parameter)
{
- WREG32(SMC_SCRATCH0, parameter);
+ WREG32(mmSMC_SCRATCH0, parameter);
return amdgpu_si_send_msg_to_smc(adev, msg);
}
@@ -4023,12 +4042,12 @@ static void si_read_clock_registers(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
- si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
- si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
- si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
- si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
- si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
- si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+ si_pi->clock_registers.cg_spll_func_cntl = RREG32(mmCG_SPLL_FUNC_CNTL);
+ si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(mmCG_SPLL_FUNC_CNTL_3);
+ si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(mmCG_SPLL_FUNC_CNTL_4);
+ si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(mmCG_SPLL_SPREAD_SPECTRUM);
+ si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(mmCG_SPLL_SPREAD_SPECTRUM_2);
si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
@@ -4044,14 +4063,14 @@ static void si_enable_thermal_protection(struct amdgpu_device *adev,
bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
else
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
static void si_enable_acpi_power_management(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__STATIC_PM_EN_MASK, ~GENERAL_PWRMGT__STATIC_PM_EN_MASK);
}
#if 0
@@ -4132,9 +4151,9 @@ static void si_program_ds_registers(struct amdgpu_device *adev)
tmp = 0x1;
if (eg_pi->sclk_deep_sleep) {
- WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
- WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
- ~AUTOSCALE_ON_SS_CLEAR);
+ WREG32_P(mmMISC_CLK_CNTL, (tmp << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT), ~MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK);
+ WREG32_P(mmCG_SPLL_AUTOSCALE_CNTL, CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK,
+ ~CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK);
}
}
@@ -4143,18 +4162,18 @@ static void si_program_display_gap(struct amdgpu_device *adev)
u32 tmp, pipe;
int i;
- tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
if (adev->pm.dpm.new_active_crtc_count > 0)
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
else
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
if (adev->pm.dpm.new_active_crtc_count > 1)
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
else
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
@@ -4189,10 +4208,10 @@ static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
if (enable) {
if (pi->sclk_ss)
- WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
} else {
- WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
- WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmCG_SPLL_SPREAD_SPECTRUM, 0, ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
}
}
@@ -4214,15 +4233,15 @@ static void si_setup_bsp(struct amdgpu_device *adev)
&pi->pbsu);
- pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
- pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+ pi->dsp = (pi->bsp << CG_BSP__BSP__SHIFT) | (pi->bsu << CG_BSP__BSU__SHIFT);
+ pi->psp = (pi->pbsp << CG_BSP__BSP__SHIFT) | (pi->pbsu << CG_BSP__BSU__SHIFT);
- WREG32(CG_BSP, pi->dsp);
+ WREG32(mmCG_BSP, pi->dsp);
}
static void si_program_git(struct amdgpu_device *adev)
{
- WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+ WREG32_P(mmCG_GIT, R600_GICST_DFLT << CG_GIT__CG_GICST__SHIFT, ~CG_GIT__CG_GICST_MASK);
}
static void si_program_tp(struct amdgpu_device *adev)
@@ -4231,54 +4250,54 @@ static void si_program_tp(struct amdgpu_device *adev)
enum r600_td td = R600_TD_DFLT;
for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
- WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+ WREG32(mmCG_FFCT_0 + i, (r600_utc[i] << CG_FFCT_0__UTC_0__SHIFT | r600_dtc[i] << CG_FFCT_0__DTC_0__SHIFT));
if (td == R600_TD_AUTO)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
if (td == R600_TD_UP)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
if (td == R600_TD_DOWN)
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
}
static void si_program_tpp(struct amdgpu_device *adev)
{
- WREG32(CG_TPC, R600_TPC_DFLT);
+ WREG32(mmCG_TPC, R600_TPC_DFLT);
}
static void si_program_sstp(struct amdgpu_device *adev)
{
- WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+ WREG32(mmCG_SSP, (R600_SSTU_DFLT << CG_SSP__SSTU__SHIFT | R600_SST_DFLT << CG_SSP__SST__SHIFT));
}
static void si_enable_display_gap(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+ u32 tmp = RREG32(mmCG_DISPLAY_GAP_CNTL);
- tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
- DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT);
- tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
- DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
}
static void si_program_vc(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
- WREG32(CG_FTV, pi->vrc);
+ WREG32(mmCG_FTV, pi->vrc);
}
static void si_clear_vc(struct amdgpu_device *adev)
{
- WREG32(CG_FTV, 0);
+ WREG32(mmCG_FTV, 0);
}
static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
@@ -4735,7 +4754,7 @@ static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
u32 dram_rows;
u32 dram_refresh_rate;
u32 mc_arb_rfsh_rate;
- u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+ u32 tmp = (RREG32(mmMC_ARB_RAMCFG) & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT;
if (tmp >= 4)
dram_rows = 16384;
@@ -4909,7 +4928,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
- reg = CG_R(0xffff) | CG_L(0);
+ reg = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
table->initialState.level.aT = cpu_to_be32(reg);
table->initialState.level.bSP = cpu_to_be32(pi->dsp);
table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
@@ -4935,10 +4954,13 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
table->initialState.level.dpm2.BelowSafeInc = 0;
table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK |
+ SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5057,8 +5079,8 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
@@ -5102,10 +5124,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
table->ACPIState.level.dpm2.BelowSafeInc = 0;
table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5250,8 +5272,8 @@ static int si_init_smc_table(struct amdgpu_device *adev)
if (ret)
return ret;
- WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
- WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+ WREG32(mmCG_ULV_CONTROL, ulv->cg_ulv_control);
+ WREG32(mmCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
lane_width = amdgpu_get_pcie_lanes(adev);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
@@ -5294,16 +5316,16 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
do_div(tmp, reference_clock);
fbdiv = (u32) tmp;
- spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
- spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
- spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+ spll_func_cntl &= ~(CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK | CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK);
+ spll_func_cntl |= dividers.ref_div << CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT;
+ spll_func_cntl |= dividers.post_div << CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 2 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
if (pi->sclk_ss) {
struct amdgpu_atom_ss ss;
@@ -5314,12 +5336,12 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
- cg_spll_spread_spectrum &= ~CLK_S_MASK;
- cg_spll_spread_spectrum |= CLK_S(clk_s);
- cg_spll_spread_spectrum |= SSEN;
+ cg_spll_spread_spectrum &= ~CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK;
+ cg_spll_spread_spectrum |= clk_s << CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ cg_spll_spread_spectrum |= CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
- cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
- cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
}
}
@@ -5485,7 +5507,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
if (pi->mclk_stutter_mode_threshold &&
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
- (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+ (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
(adev->pm.dpm.new_active_crtc_count <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
}
@@ -5579,7 +5601,7 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
return -EINVAL;
if (state->performance_level_count < 2) {
- a_t = CG_R(0xffff) | CG_L(0);
+ a_t = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
smc_state->levels[0].aT = cpu_to_be32(a_t);
return 0;
}
@@ -5600,13 +5622,13 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
}
- a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
- a_t |= CG_R(t_l * pi->bsp / 20000);
+ a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_AT__CG_R_MASK;
+ a_t |= (t_l * pi->bsp / 20000) << CG_AT__CG_R__SHIFT;
smc_state->levels[i].aT = cpu_to_be32(a_t);
high_bsp = (i == state->performance_level_count - 2) ?
pi->pbsp : pi->bsp;
- a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+ a_t = (0xffff) << CG_AT__CG_R__SHIFT | (t_h * high_bsp / 20000) << CG_AT__CG_L__SHIFT;
smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
}
@@ -6180,9 +6202,9 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev,
static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
}
static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
@@ -6204,8 +6226,8 @@ static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
{
u32 speed_cntl;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
- speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
return (u16)speed_cntl;
}
@@ -6412,21 +6434,21 @@ static void si_dpm_setup_asic(struct amdgpu_device *adev)
static int si_thermal_enable_alert(struct amdgpu_device *adev,
bool enable)
{
- u32 thermal_int = RREG32(CG_THERMAL_INT);
+ u32 thermal_int = RREG32(mmCG_THERMAL_INT);
if (enable) {
PPSMC_Result result;
- thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int &= ~(CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK);
+ WREG32(mmCG_THERMAL_INT, thermal_int);
result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
if (result != PPSMC_Result_OK) {
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
return -EINVAL;
}
} else {
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32(mmCG_THERMAL_INT, thermal_int);
}
return 0;
@@ -6447,9 +6469,9 @@ static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
return -EINVAL;
}
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
- WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTH_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTL_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, (high_temp / 1000) << CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT, ~CG_THERMAL_CTRL__DIG_THERM_DPM_MASK);
adev->pm.dpm.thermal.min_temp = low_temp;
adev->pm.dpm.thermal.max_temp = high_temp;
@@ -6463,20 +6485,20 @@ static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
u32 tmp;
if (si_pi->fan_ctrl_is_in_default_mode) {
- tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
si_pi->fan_ctrl_default_mode = tmp;
- tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) >> CG_FDO_CTRL2__TMIN__SHIFT;
si_pi->t_min = tmp;
si_pi->fan_ctrl_is_in_default_mode = false;
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(0);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
@@ -6495,7 +6517,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
return 0;
}
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0) {
adev->pm.dpm.fan.ucode_fan_control = false;
@@ -6531,7 +6553,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
reference_clock) / 1600);
fan_table.fdo_max = cpu_to_be16((u16)duty100);
- tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ tmp = (RREG32(mmCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
fan_table.temp_src = (uint8_t)tmp;
ret = amdgpu_si_copy_bytes_to_smc(adev,
@@ -6590,8 +6612,8 @@ static int si_dpm_get_fan_speed_pwm(void *handle,
if (adev->pm.no_fan)
return -ENOENT;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
- duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+ duty = (RREG32(mmCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6621,7 +6643,7 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
if (speed > 255)
return -EINVAL;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6630,9 +6652,9 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
do_div(tmp64, 255);
duty = (u32)tmp64;
- tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
- tmp |= FDO_STATIC_DUTY(duty);
- WREG32(CG_FDO_CTRL0, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
+ tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
+ WREG32(mmCG_FDO_CTRL0, tmp);
return 0;
}
@@ -6672,8 +6694,8 @@ static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
if (si_pi->fan_is_controlled_by_smc)
return 0;
- tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
- *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);
+ tmp = RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ *fan_mode = (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
return 0;
}
@@ -6691,7 +6713,7 @@ static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
if (adev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
- tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ tach_period = (RREG32(mmCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
if (tach_period == 0)
return -ENOENT;
@@ -6720,9 +6742,9 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
si_fan_ctrl_stop_smc_fan_control(adev);
tach_period = 60 * xclk * 10000 / (8 * speed);
- tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
- tmp |= TARGET_PERIOD(tach_period);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
+ tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
@@ -6736,13 +6758,13 @@ static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
u32 tmp;
if (!si_pi->fan_ctrl_is_in_default_mode) {
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= si_pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(si_pi->t_min);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= si_pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
si_pi->fan_ctrl_is_in_default_mode = true;
}
}
@@ -6760,14 +6782,14 @@ static void si_thermal_initialize(struct amdgpu_device *adev)
u32 tmp;
if (adev->pm.fan_pulses_per_revolution) {
- tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
- tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
+ tmp |= (adev->pm.fan_pulses_per_revolution - 1) << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
- tmp |= TACH_PWM_RESP_RATE(0x28);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
+ tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
@@ -7530,8 +7552,8 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle,
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
if (current_index >= ps->performance_level_count) {
seq_printf(m, "invalid dpm profile %d\n", current_index);
@@ -7554,14 +7576,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7571,14 +7593,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7883,8 +7905,8 @@ static int si_dpm_get_temp(void *handle)
int actual_temp = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
- CTF_TEMP_SHIFT;
+ temp = (RREG32(mmCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
if (temp & 0x200)
actual_temp = 255;
@@ -7929,15 +7951,15 @@ static void si_dpm_print_power_state(void *handle,
struct rv7xx_pl *pl;
int i;
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+ amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+ drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
for (i = 0; i < ps->performance_level_count; i++) {
pl = &ps->performance_levels[i];
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+ drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
- amdgpu_dpm_print_ps_status(adev, rps);
+ amdgpu_dpm_dbg_print_ps_status(adev, rps);
}
static int si_dpm_early_init(struct amdgpu_ip_block *ip_block)
@@ -8014,8 +8036,8 @@ static int si_dpm_read_sensor(void *handle, int idx,
struct si_ps *ps = si_get_ps(rps);
uint32_t sclk, mclk;
u32 pl_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 8f994ffa9cd1..4e65ab9e931c 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
@@ -30,6 +30,12 @@
#include "amdgpu_ucode.h"
#include "sislands_smc.h"
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
static int si_set_smc_sram_address(struct amdgpu_device *adev,
u32 smc_address, u32 limit)
{
@@ -38,8 +44,8 @@ static int si_set_smc_sram_address(struct amdgpu_device *adev,
if ((smc_address + 3) > limit)
return -EINVAL;
- WREG32(SMC_IND_INDEX_0, smc_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, smc_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
return 0;
}
@@ -68,7 +74,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
@@ -83,7 +89,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- original_data = RREG32(SMC_IND_DATA_0);
+ original_data = RREG32(mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
@@ -99,7 +105,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
}
done:
@@ -121,10 +127,10 @@ void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
u32 tmp;
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
RST_REG;
@@ -170,16 +176,16 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_Failed;
- WREG32(SMC_MESSAGE_0, msg);
+ WREG32(mmSMC_MESSAGE_0, msg);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SMC_RESP_0);
+ tmp = RREG32(mmSMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
- return (PPSMC_Result)RREG32(SMC_RESP_0);
+ return (PPSMC_Result)RREG32(mmSMC_RESP_0);
}
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
@@ -225,18 +231,18 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
return -EINVAL;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, ucode_start_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
while (ucode_size >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
ucode_size -= 4;
}
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return 0;
@@ -251,7 +257,7 @@ int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- *value = RREG32(SMC_IND_DATA_0);
+ *value = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
@@ -266,7 +272,7 @@ int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- WREG32(SMC_IND_DATA_0, value);
+ WREG32(mmSMC_IND_DATA_0, value);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
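Many of the hunks above rely on WREG32_P(reg, val, mask), which in amdgpu
is (roughly) a masked read-modify-write: the bits selected by mask are
preserved and the remaining bits are taken from val. That is why callers
pass ~FIELD_MASK as the mask argument, FIELD_MASK as val to set a field
to all-ones, and 0 as val to clear it. An approximate expansion:

    /* Rough equivalent of WREG32_P(reg, val, mask). */
    u32 tmp = RREG32(reg);
    tmp &= mask;            /* keep the bits the caller wants untouched */
    tmp |= (val & ~mask);   /* replace everything else with val */
    WREG32(reg, tmp);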
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 4bd92fd782be..8d40ed0f0e83 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -143,6 +143,10 @@ int atomctrl_initialize_mc_reg_table(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
@@ -180,6 +184,10 @@ int atomctrl_initialize_mc_reg_table_v2_2(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 9d3b33446adc..9b20076e26c0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -394,7 +394,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
}
if (le32_to_cpu(info->ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableDFSBypass);
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index 79a566f3564a..c305ea4ec17d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
}
cgs_write_register(hwmgr->device, indirect_port, index);
- return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}
int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
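The one-line smu_helper.c change swaps the last two arguments of the
phm_wait_on_register() call: assuming the helper is declared as taking
the expected value before the mask, the old call compared the polled
register against the mask instead of the value. The prototype the fixed
call now matches (as assumed here):

    int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
                             uint32_t value, uint32_t mask);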
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index 5a010cd38303..baf51cd82a35 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -46,42 +46,6 @@ static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
}
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
-{
- uint32_t data;
- uint32_t addr;
- uint8_t *dest_byte;
- uint8_t i, data_byte[4] = {0};
- uint32_t *pdata = (uint32_t *)&data_byte;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
- PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
-
- addr = smc_start_address;
-
- while (byte_count >= 4) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
-
- *dest = PP_SMC_TO_HOST_UL(data);
-
- dest += 1;
- byte_count -= 4;
- addr += 4;
- }
-
- if (byte_count) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
- *pdata = PP_SMC_TO_HOST_UL(data);
- /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
- dest_byte = (uint8_t *)dest;
- for (i = 0; i < byte_count; i++)
- dest_byte[i] = data_byte[i];
- }
-
- return 0;
-}
-
-
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
index e7303dc8c260..63e428ceaee4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
@@ -53,8 +53,6 @@ struct smu7_smumgr {
};
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
- uint32_t *dest, uint32_t byte_count, uint32_t limit);
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 46cce1d2aaf3..b47cb4a5f488 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -76,6 +76,10 @@ static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
+static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
+static int smu_od_edit_dpm_table(void *handle,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -134,12 +138,17 @@ int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
}
int smu_set_soft_freq_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
+ enum pp_clock_type type,
uint32_t min,
uint32_t max)
{
+ enum smu_clk_type clk_type;
int ret = 0;
+ clk_type = smu_convert_to_smuclk(type);
+ if (clk_type == SMU_CLK_COUNT)
+ return -EINVAL;
+
if (smu->ppt_funcs->set_soft_freq_limited_range)
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
@@ -307,6 +316,26 @@ static int smu_dpm_set_vpe_enable(struct smu_context *smu,
return ret;
}
+static int smu_dpm_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret;
+
+ if (!smu->ppt_funcs->dpm_set_isp_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->isp_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->isp_gated, !enable);
+
+ return ret;
+}
+
static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
@@ -408,6 +437,12 @@ static int smu_dpm_set_power_gate(void *handle,
dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
gate ? "gate" : "ungate");
break;
+ case AMD_IP_BLOCK_TYPE_ISP:
+ ret = smu_dpm_set_isp_enable(smu, !gate);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
+ gate ? "gate" : "ungate");
+ break;
default:
dev_err(smu->adev->dev, "Unsupported block type!\n");
return -EINVAL;
@@ -1004,6 +1039,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
+static void smu_update_gpu_addresses(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
+ struct smu_table *driver_table = &(smu_table->driver_table);
+ struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;
+
+ if (pm_status_table->bo)
+ pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
+ if (driver_table->bo)
+ driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
+ if (dummy_read_1_table->bo)
+ dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
+}
+
/**
* smu_alloc_memory_pool - allocate memory pool in the system memory
*
@@ -1285,6 +1335,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
smu_init_power_profile(smu);
@@ -1672,37 +1723,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
}
}
- ret = smu_system_features_control(smu, true);
- if (ret) {
- dev_err(adev->dev, "Failed to enable requested dpm features!\n");
- return ret;
- }
-
- smu_init_xgmi_plpd_mode(smu);
-
- ret = smu_feature_get_enabled_mask(smu, &features_supported);
- if (ret) {
- dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
- return ret;
- }
- bitmap_copy(feature->supported,
- (unsigned long *)&features_supported,
- feature->feature_num);
-
- if (!smu_is_dpm_running(smu))
- dev_info(adev->dev, "dpm has been disabled\n");
-
- /*
- * Set initialized values (get from vbios) to dpm tables context such as
- * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
- * type of clks.
- */
- ret = smu_set_default_dpm_table(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
- return ret;
- }
-
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
pcie_gen = 4;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
@@ -1738,6 +1758,37 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
+ ret = smu_system_features_control(smu, true);
+ if (ret) {
+ dev_err(adev->dev, "Failed to enable requested dpm features!\n");
+ return ret;
+ }
+
+ smu_init_xgmi_plpd_mode(smu);
+
+ ret = smu_feature_get_enabled_mask(smu, &features_supported);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
+ return ret;
+ }
+ bitmap_copy(feature->supported,
+ (unsigned long *)&features_supported,
+ feature->feature_num);
+
+ if (!smu_is_dpm_running(smu))
+ dev_info(adev->dev, "dpm has been disabled\n");
+
+ /*
+ * Set initialized values (obtained from vbios) in the dpm tables context,
+ * such as gfxclk, memclk, dcefclk, etc., and enable the DPM feature for
+ * each type of clock.
+ */
+ ret = smu_set_default_dpm_table(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
+ return ret;
+ }
+
ret = smu_get_thermal_temperature_range(smu);
if (ret) {
dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
@@ -1780,6 +1831,9 @@ static int smu_start_smc_engine(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ smu_update_gpu_addresses(smu);
+
smu->smc_fw_state = SMU_FW_INIT;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
@@ -2144,6 +2198,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
int ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
@@ -2175,6 +2230,18 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
adev->pm.dpm_enabled = true;
+ if (smu->current_power_limit) {
+ ret = smu_set_power_limit(smu, smu->current_power_limit);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
dev_info(adev->dev, "SMU is resumed successfully!\n");
return 0;
@@ -2935,6 +3002,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_DCLK; break;
case PP_DCLK1:
clk_type = SMU_DCLK1; break;
+ case PP_ISPICLK:
+ clk_type = SMU_ISPICLK;
+ break;
+ case PP_ISPXCLK:
+ clk_type = SMU_ISPXCLK;
+ break;
case OD_SCLK:
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
@@ -3432,15 +3505,15 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
return ret;
}
-bool smu_mode2_reset_is_support(struct smu_context *smu)
+bool smu_link_reset_is_support(struct smu_context *smu)
{
bool ret = false;
if (!smu->pm_enabled)
return false;
- if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
- ret = smu->ppt_funcs->mode2_reset_is_support(smu);
+ if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
+ ret = smu->ppt_funcs->link_reset_is_support(smu);
return ret;
}
@@ -3475,6 +3548,19 @@ static int smu_mode2_reset(void *handle)
return ret;
}
+int smu_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu->ppt_funcs->link_reset)
+ ret = smu->ppt_funcs->link_reset(smu);
+
+ return ret;
+}
+
static int smu_enable_gfx_features(void *handle)
{
struct smu_context *smu = handle;
@@ -3745,6 +3831,19 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
return ret;
}
+static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
+{
+ struct smu_context *smu = handle;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
+ return -EOPNOTSUPP;
+
+ return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
+}
+
static const struct amd_pm_funcs swsmu_pm_funcs = {
/* export for sysfs */
.set_fan_control_mode = smu_set_fan_control_mode,
@@ -3803,6 +3902,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_uclk_dpm_states = smu_get_uclk_dpm_states,
.get_dpm_clock_table = smu_get_dpm_clock_table,
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
+ .get_xcp_metrics = smu_sys_get_xcp_metrics,
};
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
@@ -3975,3 +4075,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
+ smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);
+
+ return 0;
+}
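
Note on the gating idiom in smu_dpm_set_isp_enable() above: isp_gated follows the same convention as the vcn/jpeg/vpe gates, 1 meaning the IP is power gated, so a request is forwarded only when the current state actually differs from the requested one. A minimal standalone sketch of that idiom, in userspace C11 with hypothetical names (not part of the patch):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int isp_gated = 1;    /* 1 = gated, the state set in smu_sw_init() */

    static int set_isp_enable_sketch(bool enable)
    {
        /* gated ^ enable != 0 means the block is already in the
         * requested state, so there is nothing to do. */
        if (atomic_load(&isp_gated) ^ enable)
            return 0;
        /* ...send the SMU message here; on success record the new state... */
        atomic_store(&isp_gated, !enable);
        return 0;
    }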
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index dd6d0e7aa242..b52e194397e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -402,6 +402,7 @@ struct smu_power_gate {
atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
atomic_t jpeg_gated;
atomic_t vpe_gated;
+ atomic_t isp_gated;
atomic_t umsch_mm_gated;
};
@@ -438,9 +439,11 @@ struct mclock_latency_table {
};
enum smu_reset_mode {
- SMU_RESET_MODE_0,
- SMU_RESET_MODE_1,
- SMU_RESET_MODE_2,
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+ SMU_RESET_MODE_3,
+ SMU_RESET_MODE_4,
};
enum smu_baco_state {
@@ -1229,10 +1232,11 @@ struct pptable_funcs {
* @mode1_reset_is_support: Check if GPU supports mode1 reset.
*/
bool (*mode1_reset_is_support)(struct smu_context *smu);
+
/**
- * @mode2_reset_is_support: Check if GPU supports mode2 reset.
+ * @link_reset_is_support: Check if GPU supports link reset.
*/
- bool (*mode2_reset_is_support)(struct smu_context *smu);
+ bool (*link_reset_is_support)(struct smu_context *smu);
/**
* @mode1_reset: Perform mode1 reset.
@@ -1252,6 +1256,13 @@ struct pptable_funcs {
int (*enable_gfx_features)(struct smu_context *smu);
/**
+ * @link_reset: Perform link reset.
+ *
+ * Performed by the gfx device driver.
+ */
+ int (*link_reset)(struct smu_context *smu);
+
+ /**
* @get_dpm_ultimate_freq: Get the hard frequency range of a clock
* domain in MHz.
*/
@@ -1383,6 +1394,11 @@ struct pptable_funcs {
bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
+ * @dpm_reset_vcn: message SMU to soft reset a VCN instance.
+ */
+ int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1421,6 +1437,12 @@ struct pptable_funcs {
int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
/**
+ * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
+ * management.
+ */
+ int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);
+
+ /**
* @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
* management.
*/
@@ -1451,6 +1473,12 @@ struct pptable_funcs {
*/
int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
struct freq_band_range *exclusion_ranges);
+ /**
+ * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
+ * Return: Size of table
+ */
+ ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
+ void *table);
};
typedef enum {
@@ -1601,8 +1629,9 @@ int smu_get_power_limit(void *handle,
enum pp_power_type pp_power_type);
bool smu_mode1_reset_is_support(struct smu_context *smu);
-bool smu_mode2_reset_is_support(struct smu_context *smu);
+bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
+int smu_link_reset(struct smu_context *smu);
extern const struct amd_ip_funcs smu_ip_funcs;
@@ -1613,7 +1642,7 @@ int smu_write_watermarks_table(struct smu_context *smu);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
uint32_t min, uint32_t max);
int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
@@ -1643,6 +1672,7 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
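
After the signature change above, callers of smu_set_soft_freq_range() pass a pp_clock_type and the translation to the internal smu_clk_type happens inside, via smu_convert_to_smuclk(); an unmapped type yields SMU_CLK_COUNT and hence -EINVAL. An illustrative call, with made-up frequencies in MHz:

    int ret = smu_set_soft_freq_range(smu, PP_SCLK, 500, 1800);
    if (ret == -EINVAL)
        ; /* PP_SCLK had no SMU_* mapping on this ASIC */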
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 1bc30db22f9c..cd44f4254134 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -106,6 +106,7 @@ typedef struct {
#define NUM_FCLK_DPM_LEVELS 8
#define NUM_MEM_PSTATE_LEVELS 4
+#define ISP_ALL_TILES_MASK 0x7FF
typedef struct {
uint32_t UClk;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index d7505cfc433a..0a2ca544f4e3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -86,8 +86,10 @@ typedef enum {
/*36*/ FEATURE_PIT = 36,
/*37*/ FEATURE_DVO = 37,
/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
+/*39*/ FEATURE_GLOBAL_DPM = 39,
+/*40*/ FEATURE_NODE_POWER_MANAGER = 40,
-/*39*/ NUM_FEATURES = 39
+/*41*/ NUM_FEATURES = 41
} FEATURE_LIST_e;
//enum for MPIO PCIe gen speed msgs
@@ -133,7 +135,7 @@ typedef enum {
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
-#define SMU_METRICS_TABLE_VERSION 0x12
+#define SMU_METRICS_TABLE_VERSION 0x13
typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccumulationCounter;
@@ -275,6 +277,16 @@ typedef struct {
//PSNs
uint64_t PublicSerialNumber_AID[4];
uint64_t PublicSerialNumber_XCD[8];
+
+ //XGMI
+ uint32_t MaxXgmiWidth;
+ uint32_t MaxXgmiBitrate;
+
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+
+ // General info
+ uint32_t pldmVersion[2];
} StaticMetricsTable_t;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index d26f35119a12..01790a927930 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -127,7 +127,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x10
+#define SMU_METRICS_TABLE_VERSION 0x11
// Unified metrics table for smu_v13_0_6
typedef struct __attribute__((packed, aligned(4))) {
@@ -459,4 +459,13 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
+#pragma pack(push, 4)
+typedef struct {
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+ // General info
+ uint32_t pldmVersion[2];
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
#endif
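
Both pmfw interface headers follow the same append-only contract: new members are added at the tail of the packed, 4-byte-aligned table and SMU_METRICS_TABLE_VERSION is bumped, so an older driver still parses the prefix unchanged while a newer driver must gate reads of the tail on the version the firmware reports. Consumer-side sketch, mirroring smu_v13_0_12_init_xgmi_data() later in this patch (0x13 being the version that introduced the XGMI fields):

    if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) {
        /* tail fields are present and valid */
        max_width = (uint8_t)static_metrics->MaxXgmiWidth;
        max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
    } else {
        /* fall back to the dynamic metrics table */
    }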
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 288b2576432b..41f268313613 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,7 +94,9 @@
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_Message_Count 0x4E
+#define PPSMC_MSG_ResetVCN 0x4E
+#define PPSMC_MSG_GetStaticMetricsTable 0x59
+#define PPSMC_Message_Count 0x5A
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index c9dee09395e3..d7a9e41820fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -277,6 +277,7 @@
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA), \
+ __SMU_DUMMY_MAP(ResetVCN), \
__SMU_DUMMY_MAP(GetStaticMetricsTable),
#undef __SMU_DUMMY_MAP
@@ -304,6 +305,8 @@ enum smu_clk_type {
SMU_MCLK,
SMU_PCIE,
SMU_LCLK,
+ SMU_ISPICLK,
+ SMU_ISPXCLK,
SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index ed8304d82831..56ae555bb52a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -281,11 +281,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_11_0_dpm_table *single_dpm_table);
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value);
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index cd03caffe317..4263798d716b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -112,6 +112,7 @@ struct smu_13_0_dpm_context {
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
uint64_t caps;
+ uint32_t board_volt;
};
enum smu_13_0_power_state {
@@ -162,8 +163,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu);
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en);
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count);
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu);
int smu_v13_0_notify_display_change(struct smu_context *smu);
@@ -183,13 +182,6 @@ int smu_v13_0_disable_thermal_alert(struct smu_context *smu);
int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value);
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
-
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req);
-
uint32_t
smu_v13_0_get_fan_control_mode(struct smu_context *smu);
@@ -226,11 +218,6 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max, bool automatic);
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max);
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
@@ -310,14 +297,6 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
uint32_t *value);
void smu_v13_0_interrupt_work(struct smu_context *smu);
-bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
-int smu_v13_0_12_get_max_metrics_size(void);
-int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
-int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
- MetricsMember_t member,
- uint32_t *value);
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
-extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
-extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+void smu_v13_0_reset_custom_level(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 453952cdc353..9ad46f545d15 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1347,7 +1347,7 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (max_power_limit)
*max_power_limit = power_limit;
- /**
+ /*
* No lower bound is imposed on the limit. Any unreasonable limit set
* will result in frequent throttling.
*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 7fad5dfb39c4..aac202d0c30e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2444,7 +2444,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
/* lclk dpm table setup */
for (i = 0; i < MAX_PCIE_CONF; i++) {
@@ -2453,25 +2454,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16) |
- ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
- (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
- pptable->PcieLaneCount[i] : pcie_width_cap);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
-
- if (ret)
- return ret;
-
- if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
- if (pptable->PcieLaneCount[i] > pcie_width_cap)
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
+ pptable->PcieLaneCount[i] > pcie_width_cap) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+ pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+ pcie_gen_cap : pptable->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+ pptable->PcieLaneCount[i] > pcie_width_cap ?
+ pcie_width_cap : pptable->PcieLaneCount[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_gen_cap << 8;
+ smu_pcie_arg |= pcie_width_cap;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
static inline void navi10_dump_od_table(struct smu_context *smu,
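
The SMU_MSG_OverridePcieParameters argument used in this and the following hunks packs three fields into one u32: the link level in bits 31:16, the PCIe gen code in bits 15:8 (0 = Gen1 through 4 = Gen5, matching the CAIL_PCIE_LINK_SPEED_SUPPORT_* mapping earlier in the patch), and the lane-width code in bits 7:0, decoded through pmfw_decoded_link_width[] so that code 6 means x16. A worked example:

    /* link level 1, Gen4 (code 3), x16 (width code 6) */
    uint32_t smu_pcie_arg = (1u << 16) | (3u << 8) | 6u;  /* == 0x00010306 */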
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 19a25fdc2f5b..d57591509aed 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2145,7 +2145,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint8_t min_gen_speed, max_gen_speed;
uint8_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
@@ -2170,19 +2171,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16 |
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) ||
+ table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) {
+ smu_pcie_arg = (i << 16 |
pcie_table->pcie_gen[i] << 8 |
pcie_table->pcie_lane[i]);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
@@ -3089,11 +3093,6 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
return 0;
}
-static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int sienna_cichlid_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -3229,7 +3228,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
.get_unique_id = sienna_cichlid_get_unique_id,
- .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
.mode2_reset = sienna_cichlid_mode2_reset,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 25fabf336a64..78e4186d06cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -2059,45 +2059,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
return 0;
}
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value)
-{
- uint32_t level_count = 0;
- int ret = 0;
-
- if (!min_value && !max_value)
- return -EINVAL;
-
- if (min_value) {
- /* by default, level 0 clock value as min value */
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0,
- min_value);
- if (ret)
- return ret;
- }
-
- if (max_value) {
- ret = smu_v11_0_get_dpm_level_count(smu,
- clk_type,
- &level_count);
- if (ret)
- return ret;
-
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- level_count - 1,
- max_value);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index a55ea76d7399..2c9869feba61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -666,7 +666,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -682,31 +681,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
break;
case SMU_OD_CCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
break;
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
- smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
- size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
- smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
break;
case SMU_SOCCLK:
/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 9481f897432d..e97b0cf19197 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -497,7 +497,6 @@ static int renoir_print_clk_levels(struct smu_context *smu,
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
bool cur_value_match_level = false;
memset(&metrics, 0, sizeof(metrics));
@@ -510,28 +509,24 @@ static int renoir_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMinGfxclkFrequency,
- 0, &min);
- if (ret)
- return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMaxGfxclkFrequency,
- 0, &max);
- if (ret)
- return ret;
- size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
break;
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
- max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
- size += sysfs_emit_at(buf, size, "OD_SCLK\n");
- size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
- size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
- }
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ size += sysfs_emit_at(buf, size, "OD_SCLK\n");
+ size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
+ size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
break;
case SMU_GFXCLK:
case SMU_SCLK:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 83163d7c7f00..c63d2e28954d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
+static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk;
+
+ if (amdgpu_sriov_vf(smu->adev)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+
+ if (min) {
+ if (!min_clk)
+ return -ENODATA;
+ *min = min_clk;
+ }
+ if (max) {
+ if (!max_clk)
+ return -ENODATA;
+ *max = max_clk;
+ }
+
+ } else {
+ return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+ }
+
+ return 0;
+}
+
static int aldebaran_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -1270,6 +1325,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int r;
/* Disable determinism if switching to another mode */
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
@@ -1282,7 +1338,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
return 0;
-
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ r = smu_v13_0_set_performance_level(smu, level);
+ if (!r)
+ smu_v13_0_reset_custom_level(smu);
+ return r;
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1423,7 +1483,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
+ ret = aldebaran_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
+ if (ret)
+ return ret;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1976,11 +2040,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
return true;
}
-static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int aldebaran_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
@@ -2077,7 +2136,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
.get_bamaco_support = aldebaran_get_bamaco_support,
- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
.set_df_cstate = aldebaran_set_df_cstate,
@@ -2086,7 +2145,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = aldebaran_get_gpu_metrics,
.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
- .mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
.mode1_reset = aldebaran_mode1_reset,
.set_mp1_state = aldebaran_set_mp1_state,
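
The SR-IOV branch of aldebaran_get_dpm_ultimate_freq() deliberately returns -ENODATA instead of 0 when the cached dpm table has not yet been populated, letting callers tell "no data yet" apart from a genuine 0 MHz limit. A hedged sketch of the expected caller pattern (the retry policy is an assumption, not taken from this patch):

    uint32_t min_mhz, max_mhz;
    int err = aldebaran_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min_mhz, &max_mhz);
    if (err == -ENODATA)
        ; /* table not initialized yet, query again later */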
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ba5a9012dbd5..1a1f2a6b2e52 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -58,6 +58,7 @@
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
@@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -709,18 +715,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
-{
- int ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
- if (ret)
- dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");
-
- return ret;
-}
-
int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
@@ -761,18 +755,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
-{
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
-
- return ret;
-}
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -1073,56 +1055,6 @@ int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req)
-{
- enum amd_pp_clock_type clk_type = clock_req->clock_type;
- int ret = 0;
- enum smu_clk_type clk_select = 0;
- uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
- smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- switch (clk_type) {
- case amd_pp_dcef_clock:
- clk_select = SMU_DCEFCLK;
- break;
- case amd_pp_disp_clock:
- clk_select = SMU_DISPCLK;
- break;
- case amd_pp_pixel_clock:
- clk_select = SMU_PIXCLK;
- break;
- case amd_pp_phy_clock:
- clk_select = SMU_PHYCLK;
- break;
- case amd_pp_mem_clock:
- clk_select = SMU_UCLK;
- break;
- default:
- dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
- ret = -EINVAL;
- break;
- }
-
- if (ret)
- goto failed;
-
- if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
- return 0;
-
- ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
-
- if (clk_select == SMU_UCLK)
- smu->hard_min_uclk_req_from_dal = clk_freq;
- }
-
-failed:
- return ret;
-}
-
uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1647,45 +1579,6 @@ out:
return ret;
}
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
-{
- int ret = 0, clk_id = 0;
- uint32_t param;
-
- if (min <= 0 && max <= 0)
- return -EINVAL;
-
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
- return 0;
-
- clk_id = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_CLK,
- clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
@@ -2493,7 +2386,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
&dpm_context->dpm_tables.pcie_table;
int num_of_levels = pcie_table->num_of_link_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
if (!num_of_levels)
return 0;
@@ -2509,30 +2403,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
}
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
- }
-
- return 0;
+ return ret;
}
int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
@@ -2595,3 +2497,13 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
return ret;
}
+
+void smu_v13_0_reset_custom_level(struct smu_context *smu)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->uclk_pstate.custom.min = 0;
+ pstate_table->uclk_pstate.custom.max = 0;
+ pstate_table->gfxclk_pstate.custom.min = 0;
+ pstate_table->gfxclk_pstate.custom.max = 0;
+}
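
smu_v13_0_reset_custom_level() zeroes the cached CUSTOM pstate limits so that a later PP_OD_COMMIT_DPM_TABLE cannot replay stale user clocks; the aldebaran hunks above invoke it both after a successful switch to AUTO and after restoring the default gfx range. The usage pattern as established there:

    r = smu_v13_0_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
    if (!r)
        smu_v13_0_reset_custom_level(smu);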
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 5a9711e8cf68..e084ed99ec0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -572,8 +572,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -689,24 +687,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -3150,6 +3130,90 @@ static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ uint32_t smu_pcie_arg;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_0_print_clk_levels,
.force_clk_levels = smu_v13_0_0_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 238bd71baa6d..02a455a31c25 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -187,28 +187,34 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
-static int smu_v13_0_12_get_static_metrics_table(struct smu_context *smu)
+static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
{
struct smu_table_context *smu_table = &smu->smu_table;
- uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
- struct smu_table *table = &smu_table->driver_table;
+ uint16_t max_speed;
+ uint8_t max_width;
int ret;
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
- if (ret) {
- dev_info(smu->adev->dev,
- "Failed to export static metrics table!\n");
- return ret;
- }
-
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
- memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+ if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) {
+ max_width = (uint8_t)static_metrics->MaxXgmiWidth;
+ max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+ ret = 0;
+ } else {
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
- return 0;
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
+ if (!ret) {
+ max_width = (uint8_t)metrics->XgmiWidth;
+ max_speed = (uint16_t)metrics->XgmiBitrate;
+ }
+ }
+ if (!ret)
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
}
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
@@ -217,7 +223,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
int ret, i;
if (!pptable->Init) {
- ret = smu_v13_0_12_get_static_metrics_table(smu);
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
if (ret)
return ret;
@@ -257,6 +263,18 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
if (ret)
return ret;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %u\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+ }
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+ smu_v13_0_12_init_xgmi_data(smu, static_metrics);
pptable->Init = true;
}
@@ -283,7 +301,6 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
int xcc_id;
/* For clocks with multiple instances, only report the first one */
@@ -339,14 +356,70 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
break;
}
- return ret;
+ return 0;
+}
+
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
+{
+ const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
+ struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ MetricsTable_t *metrics;
+ int inst, j, k, idx;
+ u32 inst_mask;
+
+ metrics = (MetricsTable_t *)smu_metrics;
+ xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table;
+ smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instance */
+ inst = GET_INST(VCN, k);
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(metrics->
+ JpegBusy[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ metrics->VclkFrequency[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ metrics->DclkFrequency[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ metrics->SocclkFrequency[inst]);
+
+ idx++;
+ }
+
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(metrics->UclkFrequency);
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]);
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+ idx++;
+ }
+
+ return sizeof(*xcp_metrics);
}
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
u8 num_jpeg_rings_gpu_metrics;
@@ -354,10 +427,9 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
struct amdgpu_xcp *xcp;
u32 inst_mask;
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t));
+ metrics = (MetricsTable_t *)smu_metrics;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(metrics->MaxSocketTemperature);
@@ -436,13 +508,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
for (i = 0; i < NUM_XGMI_LINKS; i++) {
- gpu_metrics->xgmi_read_data_acc[i] =
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] =
SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
- gpu_metrics->xgmi_write_data_acc[i] =
+ gpu_metrics->xgmi_write_data_acc[j] =
SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
- gpu_metrics->xgmi_link_status[i] = ret;
+ gpu_metrics->xgmi_link_status[j] = ret;
}
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
@@ -474,6 +549,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
SMUQ10_ROUND(metrics->GfxBusy[inst]);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
idx++;
}
}
@@ -484,7 +569,6 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->firmware_timestamp = metrics->Timestamp;
*table = (void *)gpu_metrics;
- kfree(metrics);
return sizeof(*gpu_metrics);
}
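
The SMUQ10_ROUND() conversions used throughout these metrics paths decode the firmware's Q10 fixed-point encoding (value = raw / 1024). Conceptually this is round-half-up, though the exact kernel macro may differ in detail:

    static inline uint32_t q10_round(uint64_t raw)
    {
        return (uint32_t)((raw + (1u << 9)) >> 10);  /* add 0.5 in Q10, truncate */
    }
    /* e.g. raw 0x5A00 == 22.5 in Q10, rounds to 23 */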
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index c478b3be37af..9cc294f4708b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -101,24 +101,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
-#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
-
-enum smu_v13_0_6_caps {
- SMU_CAP(DPM),
- SMU_CAP(DPM_POLICY),
- SMU_CAP(OTHER_END_METRICS),
- SMU_CAP(SET_UCLK_MAX),
- SMU_CAP(PCIE_METRICS),
- SMU_CAP(MCA_DEBUG_MODE),
- SMU_CAP(PER_INST_METRICS),
- SMU_CAP(CTF_LIMIT),
- SMU_CAP(RMA_MSG),
- SMU_CAP(ACA_SYND),
- SMU_CAP(SDMA_RESET),
- SMU_CAP(STATIC_METRICS),
- SMU_CAP(ALL),
-};
-
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -194,6 +176,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
};
// clang-format on
@@ -299,8 +283,8 @@ static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
dpm_context->caps &= ~BIT_ULL(cap);
}
-static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
- enum smu_v13_0_6_caps cap)
+bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -328,6 +312,11 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+ if (fw_ver >= 0x5551600) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
}
static void smu_v13_0_12_init_caps(struct smu_context *smu)
@@ -353,6 +342,14 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver >= 0x00561E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+
+ if (fw_ver >= 0x00562500)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+
+ if (fw_ver >= 0x04560100) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -402,6 +399,17 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
if (fw_ver < 0x00555600)
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+ if ((pgm == 7 && fw_ver >= 0x7550E00) ||
+ (pgm == 0 && fw_ver >= 0x00557E00))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if ((pgm == 0 && fw_ver >= 0x00557F01) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ }
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
@@ -525,7 +533,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -682,8 +690,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
-static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
- void *metrics_table, bool bypass_cache)
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
@@ -747,9 +755,48 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
return pm_metrics->common_header.structure_size;
}
+static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %u\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+}
+
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export static metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ return 0;
+}
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
@@ -758,8 +805,11 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
int version = smu_v13_0_6_get_metrics_version(smu);
int ret, i, retry = 100;
uint32_t table_version;
+ uint16_t max_speed;
+ uint8_t max_width;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_setup_driver_pptable(smu);
/* Store one-time values in driver PPTable */
@@ -792,6 +842,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version));
pptable->MinGfxclkFrequency =
SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version));
+ max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version);
+ max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version);
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
@@ -813,6 +866,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
pptable->Init = true;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+ smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
+ }
}
return 0;
@@ -822,51 +881,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- uint32_t clock_limit = 0, param;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk, param;
int ret = 0, clk_id = 0;
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ /* Use dpm tables, if data is already fetched */
+ if (pptable->Init) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
- if (pptable->Init)
- clock_limit = pptable->UclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
break;
case SMU_GFXCLK:
case SMU_SCLK:
- if (pptable->Init)
- clock_limit = pptable->MinGfxclkFrequency;
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
break;
case SMU_SOCCLK:
- if (pptable->Init)
- clock_limit = pptable->SocclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.soc_table;
break;
case SMU_FCLK:
- if (pptable->Init)
- clock_limit = pptable->FclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
break;
case SMU_VCLK:
- if (pptable->Init)
- clock_limit = pptable->VclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
break;
case SMU_DCLK:
- if (pptable->Init)
- clock_limit = pptable->DclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
break;
default:
- break;
+ return -EINVAL;
}
- if (min)
- *min = clock_limit;
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+ if (min)
+ *min = min_clk;
if (max)
- *max = clock_limit;
+ *max = max_clk;
- return 0;
+ if (min_clk && max_clk)
+ return 0;
}
if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
@@ -1142,7 +1201,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
if (ret)
return ret;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
/* For clocks with multiple instances, only report the first one */
@@ -1327,8 +1387,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
return ret;
}
- min_clk = pstate_table->gfxclk_pstate.curr.min;
- max_clk = pstate_table->gfxclk_pstate.curr.max;
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ min_clk = single_dpm_table->min;
+ max_clk = single_dpm_table->max;
if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
@@ -1616,6 +1677,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor, void *data,
uint32_t *size)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
int ret = 0;
if (amdgpu_ras_intr_triggered())
@@ -1660,6 +1722,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VDDBOARD:
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+ *(uint32_t *)data = dpm_context->board_volt;
+ *size = 4;
+ break;
+ } else {
+ ret = -EOPNOTSUPP;
+ break;
+ }
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
@@ -1927,7 +1998,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
return ret;
pstate_table->uclk_pstate.curr.max = uclk_table->max;
}
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -2140,7 +2211,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
return ret;
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -2483,11 +2554,131 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
return pcie_gen_to_speed(speed_level + 1);
}
+static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
+ void *table)
+{
+ const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
+ int version = smu_v13_0_6_get_metrics_version(smu);
+ struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ int ret, inst, i, j, k, idx;
+ MetricsTableV0_t *metrics_v0;
+ MetricsTableV1_t *metrics_v1;
+ MetricsTableV2_t *metrics_v2;
+ struct amdgpu_xcp *xcp;
+ u32 inst_mask;
+ bool per_inst;
+
+ if (!table)
+ return sizeof(*xcp_metrics);
+
+ for_each_xcp(adev->xcp_mgr, xcp, i) {
+ if (xcp->id == xcp_id)
+ break;
+ }
+ if (i == adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+
+ xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table;
+ smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+
+ metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_v0)
+ return -ENOMEM;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
+ if (ret) {
+ kfree(metrics_v0);
+ return ret;
+ }
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0);
+ goto out;
+ }
+
+ metrics_v1 = (MetricsTableV1_t *)metrics_v0;
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
+
+ per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+		/* Both JPEG and VCN have the same instances */
+ inst = GET_INST(VCN, k);
+
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(
+ JpegBusy,
+ version)[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
+
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(VclkFrequency, version)[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(DclkFrequency, version)[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(SocclkFrequency, version)[inst]);
+
+ idx++;
+ }
+
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
+
+ if (per_inst) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency,
+ version)[inst]);
+
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
+ if (smu_v13_0_6_cap_supported(
+ smu, SMU_CAP(HST_LIMIT_METRICS))) {
+				xcp_metrics->gfx_below_host_limit_ppt_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
+				xcp_metrics->gfx_below_host_limit_thm_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
+				xcp_metrics->gfx_low_utilization_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkLowUtilizationAcc[inst]);
+				xcp_metrics->gfx_below_host_limit_total_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+ idx++;
+ }
+ }
+out:
+ kfree(metrics_v0);
+
+ return sizeof(*xcp_metrics);
+}
+
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
@@ -2496,24 +2687,29 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u16 link_width_level;
+ ssize_t num_bytes;
u8 num_jpeg_rings;
u32 inst_mask;
bool per_inst;
	metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+	if (!metrics_v0)
+		return -ENOMEM;
+
-	ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true);
+	ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
if (ret) {
kfree(metrics_v0);
return ret;
}
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
- return smu_v13_0_12_get_gpu_metrics(smu, table);
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
+ kfree(metrics_v0);
+ return num_bytes;
+ }
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
@@ -2623,13 +2819,16 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
- gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
- gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
+ gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
- gpu_metrics->xgmi_link_status[i] = ret;
+ gpu_metrics->xgmi_link_status[j] = ret;
}
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
@@ -2666,6 +2865,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
version)[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
idx++;
}
}
@@ -2844,14 +3057,29 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_4, NULL);
+ return ret;
+}
+
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
return true;
}
-static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
+static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
{
- return true;
+	struct amdgpu_device *adev = smu->adev;
+
+	return (adev->pdev->device & 0xF) == 0x1;
}
static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
@@ -2924,6 +3152,19 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "failed to send ResetVCN event with mask 0x%x\n",
+ inst_mask);
+ return ret;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3584,11 +3825,13 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
.get_pm_metrics = smu_v13_0_6_get_pm_metrics,
+ .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
- .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
+ .link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
+ .link_reset = smu_v13_0_6_link_reset,
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
@@ -3596,6 +3839,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
.reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
+ .dpm_reset_vcn = smu_v13_0_6_reset_vcn,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
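The cap-init hunks above share one pattern: a feature such as HST_LIMIT_METRICS, BOARD_VOLTAGE or PLDM_VERSION is enabled only once the PMFW on a given program branch (pgm) reaches a threshold version. A minimal standalone sketch of that rule-table idea, using only the thresholds visible in the hunks (fw_cap_rule and fw_cap_available are illustrative names, not driver API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct fw_cap_rule {
    	int pgm;             /* firmware program/branch id */
    	uint32_t min_fw_ver; /* first version carrying the feature */
    };

    /* HST_LIMIT_METRICS thresholds, taken from the hunk above */
    static const struct fw_cap_rule hst_limit_rules[] = {
    	{ .pgm = 7, .min_fw_ver = 0x7550E00 },
    	{ .pgm = 0, .min_fw_ver = 0x00557E00 },
    };

    static bool fw_cap_available(int pgm, uint32_t fw_ver,
    			     const struct fw_cap_rule *rules, size_t n)
    {
    	for (size_t i = 0; i < n; i++)
    		if (rules[i].pgm == pgm && fw_ver >= rules[i].min_fw_ver)
    			return true;
    	return false;
    }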
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 83745909e564..67b30674fd31 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -26,6 +26,7 @@
#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
typedef enum {
/*0*/ METRICS_VERSION_V0 = 0,
@@ -51,6 +52,40 @@ struct PPTable_t {
bool Init;
};
+enum smu_v13_0_6_caps {
+ SMU_CAP(DPM),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(PER_INST_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(SDMA_RESET),
+ SMU_CAP(STATIC_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(BOARD_VOLTAGE),
+ SMU_CAP(PLDM_VERSION),
+ SMU_CAP(ALL),
+};
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache);
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+int smu_v13_0_12_get_max_metrics_size(void);
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member, uint32_t *value);
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics);
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
+ struct amdgpu_xcp *xcp, void *table,
+ void *smu_metrics);
+extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
+extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
#endif
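SMU_CAP() is a token-pasting macro: SMU_CAP(BOARD_VOLTAGE) expands to SMU_13_0_6_CAPS_BOARD_VOLTAGE, so call sites stay short while the enum keeps a unique prefix. The same idiom in a standalone sketch; the names and the bitmask helpers here are illustrative assumptions about how such caps are commonly tracked, not the driver's actual storage:

    #include <stdbool.h>
    #include <stdint.h>

    #define CAP(x) MY_CAPS_##x	/* same ## paste as SMU_CAP() */

    enum my_caps { CAP(DPM), CAP(STATIC_METRICS), CAP(ALL) };

    static uint64_t caps_mask;	/* one bit per capability */

    static inline void cap_set(enum my_caps c)  { caps_mask |= 1ULL << c; }
    static inline bool cap_test(enum my_caps c) { return caps_mask & (1ULL << c); }

    /* cap_set(CAP(STATIC_METRICS)) sets bit MY_CAPS_STATIC_METRICS (bit 1) */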
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c8f4f6fb4083..c96fa5e49ed6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
SkuTable_t *skutable = &driver_ppt->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -2739,6 +2719,89 @@ static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ int link_level;
+ uint32_t smu_pcie_arg;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2768,7 +2831,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_7_print_clk_levels,
.force_clk_levels = smu_v13_0_7_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
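Both this function and the smu_v14_0_2 variant further down pack the SMU_MSG_OverridePcieParameters argument the same way: the DPM link level goes in bits 31:16, the PCIe gen code in bits 15:8 and the lane-count code in bits 7:0. A small helper showing just the packing (make_pcie_arg is an illustrative name; the driver open-codes the shifts):

    #include <stdint.h>

    /* bits [31:16] = link level, [15:8] = gen code, [7:0] = lane code */
    static inline uint32_t make_pcie_arg(uint32_t level, uint8_t gen, uint8_t lanes)
    {
    	return (level << 16) | ((uint32_t)gen << 8) | lanes;
    }

    /* make_pcie_arg(2, 3, 5) == 0x00020305 */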
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 76c1adda83db..f9b0938c57ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32};
MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin");
#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
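On kicker parts (amdgpu_is_kicker_fw()) the SMU firmware is requested under a "_kicker"-suffixed name; otherwise the plain name is used, and ucode_prefix grows to 30 bytes for headroom on longer decoded names. A sketch of the resulting path construction (fw_path and is_kicker are illustrative stand-ins, not driver helpers):

    #include <stdbool.h>
    #include <stdio.h>

    static void fw_path(char *buf, size_t len, const char *prefix, bool is_kicker)
    {
    	/* "amdgpu/smu_14_0_3_kicker.bin" vs "amdgpu/smu_14_0_3.bin" */
    	snprintf(buf, len, "amdgpu/%s%s.bin", prefix, is_kicker ? "_kicker" : "");
    }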
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 84f9b007b59f..fe00c84b1cc6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1207,11 +1207,13 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ u32 min,
+ u32 max,
+ bool __always_unused automatic)
{
- enum smu_message_type msg_set_min, msg_set_max;
- int ret = 0;
+ enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT;
+ enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT;
+ int ret = -EINVAL;
if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type))
return -EINVAL;
@@ -1240,16 +1242,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
msg_set_min = SMU_MSG_SetHardMinVcn1;
msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
+ case SMU_ISPICLK:
+ msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq;
+ break;
+ case SMU_ISPXCLK:
+ msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq;
+ break;
default:
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
- if (ret)
- return ret;
+ if (min && msg_set_min != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+
+ if (max && msg_set_max != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
- return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
- max, NULL);
+ return ret;
}
static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
@@ -1278,7 +1287,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
if (ret)
break;
- ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
break;
default:
ret = -EINVAL;
@@ -1426,7 +1435,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ false);
if (ret)
return ret;
@@ -1438,7 +1448,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_FCLK,
fclk_min,
- fclk_max);
+ fclk_max,
+ false);
if (ret)
return ret;
}
@@ -1447,7 +1458,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ false);
if (ret)
return ret;
}
@@ -1456,7 +1468,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ false);
if (ret)
return ret;
}
@@ -1465,7 +1478,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK1,
vclk1_min,
- vclk1_max);
+ vclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1474,7 +1488,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ false);
if (ret)
return ret;
}
@@ -1483,7 +1498,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK1,
dclk1_min,
- dclk1_max);
+ dclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1533,6 +1549,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_v14_0_0_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile,
+ ISP_ALL_TILES_MASK, NULL);
+}
+
static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
@@ -1662,6 +1686,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
@@ -1669,6 +1694,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
+ .dpm_set_isp_enable = smu_v14_0_0_set_isp_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
.set_mall_enable = smu_v14_0_common_set_mall_enable,
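The reworked set_soft_freq_limited_range() sends each bound independently: a message goes out only when the bound is non-zero and the clock actually defines a message for it (the new ISP clocks define a hard-min message only), and ret stays -EINVAL when nothing was sent. A standalone sketch of that guard pattern, with stand-in names (MSG_NONE, send_msg) for SMU_MSG_MAX_COUNT and smu_cmn_send_smc_msg_with_param():

    #include <errno.h>
    #include <stdint.h>

    enum { MSG_NONE = -1 };

    static int send_msg(int msg, uint32_t arg)	/* stub for the SMC call */
    {
    	(void)msg; (void)arg;
    	return 0;
    }

    static int set_range(uint32_t min, uint32_t max, int msg_min, int msg_max)
    {
    	int ret = -EINVAL;	/* stays -EINVAL if no message is sent */

    	if (min && msg_min != MSG_NONE)
    		ret = send_msg(msg_min, min);
    	if (max && msg_max != MSG_NONE)
    		ret = send_msg(msg_max, max);
    	return ret;
    }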
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 82c2db972491..3aea32baea3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -502,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_14_0_dpm_table *dpm_table;
- struct smu_14_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -619,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
-
- if (link_level == 0)
- link_level++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -1487,10 +1464,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_14_0_pcie_table *pcie_table =
&dpm_context->dpm_tables.pcie_table;
- int num_of_levels = pcie_table->num_of_link_levels;
+ int num_of_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+ num_of_levels = pcie_table->num_of_link_levels;
if (!num_of_levels)
return 0;
@@ -1505,30 +1503,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
- pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
- pcie_table->pcie_lane[i] = pcie_width_cap;
- }
- }
-
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
- if (ret)
- return ret;
+ if (ret)
+ break;
+ }
+ }
}
- return 0;
+ return ret;
}
static const struct smu_temperature_range smu14_thermal_policy[] = {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 80eb1a03b3ca..59f9abd0f7b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
#define SMU_RESP_BUSY_OTHER 0xFC
#define SMU_RESP_DEBUG_END 0xFB
+#define SMU_RESP_UNEXP (~0U)
/**
* __smu_cmn_poll_stat -- poll for a status from the SMU
* @smu: a pointer to SMU context
@@ -171,6 +172,15 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
dev_err_ratelimited(adev->dev,
"SMU: I'm debugging!");
break;
+ case SMU_RESP_UNEXP:
+ if (amdgpu_device_bus_status_check(smu->adev)) {
+ /* print error immediately if device is off the bus */
+ dev_err(adev->dev,
+ "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
+ reg_c2pmsg_90, msg_index, param, message);
+ break;
+ }
+ fallthrough;
default:
dev_err_ratelimited(adev->dev,
"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
@@ -1051,73 +1061,6 @@ int smu_cmn_get_combo_pptable(struct smu_context *smu)
false);
}
-void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
-{
- struct metrics_table_header *header = (struct metrics_table_header *)table;
- uint16_t structure_size;
-
-#define METRICS_VERSION(a, b) ((a << 16) | b)
-
- switch (METRICS_VERSION(frev, crev)) {
- case METRICS_VERSION(1, 0):
- structure_size = sizeof(struct gpu_metrics_v1_0);
- break;
- case METRICS_VERSION(1, 1):
- structure_size = sizeof(struct gpu_metrics_v1_1);
- break;
- case METRICS_VERSION(1, 2):
- structure_size = sizeof(struct gpu_metrics_v1_2);
- break;
- case METRICS_VERSION(1, 3):
- structure_size = sizeof(struct gpu_metrics_v1_3);
- break;
- case METRICS_VERSION(1, 4):
- structure_size = sizeof(struct gpu_metrics_v1_4);
- break;
- case METRICS_VERSION(1, 5):
- structure_size = sizeof(struct gpu_metrics_v1_5);
- break;
- case METRICS_VERSION(1, 6):
- structure_size = sizeof(struct gpu_metrics_v1_6);
- break;
- case METRICS_VERSION(1, 7):
- structure_size = sizeof(struct gpu_metrics_v1_7);
- break;
- case METRICS_VERSION(1, 8):
- structure_size = sizeof(struct gpu_metrics_v1_8);
- break;
- case METRICS_VERSION(2, 0):
- structure_size = sizeof(struct gpu_metrics_v2_0);
- break;
- case METRICS_VERSION(2, 1):
- structure_size = sizeof(struct gpu_metrics_v2_1);
- break;
- case METRICS_VERSION(2, 2):
- structure_size = sizeof(struct gpu_metrics_v2_2);
- break;
- case METRICS_VERSION(2, 3):
- structure_size = sizeof(struct gpu_metrics_v2_3);
- break;
- case METRICS_VERSION(2, 4):
- structure_size = sizeof(struct gpu_metrics_v2_4);
- break;
- case METRICS_VERSION(3, 0):
- structure_size = sizeof(struct gpu_metrics_v3_0);
- break;
- default:
- return;
- }
-
-#undef METRICS_VERSION
-
- memset(header, 0xFF, structure_size);
-
- header->format_revision = frev;
- header->content_revision = crev;
- header->structure_size = structure_size;
-
-}
-
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index a020277dec3e..a608cdbdada4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,6 +40,31 @@
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
+ do { \
+ typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
+ struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = frev; \
+ header->content_revision = crev; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
+#define smu_cmn_init_partition_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
@@ -125,8 +150,6 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
int smu_cmn_get_combo_pptable(struct smu_context *smu);
-void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
-
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
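These macros replace the removed runtime switch in smu_cmn.c: the revision pair is pasted into the struct type name at compile time, typecheck() rejects a mismatched pointer, and sizeof(*tmp) supplies the size the switch used to look up. So smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8) only compiles when gpu_metrics really is a struct gpu_metrics_v1_8 *. The idiom in a standalone sketch (struct and macro names are illustrative):

    #include <stdint.h>
    #include <string.h>

    /* kernel-style typecheck(): compares pointer types, evaluates to 1 */
    #define typecheck(type, x) \
    	({ type __t1; typeof(x) __t2; (void)(&__t1 == &__t2); 1; })

    struct hdr { uint8_t frev, crev; uint16_t size; };
    struct metrics_v1_8 { struct hdr h; uint32_t temperature; };

    #define init_metrics(ptr, f, c)					\
    	do {							\
    		typecheck(struct metrics_v##f##_##c *, (ptr));	\
    		struct metrics_v##f##_##c *tmp = (ptr);		\
    		memset(tmp, 0xFF, sizeof(*tmp));		\
    		tmp->h.frev = f;				\
    		tmp->h.crev = c;				\
    		tmp->h.size = sizeof(*tmp);			\
    	} while (0)

    /* init_metrics(m, 1, 8) compiles only for struct metrics_v1_8 *m */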
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index df5da5a44755..901f938aefe0 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -157,6 +157,7 @@ komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct komeda_dev *mdev = dev->dev_private;
@@ -177,7 +178,7 @@ komeda_fb_create(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EINVAL);
}
- drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &kfb->base, info, mode_cmd);
if (kfb->base.modifier)
ret = komeda_fb_afbc_size_check(kfb, file, mode_cmd);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
index c61ca98a3a63..02b2b8ae482a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
@@ -37,6 +37,7 @@ struct komeda_fb {
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int komeda_fb_check_src_coords(const struct komeda_fb *kfb,
u32 src_x, u32 src_y, u32 src_w, u32 src_h);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 3cfefadc7c9d..806da0aaedf7 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -11,8 +11,8 @@
#include <linux/clk.h>
#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
+#include <video/pixel_format.h>
#include <video/videomode.h>
#include <drm/drm_atomic.h>
@@ -73,7 +73,17 @@ static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
.disable_vblank = hdlcd_crtc_disable_vblank,
};
-static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS;
+static const struct {
+ u32 fourcc;
+ struct pixel_format pixel;
+} supported_formats[] = {
+ { DRM_FORMAT_RGB565, PIXEL_FORMAT_RGB565 },
+ { DRM_FORMAT_XRGB1555, PIXEL_FORMAT_XRGB1555 },
+ { DRM_FORMAT_RGB888, PIXEL_FORMAT_RGB888 },
+ { DRM_FORMAT_XRGB8888, PIXEL_FORMAT_XRGB8888 },
+ { DRM_FORMAT_XBGR8888, PIXEL_FORMAT_XBGR8888 },
+	{ DRM_FORMAT_XRGB2101010, PIXEL_FORMAT_XRGB2101010 },
+};
/*
* Setup the HDLCD registers for decoding the pixels out of the framebuffer
@@ -83,15 +93,12 @@ static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
unsigned int btpp;
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
const struct drm_framebuffer *fb = crtc->primary->state->fb;
- uint32_t pixel_format;
- struct simplefb_format *format = NULL;
+ const struct pixel_format *format = NULL;
int i;
- pixel_format = fb->format->format;
-
for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
- if (supported_formats[i].fourcc == pixel_format)
- format = &supported_formats[i];
+ if (supported_formats[i].fourcc == fb->format->format)
+ format = &supported_formats[i].pixel;
}
if (WARN_ON(!format))
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index e083021e9e99..bc5f5e9798c3 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -306,10 +306,10 @@ malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
static bool
malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int n_superblocks = 0;
- const struct drm_format_info *info;
struct drm_gem_object *objs = NULL;
u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
u32 afbc_superblock_width = 0, afbc_size = 0;
@@ -325,8 +325,6 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
return false;
}
- info = drm_get_format_info(dev, mode_cmd);
-
n_superblocks = (mode_cmd->width / afbc_superblock_width) *
(mode_cmd->height / afbc_superblock_height);
@@ -366,24 +364,26 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
static bool
malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
- return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);
+ return malidp_verify_afbc_framebuffer_size(dev, file, info, mode_cmd);
return false;
}
static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (mode_cmd->modifier[0]) {
- if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
+ if (!malidp_verify_afbc_framebuffer(dev, file, info, mode_cmd))
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file, mode_cmd);
+ return drm_gem_fb_create(dev, file, info, mode_cmd);
}
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 34547edf1ee3..87f2e5ee8790 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -159,7 +159,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
}
if (!fourcc_mod_is_vendor(modifier, ARM)) {
- DRM_ERROR("Unknown modifier (not Arm)\n");
+ DRM_DEBUG_KMS("Unknown modifier (not Arm)\n");
return false;
}
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index cf2e88218dc0..aa4289127086 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -18,7 +18,9 @@ static const struct drm_framebuffer_funcs armada_fb_funcs = {
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+ const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode,
+ struct armada_gem_object *obj)
{
struct armada_framebuffer *dfb;
uint8_t format, config;
@@ -64,7 +66,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->mod = config;
dfb->fb.obj[0] = &obj->obj;
- drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
+ drm_helper_mode_fill_fb_struct(dev, &dfb->fb, info, mode);
ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
if (ret) {
@@ -84,9 +86,9 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
}
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
- struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
+ struct drm_file *dfile, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode)
{
- const struct drm_format_info *info = drm_get_format_info(dev, mode);
struct armada_gem_object *obj;
struct armada_framebuffer *dfb;
int ret;
@@ -122,7 +124,7 @@ struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
goto err_unref;
}
- dfb = armada_framebuffer_create(dev, mode, obj);
+ dfb = armada_framebuffer_create(dev, info, mode, obj);
if (IS_ERR(dfb)) {
ret = PTR_ERR(dfb);
goto err;
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index c5bc53d7e0c4..f2b990f055a2 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -17,7 +17,9 @@ struct armada_framebuffer {
#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
- struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode);
+ struct drm_file *dfile, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode);
#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 6ee7ce04ee71..cb53cc91bafb 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -78,7 +78,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
return -ENOMEM;
}
- dfb = armada_framebuffer_create(dev, &mode, obj);
+ dfb = armada_framebuffer_create(dev,
+ drm_get_format_info(dev, mode.pixel_format,
+ mode.modifier[0]),
+ &mode, obj);
/*
* A reference is now held by the framebuffer object if
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da0663542e8a..242fbccdf844 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_AST
tristate "AST server chips"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 8d09ba5d5889..2547613155da 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -4,6 +4,11 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ast-y := \
+ ast_2000.o \
+ ast_2100.o \
+ ast_2300.o \
+ ast_2500.o \
+ ast_2600.o \
ast_cursor.o \
ast_ddc.o \
ast_dp501.o \
diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c
new file mode 100644
index 000000000000..41c2aa1e425a
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2000.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+void ast_2000_set_def_ext_reg(struct ast_device *ast)
+{
+ static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x9f; i++)
+ ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
+
+ ext_reg_info = extreginfo;
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
+}
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+ { 0x0108, 0x00000000 },
+ { 0x0120, 0x00004a21 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xFFFFFFFF },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000089),
+ { 0x0008, 0x22331353 },
+ { 0x000C, 0x0d07000b },
+ { 0x0010, 0x11113333 },
+ { 0x0020, 0x00110350 },
+ { 0x0028, 0x1e0828f0 },
+ { 0x0024, 0x00000001 },
+ { 0x001C, 0x00000000 },
+ { 0x0014, 0x00000003 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0018, 0x00000131 },
+ { 0x0014, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0018, 0x00000031 },
+ { 0x0014, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0028, 0x1e0828f1 },
+ { 0x0024, 0x00000003 },
+ { 0x002C, 0x1f0f28fb },
+ { 0x0030, 0xFFFFFE01 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+static void ast_post_chip_2000(struct ast_device *ast)
+{
+ u8 j;
+ u32 temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ dram_reg_info = ast2000_dram_table_data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10100, 0xa8);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10100) != 0xa8);
+
+ while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) {
+ if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) {
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else {
+ ast_write32(ast, 0x10000 + dram_reg_info->index,
+ dram_reg_info->data);
+ }
+ dram_reg_info++;
+ }
+
+ temp = ast_read32(ast, 0x10140);
+ ast_write32(ast, 0x10140, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+int ast_2000_post(struct ast_device *ast)
+{
+ ast_2000_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2000(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c
new file mode 100644
index 000000000000..477ee15eff5d
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2100.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x000041f0 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00050000 },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585),
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x22201724 },
+ { 0x0018, 0x1e29011a },
+ { 0x0020, 0x00c82222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x032aa02a },
+ { 0x0064, 0x002d3000 },
+ { 0x0068, 0x00000000 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x002C, 0x00000732 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000632 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00004c41 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x00004120 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00070000 },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000489),
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x32302926 },
+ { 0x0018, 0x274c0122 },
+ { 0x0020, 0x00ce2222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x0f2aa02a },
+ { 0x0064, 0x003f3005 },
+ { 0x0068, 0x02020202 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x002C, 0x00000942 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000842 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00005061 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150 ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150 5
+#define CBR_THRESHOLD_AST2150 10
+#define CBR_THRESHOLD2_AST2150 10
+#define TIMEOUT_AST2150 5000000
+
+#define CBR_PATNUM_AST2150 8
+
+static const u32 pattern_AST2150[14] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0xFFFE0001,
+ 0x683501FE,
+ 0x0F1929B0,
+ 0x2D0B4346,
+ 0x60767F02,
+ 0x6FBE36A6,
+ 0x3A253035,
+ 0x3019686D,
+ 0x41C6167E,
+ 0x620152BF,
+ 0x20F050E0
+};
+
+static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+static int cbrtest_ast2150(struct ast_device *ast)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (mmctestburst2_ast2150(ast, i))
+ return 0;
+ return 1;
+}
+
+static int cbrscan_ast2150(struct ast_device *ast, int busw)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+ if (cbrtest_ast2150(ast))
+ break;
+ }
+ if (loop == CBR_PASSNUM_AST2150)
+ return 0;
+ }
+ return 1;
+}
+
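+/*
+ * DLL calibration for the AST2150: sweep the DLL delay (MCR68) over
+ * 0..99, record the passing window with cbrscan_ast2150(), and restart
+ * the sweep until the window is at least CBR_THRESHOLD_AST2150 wide.
+ * The final setting is placed at min + 7/16 of the window, which
+ * appears to bias the sample point below the window's midpoint.
+ */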
+static void cbrdlli_ast2150(struct ast_device *ast, int busw)
+{
+ u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+ dll_min[0] = 0xff;
+ dll_min[1] = 0xff;
+ dll_min[2] = 0xff;
+ dll_min[3] = 0xff;
+ dll_max[0] = 0x00;
+ dll_max[1] = 0x00;
+ dll_max[2] = 0x00;
+ dll_max[3] = 0x00;
+ passcnt = 0;
+
+ for (dlli = 0; dlli < 100; dlli++) {
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ data = cbrscan_ast2150(ast, busw);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dll_min[0] > dlli)
+ dll_min[0] = dlli;
+ if (dll_max[0] < dlli)
+ dll_max[0] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD_AST2150) {
+ goto cbr_start;
+ }
+ }
+ if (dll_max[0] == 0 || (dll_max[0] - dll_min[0]) < CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+
+ dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
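+/*
+ * DRAM POST for the AST2100/2150/2200: map the 0x1e6e0000 range
+ * through the P2A window, unlock the SCU and MMC with their magic
+ * values, replay the per-chip DRAM register table, calibrate the DLL
+ * on 266 MHz parts, and wait for the ready flag in VGACRD0 (bit 6).
+ */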
+static void ast_post_chip_2100(struct ast_device *ast)
+{
+ u8 j;
+ u32 data, temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ if (ast->chip == AST2100 || ast->chip == AST2200)
+ dram_reg_info = ast2100_dram_table_data;
+ else
+ dram_reg_info = ast1100_dram_table_data;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688A8A8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x01);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+
+ while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) {
+ if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) {
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) {
+ data = dram_reg_info->data;
+ if (ast->dram_type == AST_DRAM_1Gx16)
+ data = 0x00000d89;
+ else if (ast->dram_type == AST_DRAM_1Gx32)
+ data = 0x00000c8d;
+
+ temp = ast_read32(ast, 0x12070);
+ temp &= 0xc;
+ temp <<= 2;
+ ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+ } else {
+ ast_write32(ast, 0x10000 + dram_reg_info->index,
+ dram_reg_info->data);
+ }
+ dram_reg_info++;
+ }
+
+ /* AST 2100/2150 DRAM calibration */
+ data = ast_read32(ast, 0x10120);
+ if (data == 0x5061) { /* 266Mhz */
+ data = ast_read32(ast, 0x10004);
+ if (data & 0x40)
+ cbrdlli_ast2150(ast, 16); /* 16 bits */
+ else
+ cbrdlli_ast2150(ast, 32); /* 32 bits */
+ }
+
+ temp = ast_read32(ast, 0x1200c);
+ ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+ temp = ast_read32(ast, 0x12040);
+ ast_write32(ast, 0x12040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+int ast_2100_post(struct ast_device *ast)
+{
+ ast_2000_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2100(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c
new file mode 100644
index 000000000000..dc2a32244689
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2300.c
@@ -0,0 +1,1328 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+void ast_2300_set_def_ext_reg(struct ast_device *ast)
+{
+ static const u8 extreginfo[] = { 0x0f, 0x04, 0x1f, 0xff };
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x9f; i++)
+ ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
+
+ ext_reg_info = extreginfo;
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ reg |= 0x20;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+ u32 dram_type;
+ u32 dram_chipid;
+ u32 dram_freq;
+ u32 vram_size;
+ u32 odt;
+ u32 wodt;
+ u32 rodt;
+ u32 dram_config;
+ u32 reg_PERIOD;
+ u32 reg_MADJ;
+ u32 reg_SADJ;
+ u32 reg_MRS;
+ u32 reg_EMRS;
+ u32 reg_AC1;
+ u32 reg_AC2;
+ u32 reg_DQSIC;
+ u32 reg_DRV;
+ u32 reg_IOZ;
+ u32 reg_DQIDLY;
+ u32 reg_FREQ;
+ u32 madj_max;
+ u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE0 ((1 << 10) - 1)
+#define CBR_SIZE1 ((4 << 10) - 1)
+#define CBR_SIZE2 ((64 << 10) - 1)
+#define CBR_PASSNUM 5
+#define CBR_PASSNUM2 5
+#define CBR_THRESHOLD 10
+#define CBR_THRESHOLD2 10
+#define TIMEOUT 5000000
+#define CBR_PATNUM 8
+
+static const u32 pattern[8] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0x88778877,
+ 0x92CC4D6E,
+ 0x543D3CDE,
+ 0xF1E843C7,
+ 0x7C61D253
+};
+
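+/*
+ * Generic MMC pattern test: program the test-control bits and data
+ * generator into MCR70, poll the done bit (0x1000), and fold the fail
+ * status from MCR78 into a 16-bit mask. Returns 0xffffffff on timeout.
+ * Bit meanings are inferred from the callers below.
+ */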
+static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = ast_mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test2(ast, datagen, 0x41);
+}
+
+static bool mmc_test_single(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test(ast, datagen, 0xc5);
+}
+
+static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test2(ast, datagen, 0x05);
+}
+
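+/*
+ * Returns a per-byte-lane pass mask: bit 0 set if the lower byte lane
+ * passed, bit 1 if the upper byte lane passed, 0 if both lanes failed
+ * (interpretation inferred from the fail bits returned by mmc_test2()).
+ */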
+static int cbr_test(struct ast_device *ast)
+{
+ u32 data;
+ int i;
+
+ data = mmc_test_single2(ast, 0);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ for (i = 0; i < 8; i++) {
+ data = mmc_test_burst2(ast, i);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ }
+ if (!data)
+ return 3;
+ else if (data & 0xff)
+ return 2;
+ return 1;
+}
+
+static int cbr_scan(struct ast_device *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 3;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test2(struct ast_device *ast)
+{
+ u32 data;
+
+ data = mmc_test_burst2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+ data |= mmc_test_single2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+
+ return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_device *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 0xffff;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test2(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static bool cbr_test3(struct ast_device *ast)
+{
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_single(ast, 0))
+ return false;
+ return true;
+}
+
+static bool cbr_scan3(struct ast_device *ast)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < 2; loop++) {
+ if (cbr_test3(ast))
+ break;
+ }
+ if (loop == 2)
+ return false;
+ }
+ return true;
+}
+
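+/*
+ * Fine-tune the per-bit DQ input delays: sweep the DLL setting over
+ * 0..75, record each DQ lane's passing window from cbr_scan2(),
+ * average the window starts into a golden value, and program the
+ * per-lane delay offsets into MCR80/MCR84. Returns false if not all
+ * lanes produced a usable window within the retry budget.
+ */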
+static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
+ bool status = false;
+FINETUNE_START:
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli)
+ dllmin[cnt] = dlli;
+ if (dllmax[cnt] < dlli)
+ dllmax[cnt] = dlli;
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ passcnt = 0;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ gold_sadj[0] += dllmin[cnt];
+ passcnt++;
+ }
+ }
+ if (retry++ > 10)
+ goto FINETUNE_DONE;
+ if (passcnt != 16)
+ goto FINETUNE_START;
+ status = true;
+FINETUNE_DONE:
+ gold_sadj[0] = gold_sadj[0] >> 4;
+ gold_sadj[1] = gold_sadj[0];
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[0] >= dlli) {
+ dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+ if (dlli > 3)
+ dlli = 3;
+ } else {
+ dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+ if (dlli > 4)
+ dlli = 4;
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[1] >= dlli) {
+ dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+ if (dlli > 3)
+ dlli = 3;
+ else
+ dlli = (dlli - 1) & 0x7;
+ } else {
+ dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+ dlli += 1;
+ if (dlli > 4)
+ dlli = 4;
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0084, data);
+ return status;
+} /* finetuneDQI_L */
+
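+/*
+ * Train the DQS input path: for each of the 32 DQI delay steps and the
+ * two DQS input-phase settings, sweep the DLL and record the passing
+ * range, then pick the combination with the widest margin and merge it
+ * into MCR18. The scoring heuristic mirrors the original vendor code.
+ */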
+static void finetuneDQSI(struct ast_device *ast)
+{
+ u32 dlli, dqsip, dqidly;
+ u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
+ u32 g_dqidly, g_dqsip, g_margin, g_side;
+ u16 pass[32][2][2];
+ char tag[2][76];
+
+ /* Disable DQI CBR */
+ reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
+ reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
+ reg_mcr18 &= 0x0000ffff;
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+
+ for (dlli = 0; dlli < 76; dlli++) {
+ tag[0][dlli] = 0x0;
+ tag[1][dlli] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ pass[dqidly][0][0] = 0xff;
+ pass[dqidly][0][1] = 0x0;
+ pass[dqidly][1][0] = 0xff;
+ pass[dqidly][1][1] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ passcnt[0] = 0;
+ passcnt[1] = 0;
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
+ ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068,
+ 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0070, 0);
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
+ if (cbr_scan3(ast)) {
+ if (dlli == 0)
+ break;
+ passcnt[dqsip]++;
+ tag[dqsip][dlli] = 'P';
+ if (dlli < pass[dqidly][dqsip][0])
+ pass[dqidly][dqsip][0] = (u16)dlli;
+ if (dlli > pass[dqidly][dqsip][1])
+ pass[dqidly][dqsip][1] = (u16)dlli;
+ } else if (passcnt[dqsip] >= 5) {
+ break;
+ } else {
+ pass[dqidly][dqsip][0] = 0xff;
+ pass[dqidly][dqsip][1] = 0x0;
+ }
+ }
+ }
+ if (passcnt[0] == 0 && passcnt[1] == 0)
+ dqidly++;
+ }
+ /* Search margin */
+ g_dqidly = 0;
+ g_dqsip = 0;
+ g_margin = 0;
+ g_side = 0;
+
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
+ continue;
+ diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
+ if ((diff + 2) < g_margin)
+ continue;
+ passcnt[0] = 0;
+ passcnt[1] = 0;
+ for (dlli = pass[dqidly][dqsip][0];
+ dlli > 0 && tag[dqsip][dlli] != 0;
+ dlli--, passcnt[0]++) {
+ }
+ for (dlli = pass[dqidly][dqsip][1];
+ dlli < 76 && tag[dqsip][dlli] != 0;
+ dlli++, passcnt[1]++) {
+ }
+ if (passcnt[0] > passcnt[1])
+ passcnt[0] = passcnt[1];
+ passcnt[1] = 0;
+ if (passcnt[0] > g_side)
+ passcnt[1] = passcnt[0] - g_side;
+ if (diff > (g_margin + 1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ } else if (passcnt[1] > 1 && g_side < 8) {
+ if (diff > g_margin)
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ }
+ }
+ }
+ reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+}
+
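+/*
+ * Top-level DQSI calibration: run the DQS and DQ fine-tuning first,
+ * then sweep the CBR DLL for both byte lanes and program the midpoint
+ * of each passing window into MCR68. Returns false if calibration did
+ * not converge within the retry budget.
+ */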
+static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
+ bool status = false;
+
+ finetuneDQSI(ast);
+ if (finetuneDQI_L(ast, param) == false)
+ return status;
+
+CBR_START2:
+ dllmin[0] = 0xff;
+ dllmin[1] = 0xff;
+ dllmax[0] = 0x0;
+ dllmax[1] = 0x0;
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan(ast);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dllmin[0] > dlli)
+ dllmin[0] = dlli;
+ if (dllmax[0] < dlli)
+ dllmax[0] = dlli;
+ }
+ if (data & 0x2) {
+ if (dllmin[1] > dlli)
+ dllmin[1] = dlli;
+ if (dllmax[1] < dlli)
+ dllmax[1] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ if (retry++ > 10)
+ goto CBR_DONE2;
+ if (dllmax[0] == 0 || (dllmax[0] - dllmin[0]) < CBR_THRESHOLD)
+ goto CBR_START2;
+ if (dllmax[1] == 0 || (dllmax[1] - dllmin[1]) < CBR_THRESHOLD)
+ goto CBR_START2;
+ status = true;
+CBR_DONE2:
+ dlli = (dllmin[1] + dllmax[1]) >> 1;
+ dlli <<= 8;
+ dlli += (dllmin[0] + dllmax[0]) >> 1;
+ ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
+ return status;
+} /* CBRDLL2 */
+
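+/*
+ * Fill the DDR3 parameter set for the requested memory clock. The trap
+ * bits read from SCU70 (0x1E6E2070) select chip-specific MRS and AC
+ * timing variants; each frequency case also programs a matching PLL
+ * setting via SCU20 (0x1E6E2020). Values come from the vendor tables.
+ */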
+static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = 0x00020000 + (trap << 16);
+ trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+ trap_MRS = 0x00000010 + (trap << 4);
+ trap_MRS |= ((trap & 0x2) << 18);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 0;
+ param->reg_AC1 = 0x22202725;
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x04001400 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA00761C | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA007636 | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xCD44961A;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00081830;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 4;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0270);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xDE44A61D;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x070000BB;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 4;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0290);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xEF44B61E;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000088;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302A37;
+ param->reg_AC2 = 0xEF56B61E;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 136;
+ param->dll2_finetune_step = 3;
+ break;
+ case 600:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xDF56B61F;
+ param->reg_DQSIC = 0x0000014D;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000058C0;
+ param->madj_max = 132;
+ param->dll2_finetune_step = 3;
+ break;
+ case 624:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0160);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xEF56B621;
+ param->reg_DQSIC = 0x0000015A;
+ param->reg_MRS = 0x02101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000059C0;
+ param->madj_max = 128;
+ param->dll2_finetune_step = 3;
+ break;
+ } /* switch freq */
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x130;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x131;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x132;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x133;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case SZ_8M:
+ param->dram_config |= 0x00;
+ break;
+ case SZ_16M:
+ param->dram_config |= 0x04;
+ break;
+ case SZ_32M:
+ param->dram_config |= 0x08;
+ break;
+ case SZ_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
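+/*
+ * Bring up the DDR3 controller: reset the MMC, program the delay and
+ * timing registers, wait for MCLK2X to lock (MCR1C bit 27), walk the
+ * DDR3 mode-register sequence, enable ODT, and retry the whole
+ * sequence up to ten times if DQSI calibration fails.
+ */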
+static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr3_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max)
+ break;
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000)
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ else
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ data = 0;
+ if (param->wodt)
+ data = 0x300;
+ if (param->rodt)
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+ /* Calibrate the DQSI delay */
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr3_init_start;
+
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = (trap << 20) | (trap << 16);
+ trap_AC2 += 0x00110000;
+ trap_MRS = 0x00000040 | (trap << 4);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 264:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0130);
+ param->wodt = 0;
+ param->reg_AC1 = 0x11101513;
+ param->reg_AC2 = 0x78117011;
+ param->reg_DQSIC = 0x00000092;
+ param->reg_MRS = 0x00000842;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x000000F0;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x0000005A;
+ param->reg_FREQ = 0x00004AC0;
+ param->madj_max = 138;
+ param->dll2_finetune_step = 3;
+ break;
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 1;
+ param->reg_AC1 = 0x22202613;
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x00000A02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xAA009012 | trap_AC2;
+ break;
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA009023 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA00903B | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xCD44B01E;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 3;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0261);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xDE44C022;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 3;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0120);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xEF44D024;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F9;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A7;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 552:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E025;
+ param->reg_DQSIC = 0x00000132;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000AD;
+ param->reg_FREQ = 0x000056C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E027;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000B3;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ }
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x100;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x121;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x122;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x123;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case SZ_8M:
+ param->dram_config |= 0x00;
+ break;
+ case SZ_16M:
+ param->dram_config |= 0x04;
+ break;
+ case SZ_32M:
+ param->dram_config |= 0x08;
+ break;
+ case SZ_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
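+/*
+ * DDR2 bring-up: same MCLK2X locking loop and calibration flow as
+ * ddr3_init(), but with the DDR2 mode-register and extended-mode
+ * register sequence (including the OCD calibration toggle via
+ * EMRS | 0x380, as in the vendor sequence).
+ */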
+static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr2_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
+ ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max)
+ break;
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000)
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ else
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt)
+ data = 0x500;
+ if (param->rodt)
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+ /* Calibrate the DQSI delay */
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr2_init_start;
+
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
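+/*
+ * DRAM POST for the AST2300 family: decode the DRAM type, chip size
+ * and VRAM size from the SCU70 strap register, slow the CPU/AHB clocks
+ * in VGA-only mode, run the DDR2 or DDR3 init, and wait for the ready
+ * flag in VGACRD0 (bit 6).
+ */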
+static void ast_post_chip_2300(struct ast_device *ast)
+{
+ struct ast2300_dram_param param;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ if ((reg & 0x80) == 0) { /* vga only */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x1);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x1);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ param.dram_freq = 396;
+ param.dram_type = AST_DDR3;
+ temp = ast_mindwm(ast, 0x1e6e2070);
+ if (temp & 0x01000000)
+ param.dram_type = AST_DDR2;
+ switch (temp & 0x18000000) {
+ case 0:
+ param.dram_chipid = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 0x08000000:
+ param.dram_chipid = AST_DRAM_1Gx16;
+ break;
+ case 0x10000000:
+ param.dram_chipid = AST_DRAM_2Gx16;
+ break;
+ case 0x18000000:
+ param.dram_chipid = AST_DRAM_4Gx16;
+ break;
+ }
+ switch (temp & 0x0c) {
+ default:
+ case 0x00:
+ param.vram_size = SZ_8M;
+ break;
+ case 0x04:
+ param.vram_size = SZ_16M;
+ break;
+ case 0x08:
+ param.vram_size = SZ_32M;
+ break;
+ case 0x0c:
+ param.vram_size = SZ_64M;
+ break;
+ }
+
+ if (param.dram_type == AST_DDR3) {
+ get_ddr3_info(ast, &param);
+ ddr3_init(ast, &param);
+ } else {
+ get_ddr2_info(ast, &param);
+ ddr2_init(ast, &param);
+ }
+
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
+int ast_2300_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2300(ast);
+ ast_init_3rdtx(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c
new file mode 100644
index 000000000000..1e541498ea67
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2500.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+/*
+ * AST2500 DRAM settings modules
+ */
+
+#define REGTBL_NUM 17
+#define REGIDX_010 0
+#define REGIDX_014 1
+#define REGIDX_018 2
+#define REGIDX_020 3
+#define REGIDX_024 4
+#define REGIDX_02C 5
+#define REGIDX_030 6
+#define REGIDX_214 7
+#define REGIDX_2E0 8
+#define REGIDX_2E4 9
+#define REGIDX_2E8 10
+#define REGIDX_2EC 11
+#define REGIDX_2F0 12
+#define REGIDX_2F4 13
+#define REGIDX_2F8 14
+#define REGIDX_RFC 15
+#define REGIDX_PLL 16
+
+static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
+ 0x64604D38, /* 0x010 */
+ 0x29690599, /* 0x014 */
+ 0x00000300, /* 0x018 */
+ 0x00000000, /* 0x020 */
+ 0x00000000, /* 0x024 */
+ 0x02181E70, /* 0x02C */
+ 0x00000040, /* 0x030 */
+ 0x00000024, /* 0x214 */
+ 0x02001300, /* 0x2E0 */
+ 0x0E0000A0, /* 0x2E4 */
+ 0x000E001B, /* 0x2E8 */
+ 0x35B8C105, /* 0x2EC */
+ 0x08090408, /* 0x2F0 */
+ 0x9B000800, /* 0x2F4 */
+ 0x0E400A00, /* 0x2F8 */
+ 0x9971452F, /* tRFC */
+ 0x000071C1 /* PLL */
+};
+
+static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
+ 0x63604E37, /* 0x010 */
+ 0xE97AFA99, /* 0x014 */
+ 0x00019000, /* 0x018 */
+ 0x08000000, /* 0x020 */
+ 0x00000400, /* 0x024 */
+ 0x00000410, /* 0x02C */
+ 0x00000101, /* 0x030 */
+ 0x00000024, /* 0x214 */
+ 0x03002900, /* 0x2E0 */
+ 0x0E0000A0, /* 0x2E4 */
+ 0x000E001C, /* 0x2E8 */
+ 0x35B8C106, /* 0x2EC */
+ 0x08080607, /* 0x2F0 */
+ 0x9B000900, /* 0x2F4 */
+ 0x0E400A00, /* 0x2F8 */
+ 0x99714545, /* tRFC */
+ 0x000071C1 /* PLL */
+};
+
+#define TIMEOUT 5000000
+
+void ast_2500_patch_ahb(void __iomem *regs)
+{
+ u32 data;
+
+ /* Clear bus lock condition */
+ __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
+ __ast_moutdwm(regs, 0x1e600084, 0x00010000);
+ __ast_moutdwm(regs, 0x1e600088, 0x00000000);
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+
+ data = __ast_mindwm(regs, 0x1e6e2070);
+ if (data & 0x08000000) { /* check fast reset */
+ /*
+ * If "Fast restet" is enabled for ARM-ICE debugger,
+ * then WDT needs to enable, that
+ * WDT04 is WDT#1 Reload reg.
+ * WDT08 is WDT#1 counter restart reg to avoid system deadlock
+ * WDT0C is WDT#1 control reg
+ * [6:5]:= 01:Full chip
+ * [4]:= 1:1MHz clock source
+ * [1]:= 1:WDT will be cleeared and disabled after timeout occurs
+ * [0]:= 1:WDT enable
+ */
+ __ast_moutdwm(regs, 0x1E785004, 0x00000010);
+ __ast_moutdwm(regs, 0x1E785008, 0x00004755);
+ __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
+ udelay(1000);
+ }
+
+ do {
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+ data = __ast_mindwm(regs, 0x1e6e2000);
+ } while (data != 1);
+
+ __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
+}
+
+static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test(ast, datagen, 0x85);
+}
+
+static bool cbr_test_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
+ ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_single_2500(ast, 0))
+ return false;
+ return true;
+}
+
+static bool ddr_test_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
+ ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_burst(ast, 1))
+ return false;
+ if (!mmc_test_burst(ast, 2))
+ return false;
+ if (!mmc_test_burst(ast, 3))
+ return false;
+ if (!mmc_test_single_2500(ast, 0))
+ return false;
+ return true;
+}
+
+static void ddr_init_common_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
+ ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
+ ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
+ ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
+ ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
+ ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
+}
+
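+/*
+ * Fire DFI init (MCR60 = 5) and poll until the PHY reports done with
+ * no error bits (0x000A0000) set in MCR300; retry indefinitely on
+ * failure, then switch MCR60 to normal operation (6). Bit meanings are
+ * inferred from the polling logic.
+ */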
+static void ddr_phy_init_2500(struct ast_device *ast)
+{
+ u32 data, pass, timecnt;
+
+ pass = 0;
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
+ while (!pass) {
+ for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
+ data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
+ if (!data)
+ break;
+ }
+ if (timecnt != TIMEOUT) {
+ data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
+ if (!data)
+ pass = 1;
+ }
+ if (!pass) {
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ udelay(10); /* delay 10 us */
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
+ }
+ }
+
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
+}
+
+/*
+ * Check DRAM Size
+ * 1Gb : 0x80000000 ~ 0x87FFFFFF
+ * 2Gb : 0x80000000 ~ 0x8FFFFFFF
+ * 4Gb : 0x80000000 ~ 0x9FFFFFFF
+ * 8Gb : 0x80000000 ~ 0xBFFFFFFF
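+ *
+ * The probe relies on address aliasing: patterns are written near the
+ * 8Gb, 4Gb and 2Gb boundaries, and on smaller parts the higher
+ * addresses wrap around, so the largest address that still reads back
+ * its own pattern reveals the installed size (inferred from the code
+ * below).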
+ */
+static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
+{
+ u32 reg_04, reg_14;
+
+ reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
+ reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;
+
+ ast_moutdwm(ast, 0xA0100000, 0x41424344);
+ ast_moutdwm(ast, 0x90100000, 0x35363738);
+ ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
+ ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);
+
+ /* Check 8Gbit */
+ if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
+ reg_04 |= 0x03;
+ reg_14 |= (tRFC >> 24) & 0xFF;
+ /* Check 4Gbit */
+ } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
+ reg_04 |= 0x02;
+ reg_14 |= (tRFC >> 16) & 0xFF;
+ /* Check 2Gbit */
+ } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
+ reg_04 |= 0x01;
+ reg_14 |= (tRFC >> 8) & 0xFF;
+ } else {
+ reg_14 |= tRFC & 0xFF;
+ }
+ ast_moutdwm(ast, 0x1E6E0004, reg_04);
+ ast_moutdwm(ast, 0x1E6E0014, reg_14);
+}
+
+static void enable_cache_2500(struct ast_device *ast)
+{
+ u32 reg_04, data;
+
+ reg_04 = ast_mindwm(ast, 0x1E6E0004);
+ ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);
+
+ do {
+ data = ast_mindwm(ast, 0x1E6E0004);
+ } while (!(data & 0x80000));
+ ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
+}
+
+static void set_mpll_2500(struct ast_device *ast)
+{
+ u32 addr, data, param;
+
+ /* Reset MMC */
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
+ for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
+ ast_moutdwm(ast, addr, 0x0);
+ addr += 4;
+ }
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020000);
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+ data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
+ if (data) {
+ /* CLKIN = 25MHz */
+ param = 0x930023E0;
+ ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
+ } else {
+ /* CLKIN = 24MHz */
+ param = 0x93002400;
+ }
+ ast_moutdwm(ast, 0x1E6E2020, param);
+ udelay(100);
+}
+
+static void reset_mmc_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E78505C, 0x00000004);
+ ast_moutdwm(ast, 0x1E785044, 0x00000001);
+ ast_moutdwm(ast, 0x1E785048, 0x00004755);
+ ast_moutdwm(ast, 0x1E78504C, 0x00000013);
+ mdelay(100);
+ ast_moutdwm(ast, 0x1E785054, 0x00000077);
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+}
+
+static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
+{
+ ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
+ ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
+ ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
+ ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
+ ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
+ ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
+ ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
+ ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
+
+ /* DDR PHY Setting */
+ ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
+ ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
+ ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
+ ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
+ ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
+ ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
+ ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
+ ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
+ ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
+ ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
+ ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
+ ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);
+
+ /* Controller Setting */
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020091);
+
+ /* Wait DDR PHY init done */
+ ddr_phy_init_2500(ast);
+
+ ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
+ ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
+
+ check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
+ enable_cache_2500(ast);
+ ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
+ ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
+}
+
+static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
+{
+ u32 data, data2, pass, retrycnt;
+ u32 ddr_vref, phy_vref;
+ u32 min_ddr_vref = 0, min_phy_vref = 0;
+ u32 max_ddr_vref = 0, max_phy_vref = 0;
+
+ ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
+ ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
+ ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
+ ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
+ ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
+ ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
+ ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
+ ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
+
+ /* DDR PHY Setting */
+ ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
+ ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
+ ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
+ ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
+ ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
+ ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
+ ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
+ ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
+ ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
+ ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
+ ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
+ ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
+ ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
+ ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);
+
+ /* Controller Setting */
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);
+
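+ /*
+ * The two sweeps below train the PHY-side Vref (0x40..0x7f) and then
+ * the DRAM-side Vref (0x00..0x3f). Each step re-runs DFI init and a
+ * CBR pattern test; the PHY sweep keeps the setting with the largest
+ * eye readout from MCR3D0, while the DRAM sweep centers within the
+ * passing range (interpretation inferred from the register usage).
+ */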
+ /* Train PHY Vref first */
+ pass = 0;
+
+ for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
+ max_phy_vref = 0x0;
+ pass = 0;
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
+ for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
+ /* Fire DFI Init */
+ ddr_phy_init_2500(ast);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ if (cbr_test_2500(ast)) {
+ pass++;
+ data = ast_mindwm(ast, 0x1E6E03D0);
+ data2 = data >> 8;
+ data = data & 0xff;
+ if (data > data2)
+ data = data2;
+ if (max_phy_vref < data) {
+ max_phy_vref = data;
+ min_phy_vref = phy_vref;
+ }
+ } else if (pass > 0) {
+ break;
+ }
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));
+
+ /* Train DDR Vref next */
+ pass = 0;
+
+ for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
+ min_ddr_vref = 0xFF;
+ max_ddr_vref = 0x0;
+ pass = 0;
+ for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
+ /* Fire DFI Init */
+ ddr_phy_init_2500(ast);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ if (cbr_test_2500(ast)) {
+ pass++;
+ if (min_ddr_vref > ddr_vref)
+ min_ddr_vref = ddr_vref;
+ if (max_ddr_vref < ddr_vref)
+ max_ddr_vref = ddr_vref;
+ } else if (pass != 0) {
+ break;
+ }
+ }
+ }
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
+
+ /* Wait DDR PHY init done */
+ ddr_phy_init_2500(ast);
+
+ ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
+ ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
+
+ check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
+ enable_cache_2500(ast);
+ ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
+ ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
+}
+
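+/*
+ * Full DRAM bring-up for the AST2500: set up the M-PLL, reset the MMC,
+ * run the common init, then take the DDR3 or DDR4 path depending on
+ * the DRAM-type strap in SCU70, repeating up to five times until the
+ * pattern test passes. Returns false if every attempt fails.
+ */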
+static bool ast_dram_init_2500(struct ast_device *ast)
+{
+ u32 data;
+ u32 max_tries = 5;
+
+ do {
+ if (max_tries-- == 0)
+ return false;
+ set_mpll_2500(ast);
+ reset_mmc_2500(ast);
+ ddr_init_common_2500(ast);
+
+ data = ast_mindwm(ast, 0x1E6E2070);
+ if (data & 0x01000000)
+ ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
+ else
+ ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
+ } while (!ddr_test_2500(ast));
+
+ ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);
+
+ /* Patch code */
+ data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
+ ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);
+
+ return true;
+}
+
+static void ast_post_chip_2500(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) { /* vga only */
+ /* Clear bus lock condition */
+ ast_2500_patch_ahb(ast->regs);
+
+ /* Disable watchdog */
+ ast_moutdwm(ast, 0x1E78502C, 0x00000000);
+ ast_moutdwm(ast, 0x1E78504C, 0x00000000);
+
+ /*
+ * Reset the USB port to work around the USB unknown-device issue.
+ * SCU90 is Multi-function Pin Control #5
+ * [29]:= 1:Enable USB2.0 Host port#1 (the port shared with the
+ *        USB2.0 hub).
+ * SCU94 is Multi-function Pin Control #6
+ * [14:13]:= 1x:USB2.0 Host2 controller
+ * SCU70 is the Hardware Strap reg
+ * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
+ *        [18]: 0(24)/1(48) MHz)
+ * SCU7C is the write-clear reg for SCU70
+ * [23]:= write 1 and SCU70[23] will be cleared to 0.
+ */
+ ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
+ if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
+ ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
+ mdelay(100);
+ ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
+ }
+ /* Modify eSPI reset pin */
+ temp = ast_mindwm(ast, 0x1E6E2070);
+ if (temp & 0x02000000)
+ ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ if (!ast_dram_init_2500(ast))
+ drm_err(dev, "DRAM init failed!\n");
+
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
+int ast_2500_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2500(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c
new file mode 100644
index 000000000000..8d75a47444f5
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2600.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+int ast_2600_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->tx_chip == AST_TX_ASTDP)
+ return ast_dp_launch(ast);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index 139ab00dee8f..2d3ad7610c2e 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -37,6 +37,7 @@
*/
/* define for signature structure */
+#define AST_HWC_SIGNATURE_SIZE SZ_32
#define AST_HWC_SIGNATURE_CHECKSUM 0x00
#define AST_HWC_SIGNATURE_SizeX 0x04
#define AST_HWC_SIGNATURE_SizeY 0x08
@@ -45,6 +46,21 @@
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+static unsigned long ast_cursor_vram_size(void)
+{
+ return AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE;
+}
+
+long ast_cursor_vram_offset(struct ast_device *ast)
+{
+ unsigned long size = ast_cursor_vram_size();
+
+ if (size > ast->vram_size)
+ return -EINVAL;
+
+ return ALIGN_DOWN(ast->vram_size - size, SZ_8);
+}
+
static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height)
{
u32 csum = 0;
@@ -75,7 +91,7 @@ static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, un
static void ast_set_cursor_image(struct ast_device *ast, const u8 *src,
unsigned int width, unsigned int height)
{
- u8 __iomem *dst = ast->cursor_plane.base.vaddr;
+ u8 __iomem *dst = ast_plane_vaddr(&ast->cursor_plane.base);
u32 csum;
csum = ast_cursor_calculate_checksum(src, width, height);
@@ -177,7 +193,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct ast_device *ast = to_ast_device(plane->dev);
struct drm_rect damage;
u64 dst_off = ast_plane->offset;
- u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
+ u8 __iomem *dst = ast_plane_vaddr(ast_plane); /* TODO: Use mapping abstraction properly */
u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
unsigned int offset_x, offset_y;
u16 x, y;
@@ -274,25 +290,16 @@ int ast_cursor_plane_init(struct ast_device *ast)
struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
struct ast_plane *ast_plane = &ast_cursor_plane->base;
struct drm_plane *cursor_plane = &ast_plane->base;
- size_t size;
- void __iomem *vaddr;
- u64 offset;
+ unsigned long size;
+ long offset;
int ret;
- /*
- * Allocate backing storage for cursors. The BOs are permanently
- * pinned to the top end of the VRAM.
- */
-
- size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
+ size = ast_cursor_vram_size();
+ offset = ast_cursor_vram_offset(ast);
+ if (offset < 0)
+ return offset;
- if (ast->vram_fb_available < size)
- return -ENOMEM;
-
- vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_fb_available - size;
-
- ret = ast_plane_init(dev, ast_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_plane, offset, size,
0x01, &ast_cursor_plane_funcs,
ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR);
@@ -303,7 +310,5 @@ int ast_cursor_plane_init(struct ast_device *ast)
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(cursor_plane);
- ast->vram_fb_available -= size;
-
return 0;
}
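[Editor's note: with the cached vaddr/offset pair gone, the cursor's placement is pure arithmetic on the VRAM size: the 8 KiB HWC image (64x64 pixels at 2 bytes each) plus its 32-byte signature sit at the top of VRAM, aligned down to 8 bytes. A worked sketch of that math as self-contained userspace C, assuming 8 MiB of VRAM:]

#include <stdio.h>

#define ALIGN_DOWN(x, a)        ((x) & ~((unsigned long)(a) - 1)) /* power-of-2 a */

int main(void)
{
        unsigned long vram_size = 8ul << 20;    /* 8 MiB of VRAM */
        unsigned long hwc_size = 64 * 64 * 2;   /* AST_HWC_SIZE */
        unsigned long sig_size = 32;            /* AST_HWC_SIGNATURE_SIZE */
        unsigned long offset;

        offset = ALIGN_DOWN(vram_size - (hwc_size + sig_size), 8);
        printf("cursor plane at VRAM offset 0x%lx\n", offset);  /* 0x7fdfe0 */
        return 0;
}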
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
deleted file mode 100644
index 1e9ac9d6d26c..000000000000
--- a/drivers/gpu/drm/ast/ast_dram_tables.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef AST_DRAM_TABLES_H
-#define AST_DRAM_TABLES_H
-
-/* DRAM timing tables */
-struct ast_dramstruct {
- u16 index;
- u32 data;
-};
-
-static const struct ast_dramstruct ast2000_dram_table_data[] = {
- { 0x0108, 0x00000000 },
- { 0x0120, 0x00004a21 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xFFFFFFFF },
- { 0x0004, 0x00000089 },
- { 0x0008, 0x22331353 },
- { 0x000C, 0x0d07000b },
- { 0x0010, 0x11113333 },
- { 0x0020, 0x00110350 },
- { 0x0028, 0x1e0828f0 },
- { 0x0024, 0x00000001 },
- { 0x001C, 0x00000000 },
- { 0x0014, 0x00000003 },
- { 0xFF00, 0x00000043 },
- { 0x0018, 0x00000131 },
- { 0x0014, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x0018, 0x00000031 },
- { 0x0014, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x0028, 0x1e0828f1 },
- { 0x0024, 0x00000003 },
- { 0x002C, 0x1f0f28fb },
- { 0x0030, 0xFFFFFE01 },
- { 0xFFFF, 0xFFFFFFFF }
-};
-
-static const struct ast_dramstruct ast1100_dram_table_data[] = {
- { 0x2000, 0x1688a8a8 },
- { 0x2020, 0x000041f0 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xfc600309 },
- { 0x006C, 0x00909090 },
- { 0x0064, 0x00050000 },
- { 0x0004, 0x00000585 },
- { 0x0008, 0x0011030f },
- { 0x0010, 0x22201724 },
- { 0x0018, 0x1e29011a },
- { 0x0020, 0x00c82222 },
- { 0x0014, 0x01001523 },
- { 0x001C, 0x1024010d },
- { 0x0024, 0x00cb2522 },
- { 0x0038, 0xffffff82 },
- { 0x003C, 0x00000000 },
- { 0x0040, 0x00000000 },
- { 0x0044, 0x00000000 },
- { 0x0048, 0x00000000 },
- { 0x004C, 0x00000000 },
- { 0x0050, 0x00000000 },
- { 0x0054, 0x00000000 },
- { 0x0058, 0x00000000 },
- { 0x005C, 0x00000000 },
- { 0x0060, 0x032aa02a },
- { 0x0064, 0x002d3000 },
- { 0x0068, 0x00000000 },
- { 0x0070, 0x00000000 },
- { 0x0074, 0x00000000 },
- { 0x0078, 0x00000000 },
- { 0x007C, 0x00000000 },
- { 0x0034, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x002C, 0x00000732 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000005 },
- { 0x0028, 0x00000007 },
- { 0x0028, 0x00000003 },
- { 0x0028, 0x00000001 },
- { 0x000C, 0x00005a08 },
- { 0x002C, 0x00000632 },
- { 0x0028, 0x00000001 },
- { 0x0030, 0x000003c0 },
- { 0x0028, 0x00000003 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000003 },
- { 0x000C, 0x00005a21 },
- { 0x0034, 0x00007c03 },
- { 0x0120, 0x00004c41 },
- { 0xffff, 0xffffffff },
-};
-
-static const struct ast_dramstruct ast2100_dram_table_data[] = {
- { 0x2000, 0x1688a8a8 },
- { 0x2020, 0x00004120 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xfc600309 },
- { 0x006C, 0x00909090 },
- { 0x0064, 0x00070000 },
- { 0x0004, 0x00000489 },
- { 0x0008, 0x0011030f },
- { 0x0010, 0x32302926 },
- { 0x0018, 0x274c0122 },
- { 0x0020, 0x00ce2222 },
- { 0x0014, 0x01001523 },
- { 0x001C, 0x1024010d },
- { 0x0024, 0x00cb2522 },
- { 0x0038, 0xffffff82 },
- { 0x003C, 0x00000000 },
- { 0x0040, 0x00000000 },
- { 0x0044, 0x00000000 },
- { 0x0048, 0x00000000 },
- { 0x004C, 0x00000000 },
- { 0x0050, 0x00000000 },
- { 0x0054, 0x00000000 },
- { 0x0058, 0x00000000 },
- { 0x005C, 0x00000000 },
- { 0x0060, 0x0f2aa02a },
- { 0x0064, 0x003f3005 },
- { 0x0068, 0x02020202 },
- { 0x0070, 0x00000000 },
- { 0x0074, 0x00000000 },
- { 0x0078, 0x00000000 },
- { 0x007C, 0x00000000 },
- { 0x0034, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x002C, 0x00000942 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000005 },
- { 0x0028, 0x00000007 },
- { 0x0028, 0x00000003 },
- { 0x0028, 0x00000001 },
- { 0x000C, 0x00005a08 },
- { 0x002C, 0x00000842 },
- { 0x0028, 0x00000001 },
- { 0x0030, 0x000003c0 },
- { 0x0028, 0x00000003 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000003 },
- { 0x000C, 0x00005a21 },
- { 0x0034, 0x00007c03 },
- { 0x0120, 0x00005061 },
- { 0xffff, 0xffffffff },
-};
-
-/*
- * AST2500 DRAM settings modules
- */
-#define REGTBL_NUM 17
-#define REGIDX_010 0
-#define REGIDX_014 1
-#define REGIDX_018 2
-#define REGIDX_020 3
-#define REGIDX_024 4
-#define REGIDX_02C 5
-#define REGIDX_030 6
-#define REGIDX_214 7
-#define REGIDX_2E0 8
-#define REGIDX_2E4 9
-#define REGIDX_2E8 10
-#define REGIDX_2EC 11
-#define REGIDX_2F0 12
-#define REGIDX_2F4 13
-#define REGIDX_2F8 14
-#define REGIDX_RFC 15
-#define REGIDX_PLL 16
-
-static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
- 0x64604D38, /* 0x010 */
- 0x29690599, /* 0x014 */
- 0x00000300, /* 0x018 */
- 0x00000000, /* 0x020 */
- 0x00000000, /* 0x024 */
- 0x02181E70, /* 0x02C */
- 0x00000040, /* 0x030 */
- 0x00000024, /* 0x214 */
- 0x02001300, /* 0x2E0 */
- 0x0E0000A0, /* 0x2E4 */
- 0x000E001B, /* 0x2E8 */
- 0x35B8C105, /* 0x2EC */
- 0x08090408, /* 0x2F0 */
- 0x9B000800, /* 0x2F4 */
- 0x0E400A00, /* 0x2F8 */
- 0x9971452F, /* tRFC */
- 0x000071C1 /* PLL */
-};
-
-static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
- 0x63604E37, /* 0x010 */
- 0xE97AFA99, /* 0x014 */
- 0x00019000, /* 0x018 */
- 0x08000000, /* 0x020 */
- 0x00000400, /* 0x024 */
- 0x00000410, /* 0x02C */
- 0x00000101, /* 0x030 */
- 0x00000024, /* 0x214 */
- 0x03002900, /* 0x2E0 */
- 0x0E0000A0, /* 0x2E4 */
- 0x000E001C, /* 0x2E8 */
- 0x35B8C106, /* 0x2EC */
- 0x08080607, /* 0x2F0 */
- 0x9B000900, /* 0x2F4 */
- 0x0E400A00, /* 0x2F8 */
- 0x99714545, /* tRFC */
- 0x000071C1 /* PLL */
-};
-
-#endif
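[Editor's note: these tables were consumed by a small interpreter, the old ast_init_dram_reg() removed from ast_post.c further below: an index of 0xFFFF terminates the walk, 0xFF00 encodes a delay of `data` microseconds applied 15 times, and any other index is written to the memory controller at offset 0x10000 + index. A sketch of that walk, omitting the GEN2/GEN3 DRAM-type fixup for index 0x04:]

static void walk_dram_table(struct ast_device *ast,
                            const struct ast_dramstruct *entry)
{
        unsigned int i;

        while (entry->index != 0xffff) {
                if (entry->index == 0xff00) {           /* delay opcode */
                        for (i = 0; i < 15; i++)
                                udelay(entry->data);
                } else {
                        ast_write32(ast, 0x10000 + entry->index, entry->data);
                }
                entry++;
        }
}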
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 6fbf62a99c48..473faa92d08c 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -171,7 +171,7 @@ static int ast_detect_chip(struct pci_dev *pdev,
/* Patch AST2500/AST2510 */
if ((pdev->revision & 0xf0) == 0x40) {
if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK))
- ast_patch_ahb_2500(regs);
+ ast_2500_patch_ahb(regs);
}
/* Double check that it's actually working */
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d2c2605d2728..e37a55295ed7 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -112,12 +112,9 @@ enum ast_config_mode {
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
-
#define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2)
#define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
-#define AST_HWC_SIGNATURE_SIZE 32
-
/*
* Planes
*/
@@ -125,7 +122,6 @@ enum ast_config_mode {
struct ast_plane {
struct drm_plane base;
- void __iomem *vaddr;
u64 offset;
unsigned long size;
};
@@ -183,7 +179,6 @@ struct ast_device {
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
- unsigned long vram_fb_available;
struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
@@ -340,14 +335,6 @@ static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 i
__ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
}
-#define AST_VIDMEM_SIZE_8M 0x00800000
-#define AST_VIDMEM_SIZE_16M 0x01000000
-#define AST_VIDMEM_SIZE_32M 0x02000000
-#define AST_VIDMEM_SIZE_64M 0x04000000
-#define AST_VIDMEM_SIZE_128M 0x08000000
-
-#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-
struct ast_vbios_stdtable {
u8 misc;
u8 seq[4];
@@ -430,16 +417,32 @@ struct ast_crtc_state {
int ast_mm_init(struct ast_device *ast);
+/* ast_2000.c */
+int ast_2000_post(struct ast_device *ast);
+
+/* ast_2100.c */
+int ast_2100_post(struct ast_device *ast);
+
+/* ast_2300.c */
+int ast_2300_post(struct ast_device *ast);
+
+/* ast_2500.c */
+void ast_2500_patch_ahb(void __iomem *regs);
+int ast_2500_post(struct ast_device *ast);
+
+/* ast_2600.c */
+int ast_2600_post(struct ast_device *ast);
+
/* ast post */
int ast_post_gpu(struct ast_device *ast);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(void __iomem *regs);
int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
/* ast_cursor.c */
+long ast_cursor_vram_offset(struct ast_device *ast);
int ast_cursor_plane_init(struct ast_device *ast);
/* ast dp501 */
@@ -454,11 +457,12 @@ int ast_astdp_output_init(struct ast_device *ast);
/* ast_mode.c */
int ast_mode_config_init(struct ast_device *ast);
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type);
+void __iomem *ast_plane_vaddr(struct ast_plane *ast);
#endif
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 6dfe6d9777d4..0bc140319464 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -35,36 +35,35 @@
static u32 ast_get_vram_size(struct ast_device *ast)
{
- u8 jreg;
u32 vram_size;
+ u8 vgacr99, vgacraa;
- vram_size = AST_VIDMEM_DEFAULT_SIZE;
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff);
- switch (jreg & 3) {
+ vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa);
+ switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) {
case 0:
- vram_size = AST_VIDMEM_SIZE_8M;
+ vram_size = SZ_8M;
break;
case 1:
- vram_size = AST_VIDMEM_SIZE_16M;
+ vram_size = SZ_16M;
break;
case 2:
- vram_size = AST_VIDMEM_SIZE_32M;
+ vram_size = SZ_32M;
break;
case 3:
- vram_size = AST_VIDMEM_SIZE_64M;
+ vram_size = SZ_64M;
break;
}
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff);
- switch (jreg & 0x03) {
+ vgacr99 = ast_get_index_reg(ast, AST_IO_VGACRI, 0x99);
+ switch (vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK) {
case 1:
- vram_size -= 0x100000;
+ vram_size -= SZ_1M;
break;
case 2:
- vram_size -= 0x200000;
+ vram_size -= SZ_2M;
break;
case 3:
- vram_size -= 0x400000;
+ vram_size -= SZ_4M;
break;
}
@@ -93,7 +92,6 @@ int ast_mm_init(struct ast_device *ast)
ast->vram_base = base;
ast->vram_size = vram_size;
- ast->vram_fb_available = vram_size;
return 0;
}
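[Editor's note: the rewritten ast_get_vram_size() decodes two 2-bit register fields: VGACRAA[1:0] selects the installed VRAM (8/16/32/64 MiB) and VGACR99[1:0] the amount reserved from it (0/1/2/4 MiB). The same decode as a table lookup, a sketch with a hypothetical helper name:]

static unsigned long decode_vram_size(u8 vgacraa, u8 vgacr99)
{
        static const unsigned long size[] = {
                SZ_8M, SZ_16M, SZ_32M, SZ_64M,  /* VGACRAA[1:0] */
        };
        static const unsigned long rsrv[] = {
                0, SZ_1M, SZ_2M, SZ_4M,         /* VGACR99[1:0] */
        };

        return size[vgacraa & 0x3] - rsrv[vgacr99 & 0x3];
}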
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c3b950675485..b4e8edc7c767 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -29,11 +29,11 @@
*/
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
@@ -51,31 +51,64 @@
#define AST_LUT_SIZE 256
-static inline void ast_load_palette_index(struct ast_device *ast,
- u8 index, u8 red, u8 green,
- u8 blue)
+#define AST_PRIMARY_PLANE_MAX_OFFSET (BIT(16) - 1)
+
+static unsigned long ast_fb_vram_offset(void)
+{
+ return 0; // with shmem, the primary plane is always at offset 0
+}
+
+static unsigned long ast_fb_vram_size(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ unsigned long offset = ast_fb_vram_offset(); // starts at offset
+ long cursor_offset = ast_cursor_vram_offset(ast); // ends at cursor offset
+
+ if (cursor_offset < 0)
+ cursor_offset = ast->vram_size; // no cursor; it's all ours
+ if (drm_WARN_ON_ONCE(dev, offset > cursor_offset))
+ return 0; // cannot legally happen; signal error
+ return cursor_offset - offset;
+}
+
+static void ast_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
{
- ast_io_write8(ast, AST_IO_VGADWR, index);
+ struct drm_device *dev = crtc->dev;
+ struct ast_device *ast = to_ast_device(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ ast_io_write8(ast, AST_IO_VGADWR, i8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, red);
+ ast_io_write8(ast, AST_IO_VGAPDR, r8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, green);
+ ast_io_write8(ast, AST_IO_VGAPDR, g8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, blue);
+ ast_io_write8(ast, AST_IO_VGAPDR, b8);
ast_io_read8(ast, AST_IO_VGASRI);
}
-static void ast_crtc_set_gamma_linear(struct ast_device *ast,
- const struct drm_format_info *format)
+static void ast_crtc_fill_gamma(struct ast_device *ast,
+ const struct drm_format_info *format)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_fill_palette_8(crtc, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* also uses 8-bit gamma ramp on low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i, i, i, i);
+ drm_crtc_fill_gamma_888(crtc, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -84,21 +117,22 @@ static void ast_crtc_set_gamma_linear(struct ast_device *ast,
}
}
-static void ast_crtc_set_gamma(struct ast_device *ast,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void ast_crtc_load_gamma(struct ast_device *ast,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_load_palette_8(crtc, lut, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* also uses 8-bit gamma ramp on low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i,
- lut[i].red >> 8,
- lut[i].green >> 8,
- lut[i].blue >> 8);
+ drm_crtc_load_gamma_888(crtc, lut, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -439,7 +473,7 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
*/
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
@@ -448,7 +482,6 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
{
struct drm_plane *plane = &ast_plane->base;
- ast_plane->vaddr = vaddr;
ast_plane->offset = offset;
ast_plane->size = size;
@@ -457,6 +490,13 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
type, NULL);
}
+void __iomem *ast_plane_vaddr(struct ast_plane *ast_plane)
+{
+ struct ast_device *ast = to_ast_device(ast_plane->base.dev);
+
+ return ast->vram + ast_plane->offset;
+}
+
/*
* Primary plane
*/
@@ -503,7 +543,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane_vaddr(ast_plane));
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
@@ -576,12 +616,12 @@ static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
{
struct ast_plane *ast_plane = to_ast_plane(plane);
- if (plane->state && plane->state->fb && ast_plane->vaddr) {
+ if (plane->state && plane->state->fb) {
sb->format = plane->state->fb->format;
sb->width = plane->state->fb->width;
sb->height = plane->state->fb->height;
sb->pitch[0] = plane->state->fb->pitches[0];
- iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr);
+ iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane_vaddr(ast_plane));
return 0;
}
return -ENODEV;
@@ -608,13 +648,11 @@ static int ast_primary_plane_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
- void __iomem *vaddr = ast->vram;
- u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
- unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- unsigned long size = ast->vram_fb_available - cursor_size;
+ u64 offset = ast_fb_vram_offset();
+ unsigned long size = ast_fb_vram_size(ast);
int ret;
- ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_primary_plane, offset, size,
0x01, &ast_primary_plane_funcs,
ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY);
@@ -787,11 +825,11 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
*/
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (crtc_state->gamma_lut)
- ast_crtc_set_gamma(ast,
- ast_crtc_state->format,
- crtc_state->gamma_lut->data);
+ ast_crtc_load_gamma(ast,
+ ast_crtc_state->format,
+ crtc_state->gamma_lut->data);
else
- ast_crtc_set_gamma_linear(ast, ast_crtc_state->format);
+ ast_crtc_fill_gamma(ast, ast_crtc_state->format);
}
}
@@ -922,9 +960,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
/*
* Concurrent operations could possibly trigger a call to
- * drm_connector_helper_funcs.get_modes by trying to read the
- * display modes. Protect access to I/O registers by acquiring
- * the I/O-register lock. Released in atomic_flush().
+ * drm_connector_helper_funcs.get_modes by reading the display
+ * modes. Protect access to registers by acquiring the modeset
+ * lock.
*/
mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail(state);
@@ -938,16 +976,20 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
struct ast_device *ast = to_ast_device(dev);
- unsigned long fbsize, fbpages, max_fbpages;
-
- max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
+ unsigned long max_fb_size = ast_fb_vram_size(ast);
+ u64 pitch;
+
+ if (drm_WARN_ON_ONCE(dev, !info))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(info, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > AST_PRIMARY_PLANE_MAX_OFFSET)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > max_fb_size / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
@@ -1018,10 +1060,7 @@ int ast_mode_config_init(struct ast_device *ast)
return ret;
drm_mode_config_reset(dev);
-
- ret = drmm_kms_helper_poll_init(dev);
- if (ret)
- return ret;
+ drmm_kms_helper_poll_init(dev);
return 0;
}
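[Editor's note: the new mode_valid check reduces to two pitch comparisons: for XRGB8888 the minimum pitch is hdisplay * 4 bytes, which must fit both the hardware's 16-bit offset register and, multiplied by vdisplay, the framebuffer VRAM. For example, 1920x1080 needs a 7680-byte pitch and about 7.9 MiB, so it fits an 8 MiB board. A sketch with hypothetical names:]

static int xrgb8888_mode_fits(unsigned int hdisplay, unsigned int vdisplay,
                              unsigned long fb_vram_size)
{
        unsigned long pitch = 4ul * hdisplay;   /* drm_format_info_min_pitch() */

        if (!pitch || pitch > 0xffff)           /* AST_PRIMARY_PLANE_MAX_OFFSET */
                return 0;                       /* MODE_BAD_WIDTH */
        if (pitch > fb_vram_size / vdisplay)    /* avoids pitch * vdisplay overflow */
                return 0;                       /* MODE_MEM */
        return 1;                               /* MODE_OK */
}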
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 91e85e457bdf..b72914dbed38 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -31,51 +31,10 @@
#include <drm/drm_print.h>
-#include "ast_dram_tables.h"
#include "ast_drv.h"
+#include "ast_post.h"
-static void ast_post_chip_2300(struct ast_device *ast);
-static void ast_post_chip_2500(struct ast_device *ast);
-
-static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
-static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
-
-static void ast_set_def_ext_reg(struct ast_device *ast)
-{
- u8 i, index, reg;
- const u8 *ext_reg_info;
-
- /* reset scratch */
- for (i = 0x81; i <= 0x9f; i++)
- ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
-
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
- ext_reg_info = extreginfo_ast2300;
- else
- ext_reg_info = extreginfo;
-
- index = 0xa0;
- while (*ext_reg_info != 0xff) {
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
- index++;
- ext_reg_info++;
- }
-
- /* disable standard IO/MEM decode if secondary */
- /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
-
- /* Set Ext. Default */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
-
- /* Enable RAMDAC for A1 */
- reg = 0x04;
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
- reg |= 0x20;
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
-}
-
-static u32 __ast_mindwm(void __iomem *regs, u32 r)
+u32 __ast_mindwm(void __iomem *regs, u32 r)
{
u32 data;
@@ -89,7 +48,7 @@ static u32 __ast_mindwm(void __iomem *regs, u32 r)
return __ast_read32(regs, 0x10000 + (r & 0x0000ffff));
}
-static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
+void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
{
u32 data;
@@ -113,332 +72,38 @@ void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
__ast_moutdwm(ast->regs, r, v);
}
-/*
- * AST2100/2150 DLL CBR Setting
- */
-#define CBR_SIZE_AST2150 ((16 << 10) - 1)
-#define CBR_PASSNUM_AST2150 5
-#define CBR_THRESHOLD_AST2150 10
-#define CBR_THRESHOLD2_AST2150 10
-#define TIMEOUT_AST2150 5000000
-
-#define CBR_PATNUM_AST2150 8
-
-static const u32 pattern_AST2150[14] = {
- 0xFF00FF00,
- 0xCC33CC33,
- 0xAA55AA55,
- 0xFFFE0001,
- 0x683501FE,
- 0x0F1929B0,
- 0x2D0B4346,
- 0x60767F02,
- 0x6FBE36A6,
- 0x3A253035,
- 0x3019686D,
- 0x41C6167E,
- 0x620152BF,
- 0x20F050E0
-};
-
-static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-
-#if 0 /* unused in DDX driver - here for completeness */
-static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-#endif
-
-static int cbrtest_ast2150(struct ast_device *ast)
-{
- int i;
-
- for (i = 0; i < 8; i++)
- if (mmctestburst2_ast2150(ast, i))
- return 0;
- return 1;
-}
-
-static int cbrscan_ast2150(struct ast_device *ast, int busw)
-{
- u32 patcnt, loop;
-
- for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
- if (cbrtest_ast2150(ast))
- break;
- }
- if (loop == CBR_PASSNUM_AST2150)
- return 0;
- }
- return 1;
-}
-
-
-static void cbrdlli_ast2150(struct ast_device *ast, int busw)
-{
- u32 dll_min[4], dll_max[4], dlli, data, passcnt;
-
-cbr_start:
- dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
- dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
- passcnt = 0;
-
- for (dlli = 0; dlli < 100; dlli++) {
- ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
- data = cbrscan_ast2150(ast, busw);
- if (data != 0) {
- if (data & 0x1) {
- if (dll_min[0] > dlli)
- dll_min[0] = dlli;
- if (dll_max[0] < dlli)
- dll_max[0] = dlli;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD_AST2150)
- goto cbr_start;
- }
- if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
- goto cbr_start;
-
- dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
- ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
-}
-
-
-
-static void ast_init_dram_reg(struct ast_device *ast)
-{
- u8 j;
- u32 data, temp, i;
- const struct ast_dramstruct *dram_reg_info;
-
- j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
-
- if ((j & 0x80) == 0) { /* VGA only */
- if (IS_AST_GEN1(ast)) {
- dram_reg_info = ast2000_dram_table_data;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x10100, 0xa8);
-
- do {
- ;
- } while (ast_read32(ast, 0x10100) != 0xa8);
- } else { /* GEN2/GEN3 */
- if (ast->chip == AST2100 || ast->chip == AST2200)
- dram_reg_info = ast2100_dram_table_data;
- else
- dram_reg_info = ast1100_dram_table_data;
-
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x12000, 0x1688A8A8);
- do {
- ;
- } while (ast_read32(ast, 0x12000) != 0x01);
-
- ast_write32(ast, 0x10000, 0xfc600309);
- do {
- ;
- } while (ast_read32(ast, 0x10000) != 0x01);
- }
-
- while (dram_reg_info->index != 0xffff) {
- if (dram_reg_info->index == 0xff00) {/* delay fn */
- for (i = 0; i < 15; i++)
- udelay(dram_reg_info->data);
- } else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) {
- data = dram_reg_info->data;
- if (ast->dram_type == AST_DRAM_1Gx16)
- data = 0x00000d89;
- else if (ast->dram_type == AST_DRAM_1Gx32)
- data = 0x00000c8d;
-
- temp = ast_read32(ast, 0x12070);
- temp &= 0xc;
- temp <<= 2;
- ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
- } else
- ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
- dram_reg_info++;
- }
-
- /* AST 2100/2150 DRAM calibration */
- data = ast_read32(ast, 0x10120);
- if (data == 0x5061) { /* 266Mhz */
- data = ast_read32(ast, 0x10004);
- if (data & 0x40)
- cbrdlli_ast2150(ast, 16); /* 16 bits */
- else
- cbrdlli_ast2150(ast, 32); /* 32 bits */
- }
-
- switch (AST_GEN(ast)) {
- case 1:
- temp = ast_read32(ast, 0x10140);
- ast_write32(ast, 0x10140, temp | 0x40);
- break;
- case 2:
- case 3:
- temp = ast_read32(ast, 0x1200c);
- ast_write32(ast, 0x1200c, temp & 0xfffffffd);
- temp = ast_read32(ast, 0x12040);
- ast_write32(ast, 0x12040, temp | 0x40);
- break;
- default:
- break;
- }
- }
-
- /* wait ready */
- do {
- j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((j & 0x40) == 0);
-}
-
int ast_post_gpu(struct ast_device *ast)
{
int ret;
- ast_set_def_ext_reg(ast);
-
if (AST_GEN(ast) >= 7) {
- if (ast->tx_chip == AST_TX_ASTDP) {
- ret = ast_dp_launch(ast);
- if (ret)
- return ret;
- }
+ ret = ast_2600_post(ast);
+ if (ret)
+ return ret;
} else if (AST_GEN(ast) >= 6) {
- if (ast->config_mode == ast_use_p2a) {
- ast_post_chip_2500(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164) {
- /* Enable DVO */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
- }
- }
+ ret = ast_2500_post(ast);
+ if (ret)
+ return ret;
} else if (AST_GEN(ast) >= 4) {
- if (ast->config_mode == ast_use_p2a) {
- ast_post_chip_2300(ast);
- ast_init_3rdtx(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164) {
- /* Enable DVO */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
- }
- }
+ ret = ast_2300_post(ast);
+ if (ret)
+ return ret;
+ } else if (AST_GEN(ast) >= 2) {
+ ret = ast_2100_post(ast);
+ if (ret)
+ return ret;
} else {
- if (ast->config_mode == ast_use_p2a) {
- ast_init_dram_reg(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164) {
- /* Enable DVO */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
- }
- }
+ ret = ast_2000_post(ast);
+ if (ret)
+ return ret;
}
return 0;
}
-/* AST 2300 DRAM settings */
-#define AST_DDR3 0
-#define AST_DDR2 1
-
-struct ast2300_dram_param {
- u32 dram_type;
- u32 dram_chipid;
- u32 dram_freq;
- u32 vram_size;
- u32 odt;
- u32 wodt;
- u32 rodt;
- u32 dram_config;
- u32 reg_PERIOD;
- u32 reg_MADJ;
- u32 reg_SADJ;
- u32 reg_MRS;
- u32 reg_EMRS;
- u32 reg_AC1;
- u32 reg_AC2;
- u32 reg_DQSIC;
- u32 reg_DRV;
- u32 reg_IOZ;
- u32 reg_DQIDLY;
- u32 reg_FREQ;
- u32 madj_max;
- u32 dll2_finetune_step;
-};
-
-/*
- * DQSI DLL CBR Setting
- */
-#define CBR_SIZE0 ((1 << 10) - 1)
-#define CBR_SIZE1 ((4 << 10) - 1)
-#define CBR_SIZE2 ((64 << 10) - 1)
-#define CBR_PASSNUM 5
-#define CBR_PASSNUM2 5
-#define CBR_THRESHOLD 10
-#define CBR_THRESHOLD2 10
#define TIMEOUT 5000000
-#define CBR_PATNUM 8
-static const u32 pattern[8] = {
- 0xFF00FF00,
- 0xCC33CC33,
- 0xAA55AA55,
- 0x88778877,
- 0x92CC4D6E,
- 0x543D3CDE,
- 0xF1E843C7,
- 0x7C61D253
-};
-
-static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
+bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -458,1657 +123,7 @@ static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
return true;
}
-static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
- if (++timeout > TIMEOUT) {
- ast_moutdwm(ast, 0x1e6e0070, 0x0);
- return 0xffffffff;
- }
- } while (!data);
- data = ast_mindwm(ast, 0x1e6e0078);
- data = (data | (data >> 16)) & 0xffff;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-
-
-static bool mmc_test_burst(struct ast_device *ast, u32 datagen)
+bool mmc_test_burst(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc1);
}
-
-static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
-{
- return mmc_test2(ast, datagen, 0x41);
-}
-
-static bool mmc_test_single(struct ast_device *ast, u32 datagen)
-{
- return mmc_test(ast, datagen, 0xc5);
-}
-
-static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
-{
- return mmc_test2(ast, datagen, 0x05);
-}
-
-static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
-{
- return mmc_test(ast, datagen, 0x85);
-}
-
-static int cbr_test(struct ast_device *ast)
-{
- u32 data;
- int i;
- data = mmc_test_single2(ast, 0);
- if ((data & 0xff) && (data & 0xff00))
- return 0;
- for (i = 0; i < 8; i++) {
- data = mmc_test_burst2(ast, i);
- if ((data & 0xff) && (data & 0xff00))
- return 0;
- }
- if (!data)
- return 3;
- else if (data & 0xff)
- return 2;
- return 1;
-}
-
-static int cbr_scan(struct ast_device *ast)
-{
- u32 data, data2, patcnt, loop;
-
- data2 = 3;
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM2; loop++) {
- if ((data = cbr_test(ast)) != 0) {
- data2 &= data;
- if (!data2)
- return 0;
- break;
- }
- }
- if (loop == CBR_PASSNUM2)
- return 0;
- }
- return data2;
-}
-
-static u32 cbr_test2(struct ast_device *ast)
-{
- u32 data;
-
- data = mmc_test_burst2(ast, 0);
- if (data == 0xffff)
- return 0;
- data |= mmc_test_single2(ast, 0);
- if (data == 0xffff)
- return 0;
-
- return ~data & 0xffff;
-}
-
-static u32 cbr_scan2(struct ast_device *ast)
-{
- u32 data, data2, patcnt, loop;
-
- data2 = 0xffff;
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM2; loop++) {
- if ((data = cbr_test2(ast)) != 0) {
- data2 &= data;
- if (!data2)
- return 0;
- break;
- }
- }
- if (loop == CBR_PASSNUM2)
- return 0;
- }
- return data2;
-}
-
-static bool cbr_test3(struct ast_device *ast)
-{
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_single(ast, 0))
- return false;
- return true;
-}
-
-static bool cbr_scan3(struct ast_device *ast)
-{
- u32 patcnt, loop;
-
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < 2; loop++) {
- if (cbr_test3(ast))
- break;
- }
- if (loop == 2)
- return false;
- }
- return true;
-}
-
-static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
- bool status = false;
-FINETUNE_START:
- for (cnt = 0; cnt < 16; cnt++) {
- dllmin[cnt] = 0xff;
- dllmax[cnt] = 0x0;
- }
- passcnt = 0;
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
- data = cbr_scan2(ast);
- if (data != 0) {
- mask = 0x00010001;
- for (cnt = 0; cnt < 16; cnt++) {
- if (data & mask) {
- if (dllmin[cnt] > dlli) {
- dllmin[cnt] = dlli;
- }
- if (dllmax[cnt] < dlli) {
- dllmax[cnt] = dlli;
- }
- }
- mask <<= 1;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD2) {
- break;
- }
- }
- gold_sadj[0] = 0x0;
- passcnt = 0;
- for (cnt = 0; cnt < 16; cnt++) {
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- gold_sadj[0] += dllmin[cnt];
- passcnt++;
- }
- }
- if (retry++ > 10)
- goto FINETUNE_DONE;
- if (passcnt != 16) {
- goto FINETUNE_START;
- }
- status = true;
-FINETUNE_DONE:
- gold_sadj[0] = gold_sadj[0] >> 4;
- gold_sadj[1] = gold_sadj[0];
-
- data = 0;
- for (cnt = 0; cnt < 8; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = dllmin[cnt];
- if (gold_sadj[0] >= dlli) {
- dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
- if (dlli > 3) {
- dlli = 3;
- }
- } else {
- dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
- }
- }
- ast_moutdwm(ast, 0x1E6E0080, data);
-
- data = 0;
- for (cnt = 8; cnt < 16; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = dllmin[cnt];
- if (gold_sadj[1] >= dlli) {
- dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
- if (dlli > 3) {
- dlli = 3;
- } else {
- dlli = (dlli - 1) & 0x7;
- }
- } else {
- dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
- dlli += 1;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
- }
- }
- ast_moutdwm(ast, 0x1E6E0084, data);
- return status;
-} /* finetuneDQI_L */
-
-static void finetuneDQSI(struct ast_device *ast)
-{
- u32 dlli, dqsip, dqidly;
- u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
- u32 g_dqidly, g_dqsip, g_margin, g_side;
- u16 pass[32][2][2];
- char tag[2][76];
-
- /* Disable DQI CBR */
- reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
- reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
- reg_mcr18 &= 0x0000ffff;
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-
- for (dlli = 0; dlli < 76; dlli++) {
- tag[0][dlli] = 0x0;
- tag[1][dlli] = 0x0;
- }
- for (dqidly = 0; dqidly < 32; dqidly++) {
- pass[dqidly][0][0] = 0xff;
- pass[dqidly][0][1] = 0x0;
- pass[dqidly][1][0] = 0xff;
- pass[dqidly][1][1] = 0x0;
- }
- for (dqidly = 0; dqidly < 32; dqidly++) {
- passcnt[0] = passcnt[1] = 0;
- for (dqsip = 0; dqsip < 2; dqsip++) {
- ast_moutdwm(ast, 0x1E6E000C, 0);
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
- ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0070, 0);
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
- if (cbr_scan3(ast)) {
- if (dlli == 0)
- break;
- passcnt[dqsip]++;
- tag[dqsip][dlli] = 'P';
- if (dlli < pass[dqidly][dqsip][0])
- pass[dqidly][dqsip][0] = (u16) dlli;
- if (dlli > pass[dqidly][dqsip][1])
- pass[dqidly][dqsip][1] = (u16) dlli;
- } else if (passcnt[dqsip] >= 5)
- break;
- else {
- pass[dqidly][dqsip][0] = 0xff;
- pass[dqidly][dqsip][1] = 0x0;
- }
- }
- }
- if (passcnt[0] == 0 && passcnt[1] == 0)
- dqidly++;
- }
- /* Search margin */
- g_dqidly = g_dqsip = g_margin = g_side = 0;
-
- for (dqidly = 0; dqidly < 32; dqidly++) {
- for (dqsip = 0; dqsip < 2; dqsip++) {
- if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
- continue;
- diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
- if ((diff+2) < g_margin)
- continue;
- passcnt[0] = passcnt[1] = 0;
- for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++);
- for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++);
- if (passcnt[0] > passcnt[1])
- passcnt[0] = passcnt[1];
- passcnt[1] = 0;
- if (passcnt[0] > g_side)
- passcnt[1] = passcnt[0] - g_side;
- if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
- g_margin = diff;
- g_dqidly = dqidly;
- g_dqsip = dqsip;
- g_side = passcnt[0];
- } else if (passcnt[1] > 1 && g_side < 8) {
- if (diff > g_margin)
- g_margin = diff;
- g_dqidly = dqidly;
- g_dqsip = dqsip;
- g_side = passcnt[0];
- }
- }
- }
- reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-
-}
-static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
- bool status = false;
-
- finetuneDQSI(ast);
- if (finetuneDQI_L(ast, param) == false)
- return status;
-
-CBR_START2:
- dllmin[0] = dllmin[1] = 0xff;
- dllmax[0] = dllmax[1] = 0x0;
- passcnt = 0;
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
- data = cbr_scan(ast);
- if (data != 0) {
- if (data & 0x1) {
- if (dllmin[0] > dlli) {
- dllmin[0] = dlli;
- }
- if (dllmax[0] < dlli) {
- dllmax[0] = dlli;
- }
- }
- if (data & 0x2) {
- if (dllmin[1] > dlli) {
- dllmin[1] = dlli;
- }
- if (dllmax[1] < dlli) {
- dllmax[1] = dlli;
- }
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD) {
- break;
- }
- }
- if (retry++ > 10)
- goto CBR_DONE2;
- if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
- goto CBR_START2;
- }
- if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
- goto CBR_START2;
- }
- status = true;
-CBR_DONE2:
- dlli = (dllmin[1] + dllmax[1]) >> 1;
- dlli <<= 8;
- dlli += (dllmin[0] + dllmax[0]) >> 1;
- ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
- return status;
-} /* CBRDLL2 */
-
-static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 trap, trap_AC2, trap_MRS;
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
-
- /* Ger trap info */
- trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
- trap_AC2 = 0x00020000 + (trap << 16);
- trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
- trap_MRS = 0x00000010 + (trap << 4);
- trap_MRS |= ((trap & 0x2) << 18);
-
- param->reg_MADJ = 0x00034C4C;
- param->reg_SADJ = 0x00001800;
- param->reg_DRV = 0x000000F0;
- param->reg_PERIOD = param->dram_freq;
- param->rodt = 0;
-
- switch (param->dram_freq) {
- case 336:
- ast_moutdwm(ast, 0x1E6E2020, 0x0190);
- param->wodt = 0;
- param->reg_AC1 = 0x22202725;
- param->reg_AC2 = 0xAA007613 | trap_AC2;
- param->reg_DQSIC = 0x000000BA;
- param->reg_MRS = 0x04001400 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000074;
- param->reg_FREQ = 0x00004DC0;
- param->madj_max = 96;
- param->dll2_finetune_step = 3;
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xAA007613 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xAA00761C | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xAA007636 | trap_AC2;
- break;
- }
- break;
- default:
- case 396:
- ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
- param->wodt = 1;
- param->reg_AC1 = 0x33302825;
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x04001600 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000034;
- param->reg_DRV = 0x000000FA;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x00005040;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC009622 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00963F | trap_AC2;
- break;
- }
- break;
-
- case 408:
- ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
- param->wodt = 1;
- param->reg_AC1 = 0x33302825;
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x04001600 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000023;
- param->reg_DRV = 0x000000FA;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC009622 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00963F | trap_AC2;
- break;
- }
-
- break;
- case 456:
- ast_moutdwm(ast, 0x1E6E2020, 0x0230);
- param->wodt = 0;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xCD44961A;
- param->reg_DQSIC = 0x000000FC;
- param->reg_MRS = 0x00081830;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x00000097;
- param->reg_FREQ = 0x000052C0;
- param->madj_max = 88;
- param->dll2_finetune_step = 4;
- break;
- case 504:
- ast_moutdwm(ast, 0x1E6E2020, 0x0270);
- param->wodt = 1;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xDE44A61D;
- param->reg_DQSIC = 0x00000117;
- param->reg_MRS = 0x00081A30;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x070000BB;
- param->reg_DQIDLY = 0x000000A0;
- param->reg_FREQ = 0x000054C0;
- param->madj_max = 79;
- param->dll2_finetune_step = 4;
- break;
- case 528:
- ast_moutdwm(ast, 0x1E6E2020, 0x0290);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xEF44B61E;
- param->reg_DQSIC = 0x00000125;
- param->reg_MRS = 0x00081A30;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000088;
- param->reg_FREQ = 0x000055C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 576:
- ast_moutdwm(ast, 0x1E6E2020, 0x0140);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302A37;
- param->reg_AC2 = 0xEF56B61E;
- param->reg_DQSIC = 0x0000013F;
- param->reg_MRS = 0x00101A50;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000057C0;
- param->madj_max = 136;
- param->dll2_finetune_step = 3;
- break;
- case 600:
- ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x32302A37;
- param->reg_AC2 = 0xDF56B61F;
- param->reg_DQSIC = 0x0000014D;
- param->reg_MRS = 0x00101A50;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000058C0;
- param->madj_max = 132;
- param->dll2_finetune_step = 3;
- break;
- case 624:
- ast_moutdwm(ast, 0x1E6E2020, 0x0160);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x32302A37;
- param->reg_AC2 = 0xEF56B621;
- param->reg_DQSIC = 0x0000015A;
- param->reg_MRS = 0x02101A50;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000059C0;
- param->madj_max = 128;
- param->dll2_finetune_step = 3;
- break;
- } /* switch freq */
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->dram_config = 0x130;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->dram_config = 0x131;
- break;
- case AST_DRAM_2Gx16:
- param->dram_config = 0x132;
- break;
- case AST_DRAM_4Gx16:
- param->dram_config = 0x133;
- break;
- } /* switch size */
-
- switch (param->vram_size) {
- default:
- case AST_VIDMEM_SIZE_8M:
- param->dram_config |= 0x00;
- break;
- case AST_VIDMEM_SIZE_16M:
- param->dram_config |= 0x04;
- break;
- case AST_VIDMEM_SIZE_32M:
- param->dram_config |= 0x08;
- break;
- case AST_VIDMEM_SIZE_64M:
- param->dram_config |= 0x0c;
- break;
- }
-
-}
-
-static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 data, data2, retry = 0;
-
-ddr3_init_start:
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
- ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
- udelay(10);
-
- ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
- ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
- ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
- ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
- ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
- ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
- ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0054, 0);
- ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- /* Wait MCLK2X lock to MCLK */
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
- if ((data2 & 0xff) > param->madj_max) {
- break;
- }
- ast_moutdwm(ast, 0x1E6E0064, data2);
- if (data2 & 0x00100000) {
- data2 = ((data2 & 0xff) >> 3) + 3;
- } else {
- data2 = ((data2 & 0xff) >> 2) + 5;
- }
- data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
- data2 += data & 0xff;
- data = data | (data2 << 8);
- ast_moutdwm(ast, 0x1E6E0068, data);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
- udelay(10);
- data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- ast_moutdwm(ast, 0x1E6E0018, data);
- data = data | 0x200;
- ast_moutdwm(ast, 0x1E6E0018, data);
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
-
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- }
- ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
- data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
- ast_moutdwm(ast, 0x1E6E0018, data);
-
- ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
- ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
- udelay(50);
- /* Mode Register Setting */
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- data = 0;
- if (param->wodt) {
- data = 0x300;
- }
- if (param->rodt) {
- data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
- }
- ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
-
- /* Calibrate the DQSI delay */
- if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
- goto ddr3_init_start;
-
- ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
- /* ECC Memory Initialization */
-#ifdef ECC
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0070, 0x221);
- do {
- data = ast_mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
-#endif
-
-
-}
-
-static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 trap, trap_AC2, trap_MRS;
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
-
- /* Ger trap info */
- trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
- trap_AC2 = (trap << 20) | (trap << 16);
- trap_AC2 += 0x00110000;
- trap_MRS = 0x00000040 | (trap << 4);
-
-
- param->reg_MADJ = 0x00034C4C;
- param->reg_SADJ = 0x00001800;
- param->reg_DRV = 0x000000F0;
- param->reg_PERIOD = param->dram_freq;
- param->rodt = 0;
-
- switch (param->dram_freq) {
- case 264:
- ast_moutdwm(ast, 0x1E6E2020, 0x0130);
- param->wodt = 0;
- param->reg_AC1 = 0x11101513;
- param->reg_AC2 = 0x78117011;
- param->reg_DQSIC = 0x00000092;
- param->reg_MRS = 0x00000842;
- param->reg_EMRS = 0x00000000;
- param->reg_DRV = 0x000000F0;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x0000005A;
- param->reg_FREQ = 0x00004AC0;
- param->madj_max = 138;
- param->dll2_finetune_step = 3;
- break;
- case 336:
- ast_moutdwm(ast, 0x1E6E2020, 0x0190);
- param->wodt = 1;
- param->reg_AC1 = 0x22202613;
- param->reg_AC2 = 0xAA009016 | trap_AC2;
- param->reg_DQSIC = 0x000000BA;
- param->reg_MRS = 0x00000A02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000074;
- param->reg_FREQ = 0x00004DC0;
- param->madj_max = 96;
- param->dll2_finetune_step = 3;
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xAA009012 | trap_AC2;
- break;
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xAA009016 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xAA009023 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xAA00903B | trap_AC2;
- break;
- }
- break;
- default:
- case 396:
- ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
- param->wodt = 1;
- param->rodt = 0;
- param->reg_AC1 = 0x33302714;
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x00000C02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x00005040;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xCC00B016 | trap_AC2;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC00B02B | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00B03F | trap_AC2;
- break;
- }
-
- break;
-
- case 408:
- ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
- param->wodt = 1;
- param->rodt = 0;
- param->reg_AC1 = 0x33302714;
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x00000C02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xCC00B016 | trap_AC2;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC00B02B | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00B03F | trap_AC2;
- break;
- }
-
- break;
- case 456:
- ast_moutdwm(ast, 0x1E6E2020, 0x0230);
- param->wodt = 0;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xCD44B01E;
- param->reg_DQSIC = 0x000000FC;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000000;
- param->reg_DRV = 0x00000000;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000097;
- param->reg_FREQ = 0x000052C0;
- param->madj_max = 88;
- param->dll2_finetune_step = 3;
- break;
- case 504:
- ast_moutdwm(ast, 0x1E6E2020, 0x0261);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xDE44C022;
- param->reg_DQSIC = 0x00000117;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x0000000A;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000A0;
- param->reg_FREQ = 0x000054C0;
- param->madj_max = 79;
- param->dll2_finetune_step = 3;
- break;
- case 528:
- ast_moutdwm(ast, 0x1E6E2020, 0x0120);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xEF44D024;
- param->reg_DQSIC = 0x00000125;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F9;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000A7;
- param->reg_FREQ = 0x000055C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 552:
- ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x43402915;
- param->reg_AC2 = 0xFF44E025;
- param->reg_DQSIC = 0x00000132;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x0000000A;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000AD;
- param->reg_FREQ = 0x000056C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 576:
- ast_moutdwm(ast, 0x1E6E2020, 0x0140);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x43402915;
- param->reg_AC2 = 0xFF44E027;
- param->reg_DQSIC = 0x0000013F;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000B3;
- param->reg_FREQ = 0x000057C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- }
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->dram_config = 0x100;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->dram_config = 0x121;
- break;
- case AST_DRAM_2Gx16:
- param->dram_config = 0x122;
- break;
- case AST_DRAM_4Gx16:
- param->dram_config = 0x123;
- break;
- } /* switch size */
-
- switch (param->vram_size) {
- default:
- case AST_VIDMEM_SIZE_8M:
- param->dram_config |= 0x00;
- break;
- case AST_VIDMEM_SIZE_16M:
- param->dram_config |= 0x04;
- break;
- case AST_VIDMEM_SIZE_32M:
- param->dram_config |= 0x08;
- break;
- case AST_VIDMEM_SIZE_64M:
- param->dram_config |= 0x0c;
- break;
- }
-}
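/*
 * Illustrative sketch (annotation, not part of the patch): the two
 * switches above compose dram_config from two independent fields -- a
 * chip-geometry code in the low bits and the VRAM aperture size in bits
 * [3:2] -- so the result is just an OR of two lookups. With hypothetical
 * helper arrays:
 *
 *	static const u32 chip_cfg[]  = { 0x100, 0x121, 0x122, 0x123 };
 *	static const u32 vram_bits[] = { 0x00, 0x04, 0x08, 0x0c };
 *
 *	param->dram_config = chip_cfg[chip_idx] | vram_bits[vram_idx];
 */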
-
-static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 data, data2, retry = 0;
-
-ddr2_init_start:
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
- ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
- udelay(10);
-
- ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
- ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
- ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
- ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
- ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
- ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
- ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0054, 0);
- ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
-
-	/* Wait for MCLK2X to lock to MCLK */
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
- if ((data2 & 0xff) > param->madj_max) {
- break;
- }
- ast_moutdwm(ast, 0x1E6E0064, data2);
- if (data2 & 0x00100000) {
- data2 = ((data2 & 0xff) >> 3) + 3;
- } else {
- data2 = ((data2 & 0xff) >> 2) + 5;
- }
- data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
- data2 += data & 0xff;
- data = data | (data2 << 8);
- ast_moutdwm(ast, 0x1E6E0068, data);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
- udelay(10);
- data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- ast_moutdwm(ast, 0x1E6E0018, data);
- data = data | 0x200;
- ast_moutdwm(ast, 0x1E6E0018, data);
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
-
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- }
- ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
- data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
- ast_moutdwm(ast, 0x1E6E0018, data);
-
- ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- udelay(50);
- /* Mode Register Setting */
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
- data = 0;
- if (param->wodt) {
- data = 0x500;
- }
- if (param->rodt) {
- data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
- }
- ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
- ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
-
- /* Calibrate the DQSI delay */
-	if (!cbr_dll2(ast, param) && retry++ < 10)
- goto ddr2_init_start;
-
- /* ECC Memory Initialization */
-#ifdef ECC
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0070, 0x221);
- do {
- data = ast_mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
-#endif
-
-}
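/*
 * Annotation: ddr2_init() is written as a restartable sequence -- when
 * the DQSI delay calibration in cbr_dll2() fails, the goto rewinds to
 * ddr2_init_start and the whole controller programming is redone, up to
 * ten times. The optional ECC pass at the end is compiled in only when
 * ECC is defined.
 */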
-
-static void ast_post_chip_2300(struct ast_device *ast)
-{
- struct ast2300_dram_param param;
- u32 temp;
- u8 reg;
-
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
-	if ((reg & 0x80) == 0) { /* vga only */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x12000, 0x1688a8a8);
- do {
- ;
- } while (ast_read32(ast, 0x12000) != 0x1);
-
- ast_write32(ast, 0x10000, 0xfc600309);
- do {
- ;
- } while (ast_read32(ast, 0x10000) != 0x1);
-
- /* Slow down CPU/AHB CLK in VGA only mode */
- temp = ast_read32(ast, 0x12008);
- temp |= 0x73;
- ast_write32(ast, 0x12008, temp);
-
- param.dram_freq = 396;
- param.dram_type = AST_DDR3;
- temp = ast_mindwm(ast, 0x1e6e2070);
- if (temp & 0x01000000)
- param.dram_type = AST_DDR2;
- switch (temp & 0x18000000) {
- case 0:
- param.dram_chipid = AST_DRAM_512Mx16;
- break;
- default:
- case 0x08000000:
- param.dram_chipid = AST_DRAM_1Gx16;
- break;
- case 0x10000000:
- param.dram_chipid = AST_DRAM_2Gx16;
- break;
- case 0x18000000:
- param.dram_chipid = AST_DRAM_4Gx16;
- break;
- }
- switch (temp & 0x0c) {
- default:
- case 0x00:
- param.vram_size = AST_VIDMEM_SIZE_8M;
- break;
-
- case 0x04:
- param.vram_size = AST_VIDMEM_SIZE_16M;
- break;
-
- case 0x08:
- param.vram_size = AST_VIDMEM_SIZE_32M;
- break;
-
- case 0x0c:
- param.vram_size = AST_VIDMEM_SIZE_64M;
- break;
- }
-
- if (param.dram_type == AST_DDR3) {
- get_ddr3_info(ast, &param);
- ddr3_init(ast, &param);
- } else {
- get_ddr2_info(ast, &param);
- ddr2_init(ast, &param);
- }
-
- temp = ast_mindwm(ast, 0x1e6e2040);
- ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
- }
-
- /* wait ready */
- do {
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((reg & 0x40) == 0);
-}
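/*
 * Reference notes on the sequence above (annotation, not in the patch):
 * POST only runs when VGACRD0 bit 7 is clear ("VGA only" -- presumably
 * meaning no firmware has initialized the DRAM yet), and bit 6 is the
 * ready flag polled at the end. The hardware strap word at 0x1e6e2070
 * is decoded as:
 *	bit 24     - DRAM type    (1 = DDR2, 0 = DDR3)
 *	bits 28:27 - DRAM chip id (512Mx16 / 1Gx16 / 2Gx16 / 4Gx16)
 *	bits 3:2   - VRAM size    (8M / 16M / 32M / 64M)
 */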
-
-static bool cbr_test_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
- ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_single_2500(ast, 0))
- return false;
- return true;
-}
-
-static bool ddr_test_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
- ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_burst(ast, 1))
- return false;
- if (!mmc_test_burst(ast, 2))
- return false;
- if (!mmc_test_burst(ast, 3))
- return false;
- if (!mmc_test_single_2500(ast, 0))
- return false;
- return true;
-}
-
-static void ddr_init_common_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
- ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
- ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
- ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
- ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
- ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
- ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
- ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
- ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
-}
-
-static void ddr_phy_init_2500(struct ast_device *ast)
-{
- u32 data, pass, timecnt;
-
- pass = 0;
- ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
- while (!pass) {
- for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
- data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
- if (!data)
- break;
- }
- if (timecnt != TIMEOUT) {
- data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
- if (!data)
- pass = 1;
- }
- if (!pass) {
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- udelay(10); /* delay 10 us */
- ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
- }
- }
-
- ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
-}
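/*
 * Annotation on the poll loop above (register meanings inferred from the
 * callers): writing 0x5 to 0x1E6E0060 fires the DFI/PHY init and bit 0
 * clears on completion; 0x1E6E0300 reports PHY errors in bits 19 and 17
 * (the 0x000A0000 mask). On timeout or error the enable is dropped and
 * re-asserted and the init is retried -- note the outer while (!pass)
 * loop has no retry bound.
 */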
-
-/*
- * Check DRAM Size
- * 1Gb : 0x80000000 ~ 0x87FFFFFF
- * 2Gb : 0x80000000 ~ 0x8FFFFFFF
- * 4Gb : 0x80000000 ~ 0x9FFFFFFF
- * 8Gb : 0x80000000 ~ 0xBFFFFFFF
- */
-static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
-{
- u32 reg_04, reg_14;
-
- reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
- reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;
-
- ast_moutdwm(ast, 0xA0100000, 0x41424344);
- ast_moutdwm(ast, 0x90100000, 0x35363738);
- ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
- ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);
-
- /* Check 8Gbit */
- if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
- reg_04 |= 0x03;
- reg_14 |= (tRFC >> 24) & 0xFF;
- /* Check 4Gbit */
- } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
- reg_04 |= 0x02;
- reg_14 |= (tRFC >> 16) & 0xFF;
- /* Check 2Gbit */
- } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
- reg_04 |= 0x01;
- reg_14 |= (tRFC >> 8) & 0xFF;
- } else {
- reg_14 |= tRFC & 0xFF;
- }
- ast_moutdwm(ast, 0x1E6E0004, reg_04);
- ast_moutdwm(ast, 0x1E6E0014, reg_14);
-}
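/*
 * Annotation: this is a classic write/readback size probe. Four distinct
 * patterns are written at addresses that alias onto one another when the
 * DRAM is smaller than the span being tested; on anything below 8Gbit the
 * later, lower-address writes clobber the higher markers, so reading the
 * markers back largest-span-first identifies the first size whose pattern
 * survived. The matching tRFC byte is then folded into reg_14.
 */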
-
-static void enable_cache_2500(struct ast_device *ast)
-{
- u32 reg_04, data;
-
- reg_04 = ast_mindwm(ast, 0x1E6E0004);
- ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);
-
-	do {
-		data = ast_mindwm(ast, 0x1E6E0004);
-	} while (!(data & 0x80000));
- ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
-}
-
-static void set_mpll_2500(struct ast_device *ast)
-{
- u32 addr, data, param;
-
- /* Reset MMC */
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
- for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
- ast_moutdwm(ast, addr, 0x0);
- addr += 4;
- }
- ast_moutdwm(ast, 0x1E6E0034, 0x00020000);
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
- if (data) {
- /* CLKIN = 25MHz */
- param = 0x930023E0;
- ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
- } else {
- /* CLKIN = 24MHz */
- param = 0x93002400;
- }
- ast_moutdwm(ast, 0x1E6E2020, param);
- udelay(100);
-}
-
-static void reset_mmc_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E78505C, 0x00000004);
- ast_moutdwm(ast, 0x1E785044, 0x00000001);
- ast_moutdwm(ast, 0x1E785048, 0x00004755);
- ast_moutdwm(ast, 0x1E78504C, 0x00000013);
- mdelay(100);
- ast_moutdwm(ast, 0x1E785054, 0x00000077);
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
-}
-
-static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
-{
- ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
- ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
- ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
- ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
- ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
- ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
- ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
- ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
-
- /* DDR PHY Setting */
- ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
- ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
- ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
- ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
- ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
- ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
- ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
- ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
- ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
- ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
- ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
- ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
- ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);
-
- /* Controller Setting */
- ast_moutdwm(ast, 0x1E6E0034, 0x00020091);
-
- /* Wait DDR PHY init done */
- ddr_phy_init_2500(ast);
-
- ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
- ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
- ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
-
- check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
- enable_cache_2500(ast);
- ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
- ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
-}
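/*
 * Annotation: unlike the register-by-register 2300 path above, the 2500
 * init is table-driven -- the REGIDX_* constants index a flat u32 timing
 * table (ast2500_ddr3_1600_timing_table or the DDR4 variant, selected in
 * ast_dram_init_2500() below), so one function body serves whatever
 * timings the table encodes.
 */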
-
-static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
-{
- u32 data, data2, pass, retrycnt;
- u32 ddr_vref, phy_vref;
- u32 min_ddr_vref = 0, min_phy_vref = 0;
- u32 max_ddr_vref = 0, max_phy_vref = 0;
-
- ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
- ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
- ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
- ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
- ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
- ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
- ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
- ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
-
- /* DDR PHY Setting */
- ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
- ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
- ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
- ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
- ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
- ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
- ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
- ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
- ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
- ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
- ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
- ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
- ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
- ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
- ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);
-
- /* Controller Setting */
- ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);
-
- /* Train PHY Vref first */
- pass = 0;
-
- for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
- max_phy_vref = 0x0;
- pass = 0;
- ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
- for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
- /* Fire DFI Init */
- ddr_phy_init_2500(ast);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- if (cbr_test_2500(ast)) {
- pass++;
- data = ast_mindwm(ast, 0x1E6E03D0);
- data2 = data >> 8;
- data = data & 0xff;
- if (data > data2)
- data = data2;
- if (max_phy_vref < data) {
- max_phy_vref = data;
- min_phy_vref = phy_vref;
- }
- } else if (pass > 0)
- break;
- }
- }
- ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));
-
- /* Train DDR Vref next */
- pass = 0;
-
- for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
- min_ddr_vref = 0xFF;
- max_ddr_vref = 0x0;
- pass = 0;
- for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
- /* Fire DFI Init */
- ddr_phy_init_2500(ast);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- if (cbr_test_2500(ast)) {
- pass++;
- if (min_ddr_vref > ddr_vref)
- min_ddr_vref = ddr_vref;
- if (max_ddr_vref < ddr_vref)
- max_ddr_vref = ddr_vref;
- } else if (pass != 0)
- break;
- }
- }
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
-
- /* Wait DDR PHY init done */
- ddr_phy_init_2500(ast);
-
- ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
- ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
- ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
-
- check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
- enable_cache_2500(ast);
- ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
- ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
-}
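/*
 * Annotation on the Vref training above: the PHY-side sweep keeps the
 * vref with the best read margin reported in 0x1E6E03D0 (the smaller of
 * the two margin fields), while the DRAM-side sweep records the passing
 * window and programs its midpoint, (min + max + 1) / 2. Both sweeps
 * bail out at the first failure after the window has opened, and each is
 * retried up to four times if no setting passed at all.
 */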
-
-static bool ast_dram_init_2500(struct ast_device *ast)
-{
- u32 data;
- u32 max_tries = 5;
-
- do {
- if (max_tries-- == 0)
- return false;
- set_mpll_2500(ast);
- reset_mmc_2500(ast);
- ddr_init_common_2500(ast);
-
- data = ast_mindwm(ast, 0x1E6E2070);
- if (data & 0x01000000)
- ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
- else
- ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
- } while (!ddr_test_2500(ast));
-
- ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);
-
- /* Patch code */
- data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
- ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);
-
- return true;
-}
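/*
 * Annotation: note the strap reuse -- bit 24 of 0x1E6E2070, which meant
 * DDR2 on the AST2300 path above, selects DDR4 vs. DDR3 here. The whole
 * MPLL/MMC/DDR bring-up is retried up to five times until the pattern
 * tests in ddr_test_2500() pass.
 */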
-
-void ast_patch_ahb_2500(void __iomem *regs)
-{
- u32 data;
-
- /* Clear bus lock condition */
- __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
- __ast_moutdwm(regs, 0x1e600084, 0x00010000);
- __ast_moutdwm(regs, 0x1e600088, 0x00000000);
- __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
-
- data = __ast_mindwm(regs, 0x1e6e2070);
- if (data & 0x08000000) { /* check fast reset */
- /*
-		 * If "Fast reset" is enabled for the ARM-ICE debugger,
-		 * then the WDT needs to be enabled, where
-		 * WDT04 is the WDT#1 reload reg,
-		 * WDT08 is the WDT#1 counter-restart reg (to avoid a system deadlock),
-		 * WDT0C is the WDT#1 control reg:
-		 *	[6:5]:= 01:Full chip
-		 *	[4]:= 1:1MHz clock source
-		 *	[1]:= 1:WDT will be cleared and disabled after timeout occurs
- * [0]:= 1:WDT enable
- */
- __ast_moutdwm(regs, 0x1E785004, 0x00000010);
- __ast_moutdwm(regs, 0x1E785008, 0x00004755);
- __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
- udelay(1000);
- }
-
- do {
- __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
- data = __ast_mindwm(regs, 0x1e6e2000);
- } while (data != 1);
-
- __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
-}
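/*
 * Sanity check on the WDT control value above (annotation): 0x33 =
 * 0b0110011 matches the documented layout -- [6:5] = 01 (full-chip
 * reset), [4] = 1 (1MHz clock source), [1] = 1 (clear and disable after
 * timeout), [0] = 1 (enable).
 */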
-
-void ast_post_chip_2500(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- u32 temp;
- u8 reg;
-
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
-	if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) { /* vga only */
- /* Clear bus lock condition */
- ast_patch_ahb_2500(ast->regs);
-
- /* Disable watchdog */
- ast_moutdwm(ast, 0x1E78502C, 0x00000000);
- ast_moutdwm(ast, 0x1E78504C, 0x00000000);
-
-		/*
-		 * Reset the USB port to work around a USB unknown-device issue.
-		 * SCU90 is Multi-function Pin Control #5
-		 * [29]:= 1:Enable USB2.0 Host port#1 (which is the mutually shared
-		 * USB2.0 Hub port).
-		 * SCU94 is Multi-function Pin Control #6
-		 * [14:13]:= 1x:USB2.0 Host2 controller
-		 * SCU70 is Hardware Strap reg
-		 * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
-		 * [18]: 0(24)/1(48) MHz)
-		 * SCU7C is Write clear reg to SCU70
-		 * [23]:= write 1 and then SCU70[23] will be cleared to 0b.
-		 */
- ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
- ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
- if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
- ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
- mdelay(100);
- ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
- }
- /* Modify eSPI reset pin */
- temp = ast_mindwm(ast, 0x1E6E2070);
- if (temp & 0x02000000)
- ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
-
- /* Slow down CPU/AHB CLK in VGA only mode */
- temp = ast_read32(ast, 0x12008);
- temp |= 0x73;
- ast_write32(ast, 0x12008, temp);
-
- if (!ast_dram_init_2500(ast))
-			drm_err(dev, "DRAM init failed!\n");
-
- temp = ast_mindwm(ast, 0x1e6e2040);
- ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
- }
-
- /* wait ready */
- do {
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((reg & 0x40) == 0);
-}
diff --git a/drivers/gpu/drm/ast/ast_post.h b/drivers/gpu/drm/ast/ast_post.h
new file mode 100644
index 000000000000..aa5d247bebe8
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef AST_POST_H
+#define AST_POST_H
+
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct ast_device;
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+ u16 index;
+ u32 data;
+};
+
+/* hardware fields */
+#define __AST_DRAMSTRUCT_DRAM_TYPE 0x0004
+
+/* control commands */
+#define __AST_DRAMSTRUCT_UDELAY 0xff00
+#define __AST_DRAMSTRUCT_INVALID 0xffff
+
+#define __AST_DRAMSTRUCT_INDEX(_name) \
+ (__AST_DRAMSTRUCT_ ## _name)
+
+#define AST_DRAMSTRUCT_INIT(_name, _value) \
+ { __AST_DRAMSTRUCT_INDEX(_name), (_value) }
+
+#define AST_DRAMSTRUCT_UDELAY(_usecs) \
+ AST_DRAMSTRUCT_INIT(UDELAY, _usecs)
+#define AST_DRAMSTRUCT_INVALID \
+ AST_DRAMSTRUCT_INIT(INVALID, U32_MAX)
+
+#define AST_DRAMSTRUCT_IS(_entry, _name) \
+ ((_entry)->index == __AST_DRAMSTRUCT_INDEX(_name))
+
+u32 __ast_mindwm(void __iomem *regs, u32 r);
+void __ast_moutdwm(void __iomem *regs, u32 r, u32 v);
+
+bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl);
+bool mmc_test_burst(struct ast_device *ast, u32 datagen);
+
+/* ast_2000.c */
+void ast_2000_set_def_ext_reg(struct ast_device *ast);
+
+/* ast_2300.c */
+void ast_2300_set_def_ext_reg(struct ast_device *ast);
+
+#endif
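/*
 * Usage sketch for the table macros above -- illustrative values only;
 * the register value, delay and walker below are assumptions, not taken
 * from the patch.
 */
static const struct ast_dramstruct example_dram_table[] = {
	AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585),	/* hardware field 0x0004 */
	AST_DRAMSTRUCT_UDELAY(67),			/* control command */
	AST_DRAMSTRUCT_INVALID,				/* terminator */
};

/*
 * A walker would dispatch on the control commands before treating the
 * index as a register offset (the MCR base address here is an assumption):
 */
static void walk_dram_table(struct ast_device *ast, const struct ast_dramstruct *e)
{
	for (; !AST_DRAMSTRUCT_IS(e, INVALID); e++) {
		if (AST_DRAMSTRUCT_IS(e, UDELAY))
			udelay(e->data);
		else
			ast_moutdwm(ast, 0x1e6e0000 + e->index, e->data);
	}
}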
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index bb2cc1d8b84e..e15adaf3a80e 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -30,9 +30,11 @@
#define AST_IO_VGACRI (0x54)
#define AST_IO_VGACR80_PASSWORD (0xa8)
+#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0)
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
#define AST_IO_VGACRA3_DVO_ENABLED BIT(7)
+#define AST_IO_VGACRAA_VGAMEM_SIZE_MASK GENMASK(1, 0)
#define AST_IO_VGACRB6_HSYNC_OFF BIT(0)
#define AST_IO_VGACRB6_VSYNC_OFF BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 09a1be234f71..b9e0ca85226a 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -16,6 +16,7 @@ config DRM_AUX_BRIDGE
tristate
depends on DRM_BRIDGE && OF
select AUXILIARY_BUS
+ select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
Simple transparent bridge that is used by several non-DRM drivers to
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index f46a5e26b5dd..59a5256ce8a6 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -5,6 +5,9 @@ config DRM_I2C_ADV7511
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_MIPI_DSI
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_DISPLAY_HDMI_STATE_HELPER
help
Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
@@ -19,7 +22,7 @@ config DRM_I2C_ADV7511_AUDIO
config DRM_I2C_ADV7511_CEC
bool "ADV7511/33/35 HDMI CEC driver"
depends on DRM_I2C_ADV7511
- select CEC_CORE
+ select DRM_DISPLAY_HDMI_CEC_HELPER
default y
help
When selected the HDMI transmitter will support the CEC feature.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index ec0b7f3d889c..85ebead9809c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -313,16 +313,11 @@ enum adv7511_csc_scaling {
* @csc_enable: Whether to enable color space conversion
* @csc_scaling_factor: Color space conversion scaling factor
 * @csc_coefficents: Color space conversion coefficients
- * @hdmi_mode: Whether to use HDMI or DVI output mode
- * @avi_infoframe: HDMI infoframe
*/
struct adv7511_video_config {
bool csc_enable;
enum adv7511_csc_scaling csc_scaling_factor;
const uint16_t *csc_coefficents;
-
- bool hdmi_mode;
- struct hdmi_avi_infoframe avi_infoframe;
};
enum adv7511_type {
@@ -337,6 +332,7 @@ struct adv7511_chip_info {
enum adv7511_type type;
unsigned int max_mode_clock_khz;
unsigned int max_lane_freq_khz;
+ const char *name;
const char * const *supply_names;
unsigned int num_supplies;
unsigned int reg_cec_offset;
@@ -371,7 +367,7 @@ struct adv7511 {
struct work_struct hpd_work;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_connector *cec_connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -389,9 +385,7 @@ struct adv7511 {
bool use_timing_gen;
const struct adv7511_chip_info *info;
- struct platform_device *audio_pdev;
- struct cec_adapter *cec_adap;
u8 cec_addr[ADV7511_MAX_ADDRS];
u8 cec_valid_addrs;
bool cec_enabled_adap;
@@ -399,20 +393,29 @@ struct adv7511 {
u32 cec_clk_freq;
};
+static inline struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
+int adv7511_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable);
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr);
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#else
-static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
-{
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
- ADV7511_CEC_CTRL_POWER_DOWN);
- return 0;
-}
+#define adv7511_cec_init NULL
+#define adv7511_cec_enable NULL
+#define adv7511_cec_log_addr NULL
+#define adv7511_cec_transmit NULL
#endif
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv);
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
@@ -421,16 +424,18 @@ int adv7533_attach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
-void adv7511_audio_exit(struct adv7511 *adv7511);
+int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
#else /*CONFIG_DRM_I2C_ADV7511_AUDIO */
-static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- return 0;
-}
-static inline void adv7511_audio_exit(struct adv7511 *adv7511)
-{
-}
+#define adv7511_hdmi_audio_startup NULL
+#define adv7511_hdmi_audio_shutdown NULL
+#define adv7511_hdmi_audio_prepare NULL
#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */
#endif /* __DRM_I2C_ADV7511_H__ */
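/*
 * Design note (annotation): with audio and CEC now exposed through
 * drm_bridge_funcs hooks, the disabled-Kconfig cases can leave the
 * callbacks as NULL #defines instead of inline stubs -- the probe path
 * only sets DRM_BRIDGE_OP_HDMI_AUDIO / DRM_BRIDGE_OP_HDMI_CEC_ADAPTER
 * under the matching #ifdefs, so the core never calls into them.
 */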
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 1ff8c815ec79..766b1c96bc88 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -55,11 +55,12 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511)
return 0;
}
-static int adv7511_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
+int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int audio_source, i2s_format = 0;
unsigned int invert_clock;
unsigned int rate;
@@ -167,9 +168,10 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
return 0;
}
-static int audio_startup(struct device *dev, void *data)
+int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
@@ -204,69 +206,12 @@ static int audio_startup(struct device *dev, void *data)
return 0;
}
-static void audio_shutdown(struct device *dev, void *data)
+void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
}
-
-static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint,
- void *data)
-{
- struct of_endpoint of_ep;
- int ret;
-
- ret = of_graph_parse_endpoint(endpoint, &of_ep);
- if (ret < 0)
- return ret;
-
- /*
-	 * HDMI sound should be located at reg = <2>;
-	 * it is then sound port 0.
- */
- if (of_ep.port == 2)
- return 0;
-
- return -EINVAL;
-}
-
-static const struct hdmi_codec_ops adv7511_codec_ops = {
- .hw_params = adv7511_hdmi_hw_params,
- .audio_shutdown = audio_shutdown,
- .audio_startup = audio_startup,
- .get_dai_id = adv7511_hdmi_i2s_get_dai_id,
-};
-
-static const struct hdmi_codec_pdata codec_data = {
- .ops = &adv7511_codec_ops,
- .i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |
- SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE),
- .max_i2s_channels = 2,
- .i2s = 1,
- .no_i2s_capture = 1,
- .spdif = 1,
- .no_spdif_capture = 1,
-};
-
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- adv7511->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
-}
-
-void adv7511_audio_exit(struct adv7511 *adv7511)
-{
- if (adv7511->audio_pdev) {
- platform_device_unregister(adv7511->audio_pdev);
- adv7511->audio_pdev = NULL;
- }
-}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 2e9c88a2b5ed..8ecbc25dc647 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -12,6 +12,8 @@
#include <media/cec.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
#include "adv7511.h"
static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = {
@@ -44,8 +46,8 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
return;
if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) {
- cec_transmit_attempt_done(adv7511->cec_adap,
- CEC_TX_STATUS_ARB_LOST);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_ARB_LOST);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) {
@@ -72,12 +74,14 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
if (low_drive_cnt)
status |= CEC_TX_STATUS_LOW_DRIVE;
}
- cec_transmit_done(adv7511->cec_adap, status,
- 0, nack_cnt, low_drive_cnt, err_cnt);
+ drm_connector_hdmi_cec_transmit_done(adv7511->cec_connector, status,
+ 0, nack_cnt, low_drive_cnt,
+ err_cnt);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) {
- cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_OK);
return;
}
}
@@ -116,7 +120,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf)
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, BIT(rx_buf), 0);
- cec_received_msg(adv7511->cec_adap, &msg);
+ drm_connector_hdmi_cec_received_msg(adv7511->cec_connector, &msg);
}
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
@@ -179,9 +183,9 @@ int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
return IRQ_HANDLED;
}
-static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
if (adv7511->i2c_cec == NULL)
@@ -225,9 +229,9 @@ static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
return 0;
}
-static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
unsigned int i, free_idx = ADV7511_MAX_ADDRS;
@@ -293,10 +297,10 @@ static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
return 0;
}
-static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
u8 len = msg->len;
unsigned int i;
@@ -328,12 +332,6 @@ static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
return 0;
}
-static const struct cec_adap_ops adv7511_cec_adap_ops = {
- .adap_enable = adv7511_cec_adap_enable,
- .adap_log_addr = adv7511_cec_adap_log_addr,
- .adap_transmit = adv7511_cec_adap_transmit,
-};
-
static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
{
adv7511->cec_clk = devm_clk_get(dev, "cec");
@@ -348,20 +346,18 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
return 0;
}
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+int adv7511_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+ struct device *dev = &adv7511->i2c_main->dev;
unsigned int offset = adv7511->info->reg_cec_offset;
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)
goto err_cec_parse_dt;
- adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
- adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
- if (IS_ERR(adv7511->cec_adap)) {
- ret = PTR_ERR(adv7511->cec_adap);
- goto err_cec_alloc;
- }
+ adv7511->cec_connector = connector;
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0);
/* cec soft reset */
@@ -378,17 +374,8 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
ADV7511_REG_CEC_CLK_DIV + offset,
((adv7511->cec_clk_freq / 750000) - 1) << 2);
- ret = cec_register_adapter(adv7511->cec_adap, dev);
- if (ret)
- goto err_cec_register;
return 0;
-err_cec_register:
- cec_delete_adapter(adv7511->cec_adap);
- adv7511->cec_adap = NULL;
-err_cec_alloc:
- dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
- ret);
err_cec_parse_dt:
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 050dae338ffe..00d6417c177b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -12,14 +12,17 @@
#include <linux/of.h>
#include <linux/slab.h>
-#include <media/cec.h>
+#include <sound/pcm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include "adv7511.h"
@@ -203,62 +206,37 @@ static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
static void adv7511_set_config_csc(struct adv7511 *adv7511,
struct drm_connector *connector,
- bool rgb, bool hdmi_mode)
+ bool rgb)
{
struct adv7511_video_config config;
bool output_format_422, output_format_ycbcr;
unsigned int mode;
- uint8_t infoframe[17];
-
- config.hdmi_mode = hdmi_mode;
-
- hdmi_avi_infoframe_init(&config.avi_infoframe);
-
- config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
if (rgb) {
config.csc_enable = false;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ output_format_422 = false;
+ output_format_ycbcr = false;
} else {
config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
if ((connector->display_info.color_formats &
DRM_COLOR_FORMAT_YCBCR422) &&
- config.hdmi_mode) {
+ connector->display_info.is_hdmi) {
config.csc_enable = false;
- config.avi_infoframe.colorspace =
- HDMI_COLORSPACE_YUV422;
- } else {
- config.csc_enable = true;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
- }
- }
-
- if (config.hdmi_mode) {
- mode = ADV7511_HDMI_CFG_MODE_HDMI;
-
- switch (config.avi_infoframe.colorspace) {
- case HDMI_COLORSPACE_YUV444:
- output_format_422 = false;
- output_format_ycbcr = true;
- break;
- case HDMI_COLORSPACE_YUV422:
output_format_422 = true;
output_format_ycbcr = true;
- break;
- default:
+ } else {
+ config.csc_enable = true;
output_format_422 = false;
output_format_ycbcr = false;
- break;
}
- } else {
- mode = ADV7511_HDMI_CFG_MODE_DVI;
- output_format_422 = false;
- output_format_ycbcr = false;
}
- adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ if (connector->display_info.is_hdmi)
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+ else
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
adv7511_set_colormap(adv7511, config.csc_enable,
config.csc_coefficents,
@@ -269,15 +247,6 @@ static void adv7511_set_config_csc(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
ADV7511_HDMI_CFG_MODE_MASK, mode);
-
- hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
- sizeof(infoframe));
-
- /* The AVI infoframe id is not configurable */
- regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
- infoframe + 1, sizeof(infoframe) - 1);
-
- adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
}
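/*
 * Annotation: the AVI-infoframe packing removed from this function is not
 * lost -- the connector HDMI state helper now builds the frame and hands
 * it to the new adv7511_bridge_hdmi_write_infoframe() hook below, driven
 * by drm_atomic_helper_connector_hdmi_update_infoframes() in the bridge's
 * atomic_enable path.
 */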
static void adv7511_set_link_config(struct adv7511 *adv7511,
@@ -446,22 +415,16 @@ static void adv7511_hpd_work(struct work_struct *work)
* restore its state.
*/
if (status == connector_status_connected &&
- adv7511->connector.status == connector_status_disconnected &&
+ adv7511->status == connector_status_disconnected &&
adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
}
- if (adv7511->connector.status != status) {
- adv7511->connector.status = status;
+ if (adv7511->status != status) {
+ adv7511->status = status;
- if (adv7511->connector.dev) {
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv7511->cec_adap);
- drm_kms_helper_hotplug_event(adv7511->connector.dev);
- } else {
- drm_bridge_hpd_notify(&adv7511->bridge, status);
- }
+ drm_bridge_hpd_notify(&adv7511->bridge, status);
}
}
@@ -636,45 +599,11 @@ static const struct drm_edid *adv7511_edid_read(struct adv7511 *adv7511,
if (!adv7511->powered)
__adv7511_power_off(adv7511);
- if (drm_edid) {
- /*
- * FIXME: The CEC physical address should be set using
- * cec_s_phys_addr(adap,
- * connector->display_info.source_physical_address, false) from
- * a path that has read the EDID and called
- * drm_edid_connector_update().
- */
- const struct edid *edid = drm_edid_raw(drm_edid);
-
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
- drm_detect_hdmi_monitor(edid));
-
- cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
- } else {
- cec_s_phys_addr_from_edid(adv7511->cec_adap, NULL);
- }
-
return drm_edid;
}
-static int adv7511_get_modes(struct adv7511 *adv7511,
- struct drm_connector *connector)
-{
- const struct drm_edid *drm_edid;
- unsigned int count;
-
- drm_edid = adv7511_edid_read(adv7511, connector);
-
- drm_edid_connector_update(connector, drm_edid);
- count = drm_edid_connector_add_modes(connector);
-
- drm_edid_free(drm_edid);
-
- return count;
-}
-
static enum drm_connector_status
-adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+adv7511_detect(struct adv7511 *adv7511)
{
enum drm_connector_status status;
unsigned int val;
@@ -699,8 +628,6 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- if (connector)
- adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -719,17 +646,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
return status;
}
-static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
- const struct drm_display_mode *mode)
-{
- if (mode->clock > 165000)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
static void adv7511_mode_set(struct adv7511 *adv7511,
- const struct drm_display_mode *mode,
const struct drm_display_mode *adj_mode)
{
unsigned int low_refresh_rate;
@@ -800,11 +717,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (drm_mode_vrefresh(mode) <= 24)
+ if (drm_mode_vrefresh(adj_mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (drm_mode_vrefresh(mode) <= 25)
+ else if (drm_mode_vrefresh(adj_mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (drm_mode_vrefresh(mode) <= 30)
+ else if (drm_mode_vrefresh(adj_mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
@@ -821,82 +738,30 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
drm_mode_copy(&adv7511->curr_mode, adj_mode);
+ /* Update horizontal/vertical porch params */
+ if (adv7511->info->has_dsi && adv7511->use_timing_gen)
+ adv7533_dsi_config_timing_gen(adv7511);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
*/
- adv7511->f_tmds = mode->clock;
-}
-
-/* -----------------------------------------------------------------------------
- * DRM Connector Operations
- */
-
-static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
-{
- return container_of(connector, struct adv7511, connector);
-}
-
-static int adv7511_connector_get_modes(struct drm_connector *connector)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_get_modes(adv, connector);
-}
-
-static enum drm_mode_status
-adv7511_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_mode_valid(adv, mode);
+ adv7511->f_tmds = adj_mode->clock;
}
-static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
- .get_modes = adv7511_connector_get_modes,
- .mode_valid = adv7511_connector_mode_valid,
-};
-
-static enum drm_connector_status
-adv7511_connector_detect(struct drm_connector *connector, bool force)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_detect(adv, connector);
-}
-
-static const struct drm_connector_funcs adv7511_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = adv7511_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int adv7511_connector_init(struct adv7511 *adv)
{
struct drm_bridge *bridge = &adv->bridge;
- int ret;
-
- if (adv->i2c_main->irq)
- adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
- else
- adv->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ struct drm_connector *connector;
- ret = drm_connector_init(bridge->dev, &adv->connector,
- &adv7511_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0) {
+ connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ if (IS_ERR(connector)) {
DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
+ return PTR_ERR(connector);
}
- drm_connector_helper_add(&adv->connector,
- &adv7511_connector_helper_funcs);
- drm_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+ drm_connector_attach_encoder(connector, bridge->encoder);
return 0;
}
@@ -905,7 +770,7 @@ static int adv7511_connector_init(struct adv7511 *adv)
* DRM Bridge Operations
*/
-static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+static const struct adv7511 *bridge_to_adv7511_const(const struct drm_bridge *bridge)
{
return container_of(bridge, struct adv7511, bridge);
}
@@ -914,8 +779,29 @@ static void adv7511_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
adv7511_power_on(adv);
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ adv7511_set_config_csc(adv, connector, adv->rgb);
+
+ adv7511_mode_set(adv, &crtc_state->adjusted_mode);
+
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
}
static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -926,13 +812,17 @@ static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
adv7511_power_off(adv);
}
-static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adj_mode)
+static enum drm_mode_status
+adv7511_bridge_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ const struct adv7511 *adv = bridge_to_adv7511_const(bridge);
- adv7511_mode_set(adv, mode, adj_mode);
+ if (tmds_rate > 1000ULL * adv->info->max_mode_clock_khz)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
}
static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
@@ -941,20 +831,21 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- if (adv->info->has_dsi)
- return adv7533_mode_valid(adv, mode);
- else
- return adv7511_mode_valid(adv, mode);
+ if (!adv->info->has_dsi)
+ return MODE_OK;
+
+ return adv7533_mode_valid(adv, mode);
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret = 0;
if (adv->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, adv->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
@@ -973,11 +864,12 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge,
return ret;
}
-static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+adv7511_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- return adv7511_detect(adv, NULL);
+ return adv7511_detect(adv);
}
static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge,
@@ -988,28 +880,71 @@ static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge
return adv7511_edid_read(adv, connector);
}
-static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
- enum drm_connector_status status)
+static int adv7511_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv->cec_adap);
+ return 0;
+}
+
+static int adv7511_bridge_hdmi_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ adv7511_bridge_hdmi_clear_infoframe(bridge, type);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ /* The AVI infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ buffer + 1, len - 1);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
+
+ return 0;
}
static const struct drm_bridge_funcs adv7511_bridge_funcs = {
- .mode_set = adv7511_bridge_mode_set,
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
.edid_read = adv7511_bridge_edid_read,
- .hpd_notify = adv7511_bridge_hpd_notify,
.atomic_enable = adv7511_bridge_atomic_enable,
.atomic_disable = adv7511_bridge_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+
+ .hdmi_tmds_char_rate_valid = adv7511_bridge_hdmi_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = adv7511_bridge_hdmi_clear_infoframe,
+ .hdmi_write_infoframe = adv7511_bridge_hdmi_write_infoframe,
+
+ .hdmi_audio_startup = adv7511_hdmi_audio_startup,
+ .hdmi_audio_prepare = adv7511_hdmi_audio_prepare,
+ .hdmi_audio_shutdown = adv7511_hdmi_audio_shutdown,
+
+ .hdmi_cec_init = adv7511_cec_init,
+ .hdmi_cec_enable = adv7511_cec_enable,
+ .hdmi_cec_log_addr = adv7511_cec_log_addr,
+ .hdmi_cec_transmit = adv7511_cec_transmit,
};
/* -----------------------------------------------------------------------------
@@ -1223,9 +1158,10 @@ static int adv7511_probe(struct i2c_client *i2c)
if (!dev->of_node)
return -EINVAL;
- adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
- if (!adv7511)
- return -ENOMEM;
+ adv7511 = devm_drm_bridge_alloc(dev, struct adv7511, bridge,
+ &adv7511_bridge_funcs);
+ if (IS_ERR(adv7511))
+ return PTR_ERR(adv7511);
adv7511->i2c_main = i2c;
adv7511->powered = false;
@@ -1322,22 +1258,43 @@ static int adv7511_probe(struct i2c_client *i2c)
if (adv7511->info->link_config)
adv7511_set_link_config(adv7511, &link_config);
- ret = adv7511_cec_init(dev, adv7511);
- if (ret)
- goto err_unregister_cec;
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
- adv7511->bridge.funcs = &adv7511_bridge_funcs;
- adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI;
if (adv7511->i2c_main->irq)
adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ adv7511->bridge.vendor = "Analog";
+ adv7511->bridge.product = adv7511->info->name;
+
+#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
+ adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_AUDIO;
+ adv7511->bridge.hdmi_audio_dev = dev;
+ adv7511->bridge.hdmi_audio_max_i2s_playback_channels = 2;
+ adv7511->bridge.hdmi_audio_i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+						  SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE);
+ adv7511->bridge.hdmi_audio_spdif_playback = 1;
+ adv7511->bridge.hdmi_audio_dai_port = 2;
+#endif
+
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+ adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
+ adv7511->bridge.hdmi_cec_dev = dev;
+ adv7511->bridge.hdmi_cec_adapter_name = dev_name(dev);
+ adv7511->bridge.hdmi_cec_available_las = ADV7511_MAX_ADDRS;
+#endif
+
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);
- adv7511_audio_init(dev, adv7511);
-
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -1359,10 +1316,7 @@ static int adv7511_probe(struct i2c_client *i2c)
return 0;
err_unregister_audio:
- adv7511_audio_exit(adv7511);
drm_bridge_remove(&adv7511->bridge);
-err_unregister_cec:
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:
@@ -1387,9 +1341,6 @@ static void adv7511_remove(struct i2c_client *i2c)
drm_bridge_remove(&adv7511->bridge);
- adv7511_audio_exit(adv7511);
-
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
@@ -1399,6 +1350,8 @@ static void adv7511_remove(struct i2c_client *i2c)
static const struct adv7511_chip_info adv7511_chip_info = {
.type = ADV7511,
+ .name = "ADV7511",
+ .max_mode_clock_khz = 165000,
.supply_names = adv7511_supply_names,
.num_supplies = ARRAY_SIZE(adv7511_supply_names),
.link_config = true,
@@ -1406,6 +1359,7 @@ static const struct adv7511_chip_info adv7511_chip_info = {
static const struct adv7511_chip_info adv7533_chip_info = {
.type = ADV7533,
+ .name = "ADV7533",
.max_mode_clock_khz = 80000,
.max_lane_freq_khz = 800000,
.supply_names = adv7533_supply_names,
@@ -1416,6 +1370,7 @@ static const struct adv7511_chip_info adv7533_chip_info = {
static const struct adv7511_chip_info adv7535_chip_info = {
.type = ADV7535,
+ .name = "ADV7535",
.max_mode_clock_khz = 148500,
.max_lane_freq_khz = 891000,
.supply_names = adv7533_supply_names,
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 122ad91e8a32..188c1093a66e 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -24,7 +24,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x05, 0xc8 },
};
-static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
struct drm_display_mode *mode = &adv->curr_mode;
@@ -67,9 +67,6 @@ void adv7533_dsi_power_on(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
- if (adv->use_timing_gen)
- adv7511_dsi_config_timing_gen(adv);
-
/* set number of dsi lanes */
regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
@@ -106,10 +103,6 @@ enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
struct mipi_dsi_device *dsi = adv->dsi;
u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- /* Check max clock for either 7533 or 7535 */
- if (mode->clock > adv->info->max_mode_clock_khz)
- return MODE_CLOCK_HIGH;
-
/* Check max clock for each lane */
if (mode->clock * bpp > adv->info->max_lane_freq_khz * adv->num_dsi_lanes)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 83d711ee3a2e..f3fe47b12edc 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -143,35 +143,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx6345->aux, anx6345->dpcd[DP_DPCD_REV]);
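/*
 * Annotation: the open-coded DPCD >= 1.1 power-up removed above collapses
 * into the shared drm_dp_link_power_up() helper, which performs the same
 * DP_SET_POWER_D0 write and settle delay. Note the helper's return value
 * is not checked here, whereas the removed code propagated errors; the
 * same conversion appears below in anx78xx_dp_link_training().
 */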
/* Possibly enable downspread on the sink */
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
@@ -517,6 +489,7 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
};
static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
@@ -553,7 +526,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx6345->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
@@ -691,9 +664,10 @@ static int anx6345_i2c_probe(struct i2c_client *client)
struct device *dev;
int i, err;
- anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL);
- if (!anx6345)
- return -ENOMEM;
+ anx6345 = devm_drm_bridge_alloc(&client->dev, struct anx6345, bridge,
+ &anx6345_bridge_funcs);
+ if (IS_ERR(anx6345))
+ return PTR_ERR(anx6345);
mutex_init(&anx6345->lock);
@@ -765,7 +739,6 @@ static int anx6345_i2c_probe(struct i2c_client *client)
/* Look for supported chip ID */
anx6345_poweron(anx6345);
if (anx6345_get_chip_id(anx6345)) {
- anx6345->bridge.funcs = &anx6345_bridge_funcs;
drm_bridge_add(&anx6345->bridge);
return 0;
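
[Editor's sketch] The Analogix bridges now delegate sink power-up to the shared drm_dp_link_power_up() helper, passing the DPCD revision so pre-1.1 sinks (which lack DP_SET_POWER) are skipped. The removed block above shows the sequence the helper encapsulates; a sketch of the equivalent logic, assuming the helper mirrors the removed code:

#include <drm/display/drm_dp_helper.h>
#include <linux/delay.h>

static int dp_link_power_up_sketch(struct drm_dp_aux *aux, u8 dpcd_rev)
{
	u8 value;
	int err;

	/* DP_SET_POWER only exists on DPCD v1.1 and later */
	if (dpcd_rev < 0x11)
		return 0;

	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
	if (err < 0)
		return err;

	value &= ~DP_SET_POWER_MASK;
	value |= DP_SET_POWER_D0;

	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
	if (err < 0)
		return err;

	/* Per DP 1.1, the sink must exit power saving within 1 ms */
	usleep_range(1000, 2000);

	return 0;
}
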
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index f74694bb9c50..ba0fc149a9e7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -656,35 +656,7 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx78xx->aux, anx78xx->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
@@ -888,6 +860,7 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
};
static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
@@ -924,7 +897,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx78xx->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
@@ -1220,9 +1193,10 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
bool found = false;
int err;
- anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL);
- if (!anx78xx)
- return -ENOMEM;
+ anx78xx = devm_drm_bridge_alloc(&client->dev, struct anx78xx, bridge,
+ &anx78xx_bridge_funcs);
+ if (IS_ERR(anx78xx))
+ return PTR_ERR(anx78xx);
pdata = &anx78xx->pdata;
@@ -1333,8 +1307,6 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
goto err_poweroff;
}
- anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
-
drm_bridge_add(&anx78xx->bridge);
/* If cable is pulled out, just poweroff and wait for HPD event */
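
[Editor's sketch] The other change repeated across these probes swaps devm_kzalloc() plus a late bridge.funcs assignment for devm_drm_bridge_alloc(), which allocates the embedding structure and wires up the drm_bridge member and its funcs in one step; note it reports failure via ERR_PTR(), not NULL. A usage sketch with hypothetical foo names:

#include <drm/drm_bridge.h>
#include <linux/err.h>

static const struct drm_bridge_funcs foo_bridge_funcs = {
	/* .attach and friends elided in this sketch */
};

struct foo {
	struct drm_bridge bridge;	/* member named in the macro call */
	/* driver-private state */
};

static int foo_probe(struct device *dev)
{
	struct foo *foo;

	foo = devm_drm_bridge_alloc(dev, struct foo, bridge,
				    &foo_bridge_funcs);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	return 0;
}
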
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
index b1e482994ffe..e8662168717d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
@@ -5,6 +5,8 @@
* Based on anx7808 driver obtained from chromeos with copyright:
* Copyright(c) 2013, Google Inc.
*/
+
+#include <linux/export.h>
#include <linux/regmap.h>
#include <drm/display/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 071168aa0c3b..ed35e567d117 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -838,10 +839,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
int ret;
/* Keep the panel disabled while we configure video */
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel))
- DRM_ERROR("failed to disable the panel\n");
- }
+ drm_panel_disable(dp->plat_data->panel);
ret = analogix_dp_train_link(dp);
if (ret) {
@@ -863,13 +861,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
}
/* Safe to enable the panel now */
- if (dp->plat_data->panel) {
- ret = drm_panel_enable(dp->plat_data->panel);
- if (ret) {
- DRM_ERROR("failed to enable the panel\n");
- return ret;
- }
- }
+ drm_panel_enable(dp->plat_data->panel);
/* Check whether panel supports fast training */
ret = analogix_dp_fast_link_train_detection(dp);
@@ -955,67 +947,15 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp)
return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
}
-/*
- * This function is a bit of a catch-all for panel preparation, hopefully
- * simplifying the logic of functions that need to prepare/unprepare the panel
- * below.
- *
- * If @prepare is true, this function will prepare the panel. Conversely, if it
- * is false, the panel will be unprepared.
- *
- * If @is_modeset_prepare is true, the function will disregard the current state
- * of the panel and either prepare/unprepare the panel based on @prepare. Once
- * it finishes, it will update dp->panel_is_modeset to reflect the current state
- * of the panel.
- */
-static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
- bool prepare, bool is_modeset_prepare)
-{
- int ret = 0;
-
- if (!dp->plat_data->panel)
- return 0;
-
- mutex_lock(&dp->panel_lock);
-
- /*
- * Exit early if this is a temporary prepare/unprepare and we're already
- * modeset (since we neither want to prepare twice or unprepare early).
- */
- if (dp->panel_is_modeset && !is_modeset_prepare)
- goto out;
-
- if (prepare)
- ret = drm_panel_prepare(dp->plat_data->panel);
- else
- ret = drm_panel_unprepare(dp->plat_data->panel);
-
- if (ret)
- goto out;
-
- if (is_modeset_prepare)
- dp->panel_is_modeset = prepare;
-
-out:
- mutex_unlock(&dp->panel_lock);
- return ret;
-}
-
static int analogix_dp_get_modes(struct drm_connector *connector)
{
struct analogix_dp_device *dp = to_dp(connector);
const struct drm_edid *drm_edid;
- int ret, num_modes = 0;
+ int num_modes = 0;
if (dp->plat_data->panel) {
num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
} else {
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return 0;
- }
-
drm_edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
drm_edid_connector_update(&dp->connector, drm_edid);
@@ -1024,10 +964,6 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
num_modes += drm_edid_connector_add_modes(&dp->connector);
drm_edid_free(drm_edid);
}
-
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
}
if (dp->plat_data->get_modes)
@@ -1082,24 +1018,13 @@ analogix_dp_detect(struct drm_connector *connector, bool force)
{
struct analogix_dp_device *dp = to_dp(connector);
enum drm_connector_status status = connector_status_disconnected;
- int ret;
if (dp->plat_data->panel)
return connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return connector_status_disconnected;
- }
-
if (!analogix_dp_detect_hpd(dp))
status = connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
-
return status;
}
@@ -1113,10 +1038,10 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
};
static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
- struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_encoder *encoder = dp->encoder;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_connector *connector = NULL;
int ret = 0;
@@ -1200,10 +1125,9 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- int ret;
crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
@@ -1214,9 +1138,7 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
- ret = analogix_dp_prepare_panel(dp, true, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_prepare(dp->plat_data->panel);
}
static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
@@ -1258,7 +1180,7 @@ out_dp_init:
static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
@@ -1295,18 +1217,12 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
{
- struct analogix_dp_device *dp = bridge->driver_private;
- int ret;
+ struct analogix_dp_device *dp = to_dp(bridge);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel)) {
- DRM_ERROR("failed to disable the panel\n");
- return;
- }
- }
+ drm_panel_disable(dp->plat_data->panel);
disable_irq(dp->irq);
@@ -1314,9 +1230,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put_sync(dp->dev);
- ret = analogix_dp_prepare_panel(dp, false, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_unprepare(dp->plat_data->panel);
dp->fast_train_enable = false;
dp->psr_supported = false;
@@ -1326,7 +1240,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *old_crtc, *new_crtc;
struct drm_crtc_state *old_crtc_state = NULL;
struct drm_crtc_state *new_crtc_state = NULL;
@@ -1364,7 +1278,7 @@ out:
static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
int ret;
@@ -1386,7 +1300,7 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_display_info *display_info = &dp->connector.display_info;
struct video_info *video = &dp->video_info;
struct device_node *dp_node = dp->dev->of_node;
@@ -1471,25 +1385,6 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
.attach = analogix_dp_bridge_attach,
};
-static int analogix_dp_create_bridge(struct drm_device *drm_dev,
- struct analogix_dp_device *dp)
-{
- struct drm_bridge *bridge;
-
- bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- DRM_ERROR("failed to allocate for drm bridge\n");
- return -ENOMEM;
- }
-
- dp->bridge = bridge;
-
- bridge->driver_private = dp;
- bridge->funcs = &analogix_dp_bridge_funcs;
-
- return drm_bridge_attach(dp->encoder, bridge, NULL, 0);
-}
-
static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
{
struct device_node *dp_node = dp->dev->of_node;
@@ -1505,6 +1400,10 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
video_info->max_link_rate = 0x0A;
video_info->max_lane_count = 0x04;
break;
+ case RK3588_EDP:
+ video_info->max_link_rate = 0x14;
+ video_info->max_lane_count = 0x04;
+ break;
case EXYNOS_DP:
/*
* NOTE: those property parseing code is used for
@@ -1540,6 +1439,26 @@ out:
return ret;
}
+static int analogix_dpaux_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+ int val;
+ int ret;
+
+ if (dp->force_hpd)
+ return 0;
+
+ pm_runtime_get_sync(dp->dev);
+
+ ret = readx_poll_timeout(analogix_dp_get_plug_in_status, dp, val, !val,
+ wait_us / 100, wait_us);
+
+ pm_runtime_mark_last_busy(dp->dev);
+ pm_runtime_put_autosuspend(dp->dev);
+
+ return ret;
+}
+
struct analogix_dp_device *
analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
@@ -1553,16 +1472,14 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
return ERR_PTR(-EINVAL);
}
- dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL);
+ dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
+ &analogix_dp_bridge_funcs);
-	if (!dp)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(dp))
+		return ERR_CAST(dp);
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
- mutex_init(&dp->panel_lock);
- dp->panel_is_modeset = false;
-
/*
* platform dp driver need containor_of the plat_data to get
* the driver private data, so we need to store the point of
@@ -1597,10 +1514,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
}
dp->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(dp->reg_base)) {
- ret = PTR_ERR(dp->reg_base);
- goto err_disable_clk;
- }
+ if (IS_ERR(dp->reg_base))
+ return ERR_CAST(dp->reg_base);
dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
@@ -1612,8 +1527,7 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
if (IS_ERR(dp->hpd_gpiod)) {
dev_err(dev, "error getting HDP GPIO: %ld\n",
PTR_ERR(dp->hpd_gpiod));
- ret = PTR_ERR(dp->hpd_gpiod);
- goto err_disable_clk;
+ return ERR_CAST(dp->hpd_gpiod);
}
if (dp->hpd_gpiod) {
@@ -1625,16 +1539,15 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
* that we can get the current state of the GPIO.
*/
dp->irq = gpiod_to_irq(dp->hpd_gpiod);
- irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+ irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN;
} else {
dp->irq = platform_get_irq(pdev, 0);
- irq_flags = 0;
+ irq_flags = IRQF_NO_AUTOEN;
}
if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
- ret = -ENODEV;
- goto err_disable_clk;
+ return ERR_PTR(-ENODEV);
}
ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
@@ -1643,15 +1556,22 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
irq_flags, "analogix-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_disable_clk;
+ return ERR_PTR(ret);
}
- disable_irq(dp->irq);
- return dp;
+ dp->aux.name = "DP-AUX";
+ dp->aux.transfer = analogix_dpaux_transfer;
+ dp->aux.wait_hpd_asserted = analogix_dpaux_wait_hpd_asserted;
+ dp->aux.dev = dp->dev;
+ drm_dp_aux_init(&dp->aux);
-err_disable_clk:
- clk_disable_unprepare(dp->clock);
- return ERR_PTR(ret);
+ pm_runtime_use_autosuspend(dp->dev);
+ pm_runtime_set_autosuspend_delay(dp->dev, 100);
+ ret = devm_pm_runtime_enable(dp->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dp;
}
EXPORT_SYMBOL_GPL(analogix_dp_probe);
@@ -1681,6 +1601,7 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
if (dp->plat_data->power_on)
dp->plat_data->power_on(dp->plat_data);
+ phy_set_mode(dp->phy, PHY_MODE_DP);
phy_power_on(dp->phy);
analogix_dp_init_dp(dp);
@@ -1696,28 +1617,15 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
dp->drm_dev = drm_dev;
dp->encoder = dp->plat_data->encoder;
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_use_autosuspend(dp->dev);
- pm_runtime_set_autosuspend_delay(dp->dev, 100);
- pm_runtime_enable(dp->dev);
- } else {
- ret = analogix_dp_resume(dp);
- if (ret)
- return ret;
- }
-
- dp->aux.name = "DP-AUX";
- dp->aux.transfer = analogix_dpaux_transfer;
- dp->aux.dev = dp->dev;
dp->aux.drm_dev = drm_dev;
ret = drm_dp_aux_register(&dp->aux);
if (ret) {
DRM_ERROR("failed to register AUX (%d)\n", ret);
- goto err_disable_pm_runtime;
+ return ret;
}
- ret = analogix_dp_create_bridge(drm_dev, dp);
+ ret = drm_bridge_attach(dp->encoder, &dp->bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to create bridge (%d)\n", ret);
goto err_unregister_aux;
@@ -1727,13 +1635,6 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
err_unregister_aux:
drm_dp_aux_unregister(&dp->aux);
-err_disable_pm_runtime:
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
return ret;
}
@@ -1741,22 +1642,12 @@ EXPORT_SYMBOL_GPL(analogix_dp_bind);
void analogix_dp_unbind(struct analogix_dp_device *dp)
{
- analogix_dp_bridge_disable(dp->bridge);
+ analogix_dp_bridge_disable(&dp->bridge);
dp->connector.funcs->destroy(&dp->connector);
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
+ drm_panel_unprepare(dp->plat_data->panel);
drm_dp_aux_unregister(&dp->aux);
-
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
@@ -1782,6 +1673,20 @@ int analogix_dp_stop_crc(struct drm_connector *connector)
}
EXPORT_SYMBOL_GPL(analogix_dp_stop_crc);
+struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+
+ return dp->plat_data;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_aux_to_plat_data);
+
+struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp)
+{
+ return &dp->aux;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_get_aux);
+
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Analogix DP Core Driver");
MODULE_LICENSE("GPL v2");
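
[Editor's sketch] analogix_dp_probe() now also wires a .wait_hpd_asserted hook into the AUX channel, built on readx_poll_timeout(). That macro repeatedly invokes an accessor, stores its result, and stops once a condition holds or the timeout elapses. A generic sketch with a hypothetical accessor:

#include <linux/iopoll.h>

struct my_dev;
/* hypothetical accessor: returns 0 once the sink is plugged in */
static int my_read_plug_status(struct my_dev *d);

static int wait_until_plugged(struct my_dev *d)
{
	int val;

	/* poll every 100 us, give up with -ETIMEDOUT after 10 ms */
	return readx_poll_timeout(my_read_plug_status, d, val, !val,
				  100, 10000);
}
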
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 774d11574b09..b86e93f30ed6 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -11,6 +11,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_bridge.h>
#define DP_TIMEOUT_LOOP_COUNT 100
#define MAX_CR_LOOP 5
@@ -154,7 +155,7 @@ struct analogix_dp_device {
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
struct drm_dp_aux aux;
struct clk *clock;
unsigned int irq;
@@ -169,9 +170,6 @@ struct analogix_dp_device {
bool fast_train_enable;
bool psr_supported;
- struct mutex panel_lock;
- bool panel_is_modeset;
-
struct analogix_dp_plat_data *plat_data;
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 3afc73c858c4..38fd8d5014d2 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <drm/bridge/analogix_dp.h>
@@ -513,10 +514,24 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
{
u32 reg;
+ int ret;
reg = bwtype;
if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62))
writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.link_rate =
+ drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100;
+ phy_cfg.dp.set_rate = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
@@ -530,9 +545,22 @@ void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count)
{
u32 reg;
+ int ret;
reg = count;
writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.lanes = dp->link_train.lane_count;
+ phy_cfg.dp.set_lanes = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
@@ -546,10 +574,34 @@ void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp)
{
u8 lane;
+ int ret;
for (lane = 0; lane < dp->link_train.lane_count; lane++)
writel(dp->link_train.training_lane[lane],
dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ for (lane = 0; lane < dp->link_train.lane_count; lane++) {
+ u8 training_lane = dp->link_train.training_lane[lane];
+ u8 vs, pe;
+
+ vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ phy_cfg.dp.voltage[lane] = vs;
+ phy_cfg.dp.pre[lane] = pe;
+ }
+
+ phy_cfg.dp.set_voltages = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane)
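
[Editor's sketch] The three helpers above now mirror the negotiated link rate, lane count, and per-lane voltage swing and pre-emphasis into a generic PHY through phy_configure(), treating -EOPNOTSUPP as "this PHY has no DP configure hook". The shape of one such call, with a hypothetical wrapper name; per include/linux/phy/phy-dp.h the link rate is expressed in Mb/s:

#include <linux/phy/phy.h>

static int dp_phy_set_rate(struct phy *phy, unsigned int link_rate_mbps)
{
	union phy_configure_opts opts = { 0 };
	int ret;

	opts.dp.link_rate = link_rate_mbps;	/* e.g. 1620, 2700, 5400 */
	opts.dp.set_rate = true;

	ret = phy_configure(phy, &opts);
	if (ret == -EOPNOTSUPP)
		return 0;	/* PHY cannot reconfigure DP params; not fatal */

	return ret;
}
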
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 0b97b66de577..c0ad8f59e483 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1257,10 +1257,10 @@ static void anx7625_power_on(struct anx7625_data *ctx)
usleep_range(11000, 12000);
/* Power on pin enable */
- gpiod_set_value(ctx->pdata.gpio_p_on, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 1);
usleep_range(10000, 11000);
/* Power reset pin enable */
- gpiod_set_value(ctx->pdata.gpio_reset, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 1);
usleep_range(10000, 11000);
DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
@@ -1280,9 +1280,9 @@ static void anx7625_power_standby(struct anx7625_data *ctx)
return;
}
- gpiod_set_value(ctx->pdata.gpio_reset, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 0);
usleep_range(1000, 1100);
- gpiod_set_value(ctx->pdata.gpio_p_on, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 0);
usleep_range(1000, 1100);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->pdata.supplies),
@@ -1814,9 +1814,6 @@ static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n");
- if (ctx->pdata.panel_bridge)
- return connector_status_connected;
-
return ctx->hpd_status ? connector_status_connected :
connector_status_disconnected;
}
@@ -2141,6 +2138,7 @@ static void hdcp_check_work_func(struct work_struct *work)
}
static int anx7625_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
@@ -2159,7 +2157,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
}
if (ctx->pdata.panel_bridge) {
- err = drm_bridge_attach(bridge->encoder,
+ err = drm_bridge_attach(encoder,
ctx->pdata.panel_bridge,
&ctx->bridge, flags);
if (err)
@@ -2450,7 +2448,7 @@ anx7625_audio_update_connector_status(struct anx7625_data *ctx,
enum drm_connector_status status);
static enum drm_connector_status
-anx7625_bridge_detect(struct drm_bridge *bridge)
+anx7625_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
@@ -2474,6 +2472,22 @@ static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge
return anx7625_edid_read(ctx);
}
+static void anx7625_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_get_sync(dev);
+}
+
+static void anx7625_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_put_sync(dev);
+}
+
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.attach = anx7625_bridge_attach,
.detach = anx7625_bridge_detach,
@@ -2487,6 +2501,8 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.detect = anx7625_bridge_detect,
.edid_read = anx7625_bridge_edid_read,
+ .hpd_enable = anx7625_bridge_hpd_enable,
+ .hpd_disable = anx7625_bridge_hpd_disable,
};
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
@@ -2568,12 +2584,6 @@ static const struct dev_pm_ops anx7625_pm_ops = {
anx7625_runtime_pm_resume, NULL)
};
-static void anx7625_runtime_disable(void *data)
-{
- pm_runtime_dont_use_autosuspend(data);
- pm_runtime_disable(data);
-}
-
static int anx7625_link_bridge(struct drm_dp_aux *aux)
{
struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux);
@@ -2586,13 +2596,11 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
return ret;
}
- platform->bridge.funcs = &anx7625_bridge_funcs;
platform->bridge.of_node = dev->of_node;
if (!anx7625_of_panel_on_aux_bus(dev))
platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
- if (!platform->pdata.panel_bridge)
- platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
- DRM_BRIDGE_OP_DETECT;
+ if (!platform->pdata.panel_bridge || !anx7625_of_panel_on_aux_bus(dev))
+ platform->bridge.ops |= DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT;
platform->bridge.type = platform->pdata.panel_bridge ?
DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort;
@@ -2621,10 +2629,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
return -ENODEV;
}
- platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
- if (!platform) {
+ platform = devm_drm_bridge_alloc(dev, struct anx7625_data, bridge, &anx7625_bridge_funcs);
+ if (IS_ERR(platform)) {
DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
- return -ENOMEM;
+ return PTR_ERR(platform);
}
pdata = &platform->pdata;
@@ -2707,11 +2715,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
goto free_wq;
}
- pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
pm_suspend_ignore_children(dev, true);
- ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
goto free_wq;
@@ -2771,7 +2778,6 @@ static void anx7625_i2c_remove(struct i2c_client *client)
if (platform->hdcp_workqueue) {
cancel_delayed_work(&platform->hdcp_work);
- flush_workqueue(platform->hdcp_workqueue);
destroy_workqueue(platform->hdcp_workqueue);
}
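
[Editor's sketch] anx7625 also drops its hand-rolled runtime-PM cleanup action in favour of devm_pm_runtime_enable(), which registers the matching pm_runtime_disable() (and, where autosuspend was enabled first, the autosuspend undo) with devres. A probe-path sketch:

#include <linux/pm_runtime.h>

static int foo_setup_runtime_pm(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* 1 s, in ms */
	pm_runtime_use_autosuspend(dev);

	/* teardown happens automatically when the driver unbinds */
	return devm_pm_runtime_enable(dev);
}
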
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index 015983c015e5..b3e4cdff61d6 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -17,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ of_node_put(dev->of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
@@ -64,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent)
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
return ret;
@@ -86,6 +89,7 @@ struct drm_aux_bridge_data {
};
static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct drm_aux_bridge_data *data;
@@ -95,7 +99,7 @@ static int drm_aux_bridge_attach(struct drm_bridge *bridge,
data = container_of(bridge, struct drm_aux_bridge_data, bridge);
- return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+ return drm_bridge_attach(encoder, data->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
@@ -108,9 +112,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev, struct drm_aux_bridge_data,
+ bridge, &drm_aux_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0);
@@ -118,7 +123,6 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge),
"failed to acquire drm_bridge\n");
- data->bridge.funcs = &drm_aux_bridge_funcs;
data->bridge.of_node = data->dev->of_node;
/* passthrough data, allow everything */
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index 48f297c78ee6..2e9c702c7087 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -64,10 +65,11 @@ struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, str
adev->id = ret;
adev->name = "dp_hpd_bridge";
adev->dev.parent = parent;
- adev->dev.of_node = of_node_get(parent->of_node);
adev->dev.release = drm_aux_hpd_bridge_release;
adev->dev.platform_data = of_node_get(np);
+ device_set_of_node_from_dev(&adev->dev, parent);
+
ret = auxiliary_device_init(adev);
if (ret) {
of_node_put(adev->dev.platform_data);
@@ -156,6 +158,7 @@ void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status sta
EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
@@ -170,12 +173,13 @@ static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_hpd_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev,
+ struct drm_aux_hpd_bridge_data, bridge,
+ &drm_aux_hpd_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
- data->bridge.funcs = &drm_aux_hpd_bridge_funcs;
data->bridge.of_node = dev_get_platdata(data->dev);
data->bridge.ops = DRM_BRIDGE_OP_HPD;
data->bridge.type = id->driver_data;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index c7a0247e06ad..a57ca8c3bdae 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -425,6 +425,17 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
+struct cdns_dsi_bridge_state {
+ struct drm_bridge_state base;
+ struct cdns_dsi_cfg dsi_cfg;
+};
+
+static inline struct cdns_dsi_bridge_state *
+to_cdns_dsi_bridge_state(struct drm_bridge_state *bridge_state)
+{
+ return container_of(bridge_state, struct cdns_dsi_bridge_state, base);
+}
+
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
return container_of(input, struct cdns_dsi, input);
@@ -568,15 +579,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
+ int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
if (ret)
return ret;
- phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
- mipi_dsi_pixel_format_to_bpp(output->dev->format),
- nlanes, phy_cfg);
+ ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
+ mipi_dsi_pixel_format_to_bpp(output->dev->format),
+ nlanes, phy_cfg);
+ if (ret)
+ return ret;
ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
if (ret)
@@ -605,6 +619,7 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
}
static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
@@ -617,7 +632,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ return drm_bridge_attach(encoder, output->bridge, bridge,
flags);
}
@@ -655,12 +670,28 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
u32 val;
+ /*
+ * The cdns-dsi controller needs to be disabled after its DPI source
+ * has stopped streaming. If this is not followed, there is a brief
+ * window before DPI source is disabled and after cdns-dsi controller
+ * has been disabled where the DPI stream is still on, but the cdns-dsi
+ * controller is not ready anymore to accept the incoming signals. This
+ * is one of the reasons why a shift in pixel colors is observed on
+ * displays that have cdns-dsi as one of the bridges.
+ *
+ * To mitigate this, disable this bridge from the bridge post_disable()
+ * hook, instead of the bridge _disable() hook. The bridge post_disable()
+ * hook gets called after the CRTC disable, where often many DPI sources
+ * disable their streams.
+ */
+
val = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
val &= ~(IF_VID_SELECT_MASK | IF_VID_MODE | VID_EN | HOST_EOT_GEN |
DISP_EOT_GEN);
@@ -672,13 +703,10 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
if (dsi->platform_ops && dsi->platform_ops->disable)
dsi->platform_ops->disable(dsi);
- pm_runtime_put(dsi->base.dev);
-}
-
-static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
-{
- struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
- struct cdns_dsi *dsi = input_to_dsi(input);
+ dsi->phy_initialized = false;
+ dsi->link_initialized = false;
+ phy_power_off(dsi->dphy);
+ phy_exit(dsi->dphy);
pm_runtime_put(dsi->base.dev);
}
@@ -752,31 +780,73 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
dsi->link_initialized = true;
}
-static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct cdns_dsi_bridge_state *dsi_state;
+ struct drm_bridge_state *new_bridge_state;
struct drm_display_mode *mode;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ struct drm_connector *connector;
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
- u32 tmp, reg_wakeup, div;
+ u32 tmp, reg_wakeup, div, status;
int nlanes;
+ /*
+ * The cdns-dsi controller needs to be enabled before its DPI source
+ * has begun streaming. If this is not followed, there is a brief window
+ * after DPI source enable and before cdns-dsi controller enable where
+ * the DPI stream is on, but the cdns-dsi controller is not ready to
+ * accept the incoming signals. This is one of the reasons why a shift
+ * in pixel colors is observed on displays that have cdns-dsi as one of
+ * the bridges.
+ *
+ * To mitigate this, enable this bridge from the bridge pre_enable()
+ * hook, instead of the bridge _enable() hook. The bridge pre_enable()
+ * hook gets called before the CRTC enable, where often many DPI sources
+ * enable their streams.
+ */
+
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
+ new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ if (WARN_ON(!new_bridge_state))
+ return;
+
+ dsi_state = to_cdns_dsi_bridge_state(new_bridge_state);
+ dsi_cfg = dsi_state->dsi_cfg;
+
if (dsi->platform_ops && dsi->platform_ops->enable)
dsi->platform_ops->enable(dsi);
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
nlanes = output->dev->lanes;
- WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
-
- cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
+ cdns_dsi_hs_init(dsi);
+
+ /*
+ * Now that the DSI Link and DSI Phy are initialized,
+ * wait for the CLK and Data Lanes to be ready.
+ */
+ tmp = CLK_LANE_RDY;
+ for (int i = 0; i < nlanes; i++)
+ tmp |= DATA_LANE_RDY(i);
+
+ if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
+ (tmp == (status & tmp)), 100, 500000))
+ dev_err(dsi->base.dev,
+ "Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
dsi->regs + VID_HSIZE1);
@@ -892,25 +962,107 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
-static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_output *output = &dsi->output;
+ u32 *input_fmts;
- if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
- return;
+ *num_input_fmts = 0;
- cdns_dsi_init_link(dsi);
- cdns_dsi_hs_init(dsi);
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(output->dev->format);
+ if (!input_fmts[0]) {
+ kfree(input_fmts);
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
+
+ return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state, *old_dsi_state;
+ struct drm_bridge_state *bridge_state;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ bridge_state = drm_priv_to_bridge_state(bridge->base.state);
+ old_dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &dsi_state->base);
+
+ memcpy(&dsi_state->dsi_cfg, &old_dsi_state->dsi_cfg,
+ sizeof(dsi_state->dsi_cfg));
+
+ return &dsi_state->base;
+}
+
+static void
+cdns_dsi_bridge_atomic_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = to_cdns_dsi_bridge_state(state);
+
+ kfree(dsi_state);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ dsi_state->base.bridge = bridge;
+
+ return &dsi_state->base;
}
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
- .disable = cdns_dsi_bridge_disable,
- .pre_enable = cdns_dsi_bridge_pre_enable,
- .enable = cdns_dsi_bridge_enable,
- .post_disable = cdns_dsi_bridge_post_disable,
+ .atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
+ .atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
+ .atomic_check = cdns_dsi_bridge_atomic_check,
+ .atomic_reset = cdns_dsi_bridge_atomic_reset,
+ .atomic_duplicate_state = cdns_dsi_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = cdns_dsi_bridge_atomic_destroy_state,
+ .atomic_get_input_bus_fmts = cdns_dsi_bridge_get_input_bus_fmts,
};
static int cdns_dsi_attach(struct mipi_dsi_host *host,
@@ -920,8 +1072,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
struct drm_bridge *bridge;
- struct drm_panel *panel;
- struct device_node *np;
int ret;
/*
@@ -939,26 +1089,10 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
/*
* The host <-> device link might be described using an OF-graph
* representation, in this case we extract the device of_node from
- * this representation, otherwise we use dsidev->dev.of_node which
- * should have been filled by the core.
+ * this representation.
*/
- np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
- dev->channel);
- if (!np)
- np = of_node_get(dev->dev.of_node);
-
- panel = of_drm_find_panel(np);
- if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DSI);
- } else {
- bridge = of_drm_find_bridge(dev->dev.of_node);
- if (!bridge)
- bridge = ERR_PTR(-EINVAL);
- }
-
- of_node_put(np);
-
+ bridge = devm_drm_of_get_bridge(dsi->base.dev, dsi->base.dev->of_node,
+ DSI_OUTPUT_PORT, dev->channel);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
dev_err(host->dev, "failed to add DSI device %s (err = %d)",
@@ -968,7 +1102,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
output->dev = dev;
output->bridge = bridge;
- output->panel = panel;
/*
* The DSI output has been properly configured, we can now safely
@@ -984,12 +1117,9 @@ static int cdns_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dev)
{
struct cdns_dsi *dsi = to_cdns_dsi(host);
- struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
drm_bridge_remove(&input->bridge);
- if (output->panel)
- drm_panel_bridge_remove(output->bridge);
return 0;
}
@@ -1152,7 +1282,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
clk_disable_unprepare(dsi->dsi_sys_clk);
clk_disable_unprepare(dsi->dsi_p_clk);
reset_control_assert(dsi->dsi_p_rst);
- dsi->link_initialized = false;
return 0;
}
@@ -1166,9 +1295,10 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
int ret, irq;
u32 val;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct cdns_dsi, input.bridge,
+ &cdns_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
@@ -1226,7 +1356,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
* CDNS_DPI_INPUT.
*/
input->id = CDNS_DPI_INPUT;
- input->bridge.funcs = &cdns_dsi_bridge_funcs;
input->bridge.of_node = pdev->dev.of_node;
/* Mask all interrupts before registering the IRQ handler. */
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
index ca7ea2da635c..5db5dbbbcaad 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
@@ -10,7 +10,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <linux/bits.h>
#include <linux/completion.h>
@@ -21,7 +20,6 @@ struct reset_control;
struct cdns_dsi_output {
struct mipi_dsi_device *dev;
- struct drm_panel *panel;
struct drm_bridge *bridge;
union phy_configure_opts phy_opts;
};
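
[Editor's sketch] cdns-dsi now validates the DSI timing configuration once in atomic_check() and carries the result to atomic_pre_enable() inside a subclassed bridge state, instead of recomputing (and WARNing) at enable time. The subclassing idiom is an embedded drm_bridge_state recovered with container_of(); a minimal sketch with hypothetical my_* names:

#include <drm/drm_bridge.h>

struct my_bridge_state {
	struct drm_bridge_state base;	/* embedded base state */
	u32 derived_cfg;	/* per-commit data computed in atomic_check */
};

static inline struct my_bridge_state *
to_my_bridge_state(struct drm_bridge_state *s)
{
	return container_of(s, struct my_bridge_state, base);
}
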
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 81fad14c2cd5..a614d1384f71 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -546,76 +546,6 @@ out:
}
/**
- * cdns_mhdp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * cdns_mhdp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
- struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* cdns_mhdp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
@@ -1453,7 +1383,7 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
- cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision);
cdns_mhdp_fill_sink_caps(mhdp, dpcd);
@@ -1500,7 +1430,7 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
if (mhdp->plugged)
- cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision);
mhdp->link_up = false;
}
@@ -1726,6 +1656,7 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
}
static int cdns_mhdp_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -2212,7 +2143,8 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+cdns_mhdp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -2305,7 +2237,7 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
* If everything looks fine, just return, as we don't handle
* DP IRQs.
*/
- if (ret > 0 &&
+ if (!ret &&
drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
goto out;
@@ -2458,9 +2390,10 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
int ret;
int irq;
- mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
- if (!mhdp)
- return -ENOMEM;
+ mhdp = devm_drm_bridge_alloc(dev, struct cdns_mhdp_device, bridge,
+ &cdns_mhdp_bridge_funcs);
+ if (IS_ERR(mhdp))
+ return PTR_ERR(mhdp);
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
@@ -2550,7 +2483,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->display_fmt.bpc = 8;
mhdp->bridge.of_node = pdev->dev.of_node;
- mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
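
[Editor's sketch] The "ret > 0" to "!ret" flip in cdns_mhdp_update_link_status() tracks the DPCD helper convention this series targets, where drm_dp_dpcd_read_link_status() returns 0 on success rather than the number of bytes read. A caller sketch under that convention:

#include <drm/display/drm_dp_helper.h>

static bool link_still_trained(struct drm_dp_aux *aux, int lanes)
{
	u8 status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_link_status(aux, status))
		return false;	/* AUX read failed */

	return drm_dp_channel_eq_ok(status, lanes) &&
	       drm_dp_clock_recovery_ok(status, lanes);
}
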
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 81f7c701961f..814713c5bea9 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -580,11 +580,13 @@ static int chipone_dsi_host_attach(struct chipone *icn)
return ret;
}
-static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int chipone_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct chipone *icn = bridge_to_chipone(bridge);
- return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, icn->panel_bridge, bridge, flags);
}
#define MAX_INPUT_SEL_FORMATS 1
@@ -689,9 +691,10 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
struct chipone *icn;
int ret;
- icn = devm_kzalloc(dev, sizeof(struct chipone), GFP_KERNEL);
- if (!icn)
- return -ENOMEM;
+ icn = devm_drm_bridge_alloc(dev, struct chipone, bridge,
+ &chipone_bridge_funcs);
+ if (IS_ERR(icn))
+ return PTR_ERR(icn);
icn->dev = dev;
@@ -699,7 +702,6 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
if (ret)
return ret;
- icn->bridge.funcs = &chipone_bridge_funcs;
icn->bridge.type = DRM_MODE_CONNECTOR_DPI;
icn->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index da17f0978a79..54d49d4882c8 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -215,7 +215,7 @@ static enum drm_connector_status ch7033_connector_detect(
{
struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
- return drm_bridge_detect(priv->next_bridge);
+ return drm_bridge_detect(priv->next_bridge, connector);
}
static const struct drm_connector_funcs ch7033_connector_funcs = {
@@ -268,13 +268,14 @@ static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
}
static int ch7033_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
struct drm_connector *connector = &priv->connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, priv->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
@@ -305,7 +306,7 @@ static int ch7033_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+ return drm_connector_attach_encoder(&priv->connector, encoder);
}
static void ch7033_bridge_detach(struct drm_bridge *bridge)
@@ -535,9 +536,10 @@ static int ch7033_probe(struct i2c_client *client)
unsigned int val;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct ch7033_priv, bridge,
+ &ch7033_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -574,7 +576,6 @@ static int ch7033_probe(struct i2c_client *client)
}
INIT_LIST_HEAD(&priv->bridge.list);
- priv->bridge.funcs = &ch7033_bridge_funcs;
priv->bridge.of_node = dev->of_node;
drm_bridge_add(&priv->bridge);
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
index c8abd9920fee..a35dae9b56e2 100644
--- a/drivers/gpu/drm/bridge/cros-ec-anx7688.c
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -103,9 +103,10 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
u8 buffer[4];
int ret;
- anx7688 = devm_kzalloc(dev, sizeof(*anx7688), GFP_KERNEL);
- if (!anx7688)
- return -ENOMEM;
+ anx7688 = devm_drm_bridge_alloc(dev, struct cros_ec_anx7688, bridge,
+ &cros_ec_anx7688_bridge_funcs);
+ if (IS_ERR(anx7688))
+ return PTR_ERR(anx7688);
anx7688->client = client;
i2c_set_clientdata(client, anx7688);
@@ -153,7 +154,6 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
DRM_WARN("Old ANX7688 FW version (0x%04x), not filtering\n",
fw_version);
- anx7688->bridge.funcs = &cros_ec_anx7688_bridge_funcs;
drm_bridge_add(&anx7688->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 72bc508d4e6e..52b7b5889e6f 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -34,13 +34,13 @@ to_display_connector(struct drm_bridge *bridge)
}
static int display_connector_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
}
-static enum drm_connector_status
-display_connector_detect(struct drm_bridge *bridge)
+static enum drm_connector_status display_connector_detect(struct drm_bridge *bridge)
{
struct display_connector *conn = to_display_connector(bridge);
@@ -81,6 +81,12 @@ display_connector_detect(struct drm_bridge *bridge)
}
}
+static enum drm_connector_status
+display_connector_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
+{
+ return display_connector_detect(bridge);
+}
+
static const struct drm_edid *display_connector_edid_read(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -171,7 +177,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs display_connector_bridge_funcs = {
.attach = display_connector_attach,
- .detect = display_connector_detect,
+ .detect = display_connector_bridge_detect,
.edid_read = display_connector_edid_read,
.atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts,
.atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
@@ -209,9 +215,10 @@ static int display_connector_probe(struct platform_device *pdev)
const char *label = NULL;
int ret;
- conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
- if (!conn)
- return -ENOMEM;
+ conn = devm_drm_bridge_alloc(&pdev->dev, struct display_connector, bridge,
+ &display_connector_bridge_funcs);
+ if (IS_ERR(conn))
+ return PTR_ERR(conn);
platform_set_drvdata(pdev, conn);
@@ -361,7 +368,6 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
- conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
if (conn->bridge.ddc)
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 26ae1ab5237f..5c3cf37200bc 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -113,11 +113,12 @@ static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
}
static int fsl_ldb_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge,
+ return drm_bridge_attach(encoder, fsl_ldb->panel_bridge,
bridge, flags);
}
@@ -180,9 +181,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
configured_link_freq = clk_get_rate(fsl_ldb->clk);
if (configured_link_freq != requested_link_freq)
- dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
- configured_link_freq,
- requested_link_freq);
+ dev_warn(fsl_ldb->dev,
+ "Configured %pC clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
+ fsl_ldb->clk, configured_link_freq, requested_link_freq);
clk_prepare_enable(fsl_ldb->clk);
@@ -297,16 +298,15 @@ static int fsl_ldb_probe(struct platform_device *pdev)
struct fsl_ldb *fsl_ldb;
int dual_link;
- fsl_ldb = devm_kzalloc(dev, sizeof(*fsl_ldb), GFP_KERNEL);
- if (!fsl_ldb)
- return -ENOMEM;
+ fsl_ldb = devm_drm_bridge_alloc(dev, struct fsl_ldb, bridge, &funcs);
+ if (IS_ERR(fsl_ldb))
+ return PTR_ERR(fsl_ldb);
fsl_ldb->devdata = of_device_get_match_data(dev);
if (!fsl_ldb->devdata)
return -EINVAL;
fsl_ldb->dev = &pdev->dev;
- fsl_ldb->bridge.funcs = &funcs;
fsl_ldb->bridge.of_node = dev->of_node;
fsl_ldb->clk = devm_clk_get(dev, "ldb");
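
All of the .attach changes in this series have the same shape: the core now hands the encoder to the callback, and drivers chain to it directly instead of reading bridge->encoder, which is only valid once the attach has completed. A self-contained sketch, with my_bridge and next_bridge as placeholder names:

#include <drm/drm_bridge.h>

struct my_bridge {
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
};

static int my_bridge_attach(struct drm_bridge *bridge,
			    struct drm_encoder *encoder,
			    enum drm_bridge_attach_flags flags)
{
	struct my_bridge *mb = container_of(bridge, struct my_bridge, bridge);

	/*
	 * Chain the downstream bridge to the encoder the core passed in,
	 * instead of reading bridge->encoder, which is not yet set here.
	 */
	return drm_bridge_attach(encoder, mb->next_bridge, bridge, flags);
}
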
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
index 9b5bebbe357d..6149ba141a38 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
@@ -104,7 +104,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge)
}
EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
@@ -116,9 +116,8 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
- ldb_ch->next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ return drm_bridge_attach(encoder, ldb_ch->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper);
@@ -191,8 +190,7 @@ int ldb_find_next_bridge_helper(struct ldb *ldb)
}
EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs)
+void ldb_add_bridge_helper(struct ldb *ldb)
{
struct ldb_channel *ldb_ch;
int i;
@@ -204,7 +202,6 @@ void ldb_add_bridge_helper(struct ldb *ldb,
continue;
ldb_ch->bridge.driver_private = ldb_ch;
- ldb_ch->bridge.funcs = bridge_funcs;
ldb_ch->bridge.of_node = ldb_ch->np;
drm_bridge_add(&ldb_ch->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
index a0a5cde27fbc..de187e326999 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
@@ -81,15 +81,14 @@ void ldb_bridge_enable_helper(struct drm_bridge *bridge);
void ldb_bridge_disable_helper(struct drm_bridge *bridge);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
int ldb_init_helper(struct ldb *ldb);
int ldb_find_next_bridge_helper(struct ldb *ldb);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs);
+void ldb_add_bridge_helper(struct ldb *ldb);
void ldb_remove_bridge_helper(struct ldb *ldb);
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
index 3ebf0b9866de..0e31d5000e7c 100644
--- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -5,6 +5,8 @@
* bridge driver for legacy DT bindings, utilizing display-timings node
*/
+#include <linux/export.h>
+
#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
@@ -23,7 +25,8 @@ struct imx_legacy_bridge {
#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base)
static int imx_legacy_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
@@ -58,9 +61,10 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
struct imx_legacy_bridge *imx_bridge;
int ret;
- imx_bridge = devm_kzalloc(dev, sizeof(*imx_bridge), GFP_KERNEL);
- if (!imx_bridge)
- return ERR_PTR(-ENOMEM);
+ imx_bridge = devm_drm_bridge_alloc(dev, struct imx_legacy_bridge,
+ base, &imx_legacy_bridge_funcs);
+ if (IS_ERR(imx_bridge))
+ return ERR_CAST(imx_bridge);
ret = of_get_drm_display_mode(np,
&imx_bridge->mode,
@@ -71,14 +75,13 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
imx_bridge->mode.type |= DRM_MODE_TYPE_DRIVER;
- imx_bridge->base.funcs = &imx_legacy_bridge_funcs;
imx_bridge->base.of_node = np;
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
imx_bridge->base.type = type;
- ret = devm_drm_bridge_add(dev, &imx_bridge->base);
- if (ret)
- return ERR_PTR(ret);
+ ret = devm_drm_bridge_add(dev, &imx_bridge->base);
+ if (ret)
+ return ERR_PTR(ret);
return &imx_bridge->base;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index a17433a7c755..3a6f8587a257 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -40,11 +40,12 @@ to_imx8mp_hdmi_pvi(struct drm_bridge *bridge)
}
static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
- return drm_bridge_attach(bridge->encoder, pvi->next_bridge,
+ return drm_bridge_attach(encoder, pvi->next_bridge,
bridge, flags);
}
@@ -139,9 +140,10 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
struct device_node *remote;
struct imx8mp_hdmi_pvi *pvi;
- pvi = devm_kzalloc(&pdev->dev, sizeof(*pvi), GFP_KERNEL);
- if (!pvi)
- return -ENOMEM;
+ pvi = devm_drm_bridge_alloc(&pdev->dev, struct imx8mp_hdmi_pvi,
+ bridge, &imx_hdmi_pvi_bridge_funcs);
+ if (IS_ERR(pvi))
+ return PTR_ERR(pvi);
platform_set_drvdata(pdev, pvi);
pvi->dev = &pdev->dev;
@@ -165,7 +167,6 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
/* Register the bridge. */
- pvi->bridge.funcs = &imx_hdmi_pvi_bridge_funcs;
pvi->bridge.of_node = pdev->dev.of_node;
pvi->bridge.timings = pvi->next_bridge->timings;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
index 524aac751359..47aa65938e6a 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
@@ -47,7 +47,7 @@ struct imx8qm_ldb_channel {
struct imx8qm_ldb {
struct ldb base;
struct device *dev;
- struct imx8qm_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qm_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
int active_chno;
@@ -107,7 +107,7 @@ static int imx8qm_ldb_bridge_atomic_check(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
@@ -158,7 +158,7 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
@@ -226,13 +226,13 @@ static void imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
}
if (is_split) {
- ret = phy_power_on(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel0 PHY: %d\n",
ret);
- ret = phy_power_on(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel1 PHY: %d\n",
@@ -261,12 +261,12 @@ static void imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
ldb_bridge_disable_helper(bridge);
if (is_split) {
- ret = phy_power_off(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel0 PHY: %d\n",
ret);
- ret = phy_power_off(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel1 PHY: %d\n",
@@ -412,7 +412,7 @@ static int imx8qm_ldb_get_phy(struct imx8qm_ldb *imx8qm_ldb)
int i, ret;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (!ldb_ch->is_available)
@@ -448,6 +448,14 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
if (!imx8qm_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qm_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qm_ldb_channel, base.bridge,
+ &imx8qm_ldb_bridge_funcs);
+ if (IS_ERR(imx8qm_ldb->channel[i]))
+ return PTR_ERR(imx8qm_ldb->channel[i]);
+ }
+
imx8qm_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qm_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qm_ldb->clk_pixel);
@@ -473,7 +481,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qm_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qm_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -499,12 +507,12 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
}
imx8qm_ldb->active_chno = 0;
- imx8qm_ldb_ch = &imx8qm_ldb->channel[0];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[0];
ldb_ch = &imx8qm_ldb_ch->base;
ldb_ch->link_type = pixel_order;
} else {
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -525,7 +533,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qm_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qm_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
return ret;
}
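
Because devm_drm_bridge_alloc() must itself allocate each structure that embeds a bridge, the per-channel data can no longer live inline in the parent: the array becomes an array of pointers, filled by one allocation per channel, and every &ldb->channel[i].field access turns into ldb->channel[i]->field, as the hunks above show. A condensed sketch with hypothetical names (the real drivers embed the bridge one level down, via base.bridge):

#include <linux/device.h>
#include <linux/err.h>
#include <drm/drm_bridge.h>

#define MY_NUM_CH	2

struct my_channel {
	struct drm_bridge bridge;
};

struct my_ldb {
	struct my_channel *channel[MY_NUM_CH];	/* was: struct my_channel channel[MY_NUM_CH] */
};

static int my_ldb_alloc_channels(struct device *dev, struct my_ldb *ldb,
				 const struct drm_bridge_funcs *funcs)
{
	int i;

	for (i = 0; i < MY_NUM_CH; i++) {
		ldb->channel[i] = devm_drm_bridge_alloc(dev, struct my_channel,
							bridge, funcs);
		if (IS_ERR(ldb->channel[i]))
			return PTR_ERR(ldb->channel[i]);
	}

	return 0;
}
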
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 3cb484773ddf..5d272916e200 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -44,7 +44,7 @@ struct imx8qxp_ldb_channel {
struct imx8qxp_ldb {
struct ldb base;
struct device *dev;
- struct imx8qxp_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qxp_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
struct drm_bridge *companion;
@@ -410,7 +410,7 @@ static const struct drm_bridge_funcs imx8qxp_ldb_bridge_funcs = {
static int imx8qxp_ldb_set_di_id(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct device_node *ep, *remote;
struct device *dev = imx8qxp_ldb->dev;
@@ -456,7 +456,7 @@ imx8qxp_ldb_check_chno_and_dual_link(struct ldb_channel *ldb_ch, int link)
static int imx8qxp_ldb_parse_dt_companion(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct ldb_channel *companion_ldb_ch;
struct device_node *companion;
@@ -586,6 +586,14 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
if (!imx8qxp_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qxp_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qxp_ldb_channel, base.bridge,
+ &imx8qxp_ldb_bridge_funcs);
+ if (IS_ERR(imx8qxp_ldb->channel[i]))
+ return PTR_ERR(imx8qxp_ldb->channel[i]);
+ }
+
imx8qxp_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qxp_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qxp_ldb->clk_pixel);
@@ -611,7 +619,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qxp_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qxp_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -627,7 +635,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
}
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qxp_ldb_ch = &imx8qxp_ldb->channel[i];
+ imx8qxp_ldb_ch = imx8qxp_ldb->channel[i];
ldb_ch = &imx8qxp_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -660,9 +668,9 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qxp_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
- return ret;
+ return 0;
}
static void imx8qxp_ldb_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1d9529dc7f2a..8517b1c953d4 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -63,12 +63,11 @@ struct imx8qxp_pc_channel {
struct drm_bridge *next_bridge;
struct imx8qxp_pc *pc;
unsigned int stream_id;
- bool is_available;
};
struct imx8qxp_pc {
struct device *dev;
- struct imx8qxp_pc_channel ch[2];
+ struct imx8qxp_pc_channel *ch[2];
struct clk *clk_apb;
void __iomem *base;
};
@@ -108,6 +107,7 @@ imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge,
}
static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pc_channel *ch = bridge->driver_private;
@@ -119,7 +119,7 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
ch->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
@@ -306,7 +306,14 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
goto free_child;
}
- ch = &pc->ch[i];
+ ch = devm_drm_bridge_alloc(dev, struct imx8qxp_pc_channel, bridge,
+ &imx8qxp_pc_bridge_funcs);
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ goto free_child;
+ }
+
+ pc->ch[i] = ch;
ch->pc = pc;
ch->stream_id = i;
@@ -332,9 +339,7 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
of_node_put(remote);
ch->bridge.driver_private = ch;
- ch->bridge.funcs = &imx8qxp_pc_bridge_funcs;
ch->bridge.of_node = child;
- ch->is_available = true;
drm_bridge_add(&ch->bridge);
}
@@ -344,8 +349,8 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
free_child:
of_node_put(child);
- if (i == 1 && pc->ch[0].next_bridge)
- drm_bridge_remove(&pc->ch[0].bridge);
+ if (i == 1 && pc->ch[0]->next_bridge)
+ drm_bridge_remove(&pc->ch[0]->bridge);
pm_runtime_disable(dev);
return ret;
@@ -358,13 +363,10 @@ static void imx8qxp_pc_bridge_remove(struct platform_device *pdev)
int i;
for (i = 0; i < 2; i++) {
- ch = &pc->ch[i];
-
- if (!ch->is_available)
- continue;
+ ch = pc->ch[i];
- drm_bridge_remove(&ch->bridge);
- ch->is_available = false;
+ if (ch)
+ drm_bridge_remove(&ch->bridge);
}
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index cd6818db0fd3..e5943506981d 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -128,6 +128,7 @@ static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl)
}
static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -138,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
pl->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
@@ -326,9 +327,10 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- pl = devm_kzalloc(dev, sizeof(*pl), GFP_KERNEL);
- if (!pl)
- return -ENOMEM;
+ pl = devm_drm_bridge_alloc(dev, struct imx8qxp_pixel_link, bridge,
+ &imx8qxp_pixel_link_bridge_funcs);
+ if (IS_ERR(pl))
+ return PTR_ERR(pl);
ret = imx_scu_get_handle(&pl->ipc_handle);
if (ret) {
@@ -383,7 +385,6 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pl);
pl->bridge.driver_private = pl;
- pl->bridge.funcs = &imx8qxp_pixel_link_bridge_funcs;
pl->bridge.of_node = np;
drm_bridge_add(&pl->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index 49dd4f96d52c..111310acab2c 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -48,6 +48,7 @@ struct imx8qxp_pxl2dpi {
#define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge)
static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
@@ -58,7 +59,7 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
p2d->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
@@ -391,9 +392,10 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- p2d = devm_kzalloc(dev, sizeof(*p2d), GFP_KERNEL);
- if (!p2d)
- return -ENOMEM;
+ p2d = devm_drm_bridge_alloc(dev, struct imx8qxp_pxl2dpi, bridge,
+ &imx8qxp_pxl2dpi_bridge_funcs);
+ if (IS_ERR(p2d))
+ return PTR_ERR(p2d);
p2d->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(p2d->regmap)) {
@@ -440,7 +442,6 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
p2d->bridge.driver_private = p2d;
- p2d->bridge.funcs = &imx8qxp_pxl2dpi_bridge_funcs;
p2d->bridge.of_node = np;
drm_bridge_add(&p2d->bridge);
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index 21152a1c28f7..cf813672b4ff 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -665,13 +665,14 @@ it6263_bridge_mode_valid(struct drm_bridge *bridge,
}
static int it6263_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6263 *it = bridge_to_it6263(bridge);
struct drm_connector *connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, it->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -679,7 +680,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
- connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ connector = drm_bridge_connector_init(bridge->dev, encoder);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
dev_err(it->dev, "failed to initialize bridge connector: %d\n",
@@ -687,12 +688,13 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(connector, bridge->encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
-static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+it6263_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it6263 *it = bridge_to_it6263(bridge);
@@ -815,9 +817,10 @@ static int it6263_probe(struct i2c_client *client)
struct it6263 *it;
int ret;
- it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
- if (!it)
- return -ENOMEM;
+ it = devm_drm_bridge_alloc(dev, struct it6263, bridge,
+ &it6263_bridge_funcs);
+ if (IS_ERR(it))
+ return PTR_ERR(it);
it->dev = dev;
it->hdmi_i2c = client;
@@ -865,7 +868,6 @@ static int it6263_probe(struct i2c_client *client)
i2c_set_clientdata(client, it);
- it->bridge.funcs = &it6263_bridge_funcs;
it->bridge.of_node = dev->of_node;
/* IT6263 chip doesn't support HPD interrupt. */
it->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 8a607558ac89..89649c17ffad 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -771,40 +771,6 @@ static void it6505_calc_video_info(struct it6505 *it6505)
DRM_MODE_ARG(&it6505->video_info));
}
-static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux,
- struct it6505_drm_dp_link *link,
- u8 mode)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < DPCD_V_1_1)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= mode;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- if (mode == DP_SET_POWER_D0) {
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
static void it6505_clear_int(struct it6505 *it6505)
{
it6505_write(it6505, INT_STATUS_01, 0xFF);
@@ -2578,8 +2544,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
}
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
@@ -2910,8 +2875,7 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505)
}
if (it6505->hpd_state) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",
@@ -3124,6 +3088,7 @@ static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge)
}
static int it6505_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3233,8 +3198,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
it6505_int_mask_enable(it6505);
it6505_video_reset(it6505);
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -3246,8 +3210,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_DEBUG_DRIVER(dev, "start");
if (it6505->powered) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D3);
+ drm_dp_link_power_down(&it6505->aux, it6505->link.revision);
it6505_video_disable(it6505);
}
}
@@ -3275,7 +3238,7 @@ static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
}
static enum drm_connector_status
-it6505_bridge_detect(struct drm_bridge *bridge)
+it6505_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3620,9 +3583,10 @@ static int it6505_i2c_probe(struct i2c_client *client)
struct extcon_dev *extcon;
int err;
- it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL);
- if (!it6505)
- return -ENOMEM;
+ it6505 = devm_drm_bridge_alloc(&client->dev, struct it6505, bridge,
+ &it6505_bridge_funcs);
+ if (IS_ERR(it6505))
+ return PTR_ERR(it6505);
mutex_init(&it6505->extcon_lock);
mutex_init(&it6505->mode_lock);
@@ -3697,7 +3661,6 @@ static int it6505_i2c_probe(struct i2c_client *client)
it6505->aux.transfer = it6505_aux_transfer;
drm_dp_aux_init(&it6505->aux);
- it6505->bridge.funcs = &it6505_bridge_funcs;
it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
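
The it6505 hunks delete the driver's private DPCD power helper in favor of the shared drm_dp_link_power_up()/drm_dp_link_power_down() helpers, which take the AUX channel plus the cached DPCD revision and centralize the DP_SET_POWER handling, including the skip on DPCD < 1.1 and the roughly 1 ms D0 wake-up delay. A sketch of the call sites, assuming only a hypothetical driver with an AUX channel and a stored link revision:

#include <drm/display/drm_dp_helper.h>

static int my_sink_power_on(struct drm_dp_aux *aux, u8 dpcd_rev)
{
	/* Wake the sink before starting AUX/video traffic. */
	return drm_dp_link_power_up(aux, dpcd_rev);
}

static void my_sink_power_off(struct drm_dp_aux *aux, u8 dpcd_rev)
{
	/* Put the sink back into its power-saving state on disable. */
	drm_dp_link_power_down(aux, dpcd_rev);
}
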
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index b9f90f32145d..aa7b1dcc5d70 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -586,6 +586,7 @@ static bool it66121_is_hpd_detect(struct it66121_ctx *ctx)
}
static int it66121_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -594,7 +595,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ ret = drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
if (ret)
return ret;
@@ -842,7 +843,8 @@ static enum drm_mode_status it66121_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static enum drm_connector_status it66121_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+it66121_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -1515,9 +1517,10 @@ static int it66121_probe(struct i2c_client *client)
return -ENXIO;
}
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct it66121_ctx, bridge,
+ &it66121_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
if (!ep)
@@ -1576,7 +1579,6 @@ static int it66121_probe(struct i2c_client *client)
return -ENODEV;
}
- ctx->bridge.funcs = &it66121_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 52da204f5740..342374cb8fc6 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -408,7 +408,7 @@ lt8912_connector_detect(struct drm_connector *connector, bool force)
struct lt8912 *lt = connector_to_lt8912(connector);
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
- return drm_bridge_detect(lt->hdmi_port);
+ return drm_bridge_detect(lt->hdmi_port, connector);
return lt8912_check_cable_status(lt);
}
@@ -543,12 +543,13 @@ exit:
}
static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
+ ret = drm_bridge_attach(encoder, lt->hdmi_port, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0) {
dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
@@ -606,12 +607,12 @@ lt8912_bridge_mode_valid(struct drm_bridge *bridge,
}
static enum drm_connector_status
-lt8912_bridge_detect(struct drm_bridge *bridge)
+lt8912_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
- return drm_bridge_detect(lt->hdmi_port);
+ return drm_bridge_detect(lt->hdmi_port, connector);
return lt8912_check_cable_status(lt);
}
@@ -760,9 +761,10 @@ static int lt8912_probe(struct i2c_client *client)
int ret = 0;
struct device *dev = &client->dev;
- lt = devm_kzalloc(dev, sizeof(struct lt8912), GFP_KERNEL);
- if (!lt)
- return -ENOMEM;
+ lt = devm_drm_bridge_alloc(dev, struct lt8912, bridge,
+ &lt8912_bridge_funcs);
+ if (IS_ERR(lt))
+ return PTR_ERR(lt);
lt->dev = dev;
lt->i2c_client[0] = client;
@@ -777,7 +779,6 @@ static int lt8912_probe(struct i2c_client *client)
i2c_set_clientdata(client, lt);
- lt->bridge.funcs = &lt8912_bridge_funcs;
lt->bridge.of_node = dev->of_node;
lt->bridge.ops = (DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_DETECT);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 0fc5ea18fe6a..399fa7eebd49 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -99,11 +99,12 @@ static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
}
static int lt9211_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
@@ -726,9 +727,9 @@ static int lt9211_probe(struct i2c_client *client)
struct lt9211 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct lt9211, bridge, &lt9211_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -754,7 +755,6 @@ static int lt9211_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &lt9211_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 026803034231..a2d032ee4744 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -543,7 +543,8 @@ static int lt9611_regulator_enable(struct lt9611 *lt9611)
return 0;
}
-static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+lt9611_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
unsigned int reg_val = 0;
@@ -740,11 +741,12 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
}
static int lt9611_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611->next_bridge,
+ return drm_bridge_attach(encoder, lt9611->next_bridge,
bridge, flags);
}
@@ -935,8 +937,8 @@ lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
return MODE_OK;
}
-static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static int lt9611_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@@ -951,8 +953,8 @@ static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
return 0;
}
-static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+static int lt9611_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
{
@@ -973,8 +975,8 @@ static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
&hparms->cea);
}
-static void lt9611_hdmi_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static void lt9611_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@@ -1071,9 +1073,10 @@ static int lt9611_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611 = devm_kzalloc(dev, sizeof(*lt9611), GFP_KERNEL);
- if (!lt9611)
- return -ENOMEM;
+ lt9611 = devm_drm_bridge_alloc(dev, struct lt9611, bridge,
+ &lt9611_bridge_funcs);
+ if (IS_ERR(lt9611))
+ return PTR_ERR(lt9611);
lt9611->dev = dev;
lt9611->client = client;
@@ -1126,11 +1129,10 @@ static int lt9611_probe(struct i2c_client *client)
/* Disable Audio InfoFrame, enabled by default */
regmap_update_bits(lt9611->regmap, 0x843d, LT9611_INFOFRAME_AUDIO, 0);
- lt9611->bridge.funcs = &lt9611_bridge_funcs;
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
- DRM_BRIDGE_OP_HDMI;
+ DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO;
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
lt9611->bridge.vendor = "Lontium";
lt9611->bridge.product = "LT9611";
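
The lt9611 audio hunks track the HDMI-audio rework of the bridge codec hooks: the callbacks now take the bridge first and the connector second, and a bridge implementing them advertises DRM_BRIDGE_OP_HDMI_AUDIO next to DRM_BRIDGE_OP_HDMI, as the ops update above does. A sketch of the new callback shape, with hypothetical names:

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>

static int my_hdmi_audio_startup(struct drm_bridge *bridge,
				 struct drm_connector *connector)
{
	/* Enable the audio path on the bridge. */
	return 0;
}

static void my_hdmi_audio_shutdown(struct drm_bridge *bridge,
				   struct drm_connector *connector)
{
	/* Mute and tear the audio path back down. */
}
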
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index f4c3ff1fdc69..38fb8776c0f4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -280,11 +280,12 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
}
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
+ return drm_bridge_attach(encoder, lt9611uxc->next_bridge,
bridge, flags);
}
@@ -352,7 +353,8 @@ static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge,
lt9611uxc_unlock(lt9611uxc);
}
-static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+lt9611uxc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
@@ -774,9 +776,9 @@ static int lt9611uxc_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL);
- if (!lt9611uxc)
- return -ENOMEM;
+ lt9611uxc = devm_drm_bridge_alloc(dev, struct lt9611uxc, bridge, &lt9611uxc_bridge_funcs);
+ if (IS_ERR(lt9611uxc))
+ return PTR_ERR(lt9611uxc);
lt9611uxc->dev = dev;
lt9611uxc->client = client;
@@ -855,7 +857,6 @@ retry:
i2c_set_clientdata(client, lt9611uxc);
- lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs;
lt9611uxc->bridge.of_node = client->dev.of_node;
lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
if (lt9611uxc->hpd_supported)
@@ -880,7 +881,11 @@ retry:
}
}
- return lt9611uxc_audio_init(dev, lt9611uxc);
+ ret = lt9611uxc_audio_init(dev, lt9611uxc);
+ if (ret)
+ goto err_remove_bridge;
+
+ return 0;
err_remove_bridge:
free_irq(client->irq, lt9611uxc);
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 389af0233fcd..e6a7147e141b 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -34,11 +34,12 @@ static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
}
static int lvds_codec_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
- return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
+ return drm_bridge_attach(encoder, lvds_codec->panel_bridge,
bridge, flags);
}
@@ -117,9 +118,10 @@ static int lvds_codec_probe(struct platform_device *pdev)
u32 val;
int ret;
- lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
- if (!lvds_codec)
- return -ENOMEM;
+ lvds_codec = devm_drm_bridge_alloc(dev, struct lvds_codec, bridge,
+ &funcs);
+ if (IS_ERR(lvds_codec))
+ return PTR_ERR(lvds_codec);
lvds_codec->dev = &pdev->dev;
lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
@@ -155,8 +157,6 @@ static int lvds_codec_probe(struct platform_device *pdev)
if (IS_ERR(lvds_codec->panel_bridge))
return PTR_ERR(lvds_codec->panel_bridge);
- lvds_codec->bridge.funcs = &funcs;
-
/*
* Decoder input LVDS format is a property of the decoder chip or even
* its strapping. Handle data-mapping the same way lvds-panel does. In
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index a47aabf134fd..c9e6505cbd88 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -120,7 +120,8 @@ drm_connector_helper_funcs ge_b850v3_lvds_connector_helper_funcs = {
.get_modes = ge_b850v3_lvds_get_modes,
};
-static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct i2c_client *stdp4028_i2c =
ge_b850v3_lvds_ptr->stdp4028_i2c;
@@ -141,7 +142,7 @@ static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge
static enum drm_connector_status ge_b850v3_lvds_detect(struct drm_connector *connector,
bool force)
{
- return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge);
+ return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge, connector);
}
static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
@@ -190,6 +191,7 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
}
static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct i2c_client *stdp4028_i2c
@@ -224,13 +226,11 @@ static int ge_b850v3_lvds_init(struct device *dev)
if (ge_b850v3_lvds_ptr)
goto success;
- ge_b850v3_lvds_ptr = devm_kzalloc(dev,
- sizeof(*ge_b850v3_lvds_ptr),
- GFP_KERNEL);
-
- if (!ge_b850v3_lvds_ptr) {
+ ge_b850v3_lvds_ptr = devm_drm_bridge_alloc(dev, struct ge_b850v3_lvds, bridge,
+ &ge_b850v3_lvds_funcs);
+ if (IS_ERR(ge_b850v3_lvds_ptr)) {
mutex_unlock(&ge_b850v3_lvds_dev_mutex);
- return -ENOMEM;
+ return PTR_ERR(ge_b850v3_lvds_ptr);
}
success:
@@ -263,7 +263,6 @@ static int ge_b850v3_register(void)
struct device *dev = &stdp4028_i2c->dev;
/* drm bridge initialization */
- ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
ge_b850v3_lvds_ptr->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID;
ge_b850v3_lvds_ptr->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index 53dd140a1b8d..9f4ff82bc6b4 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -104,11 +104,12 @@ static void lvds_serialiser_on(struct mchp_lvds *lvds)
}
static int mchp_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mchp_lvds *lvds = bridge_to_lvds(bridge);
- return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
+ return drm_bridge_attach(encoder, lvds->panel_bridge,
bridge, flags);
}
@@ -156,9 +157,10 @@ static int mchp_lvds_probe(struct platform_device *pdev)
if (!dev->of_node)
return -ENODEV;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(&pdev->dev, struct mchp_lvds, bridge,
+ &mchp_lvds_bridge_funcs);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = dev;
@@ -191,7 +193,6 @@ static int mchp_lvds_probe(struct platform_device *pdev)
lvds->bridge.of_node = dev->of_node;
lvds->bridge.type = DRM_MODE_CONNECTOR_LVDS;
- lvds->bridge.funcs = &mchp_lvds_bridge_funcs;
dev_set_drvdata(dev, lvds);
ret = devm_pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index d04c62a0cb9f..2f7429b24fc2 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -910,6 +910,7 @@ static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
}
static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
@@ -919,7 +920,7 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
- return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, panel_bridge, bridge, flags);
}
static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
@@ -1148,9 +1149,10 @@ static int nwl_dsi_probe(struct platform_device *pdev)
struct nwl_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct nwl_dsi, bridge,
+ &nwl_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->dev = dev;
@@ -1179,7 +1181,6 @@ static int nwl_dsi_probe(struct platform_device *pdev)
dsi->quirks = (uintptr_t)attr->data;
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.timings = &nwl_dsi_timings;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 27261b2ac9c8..7acb11f16dc1 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -214,13 +214,14 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
};
static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
/* Let this driver create connector if requested */
- ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ptn_bridge->panel_bridge,
bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -239,7 +240,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
drm_connector_attach_encoder(&ptn_bridge->connector,
- bridge->encoder);
+ encoder);
drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
@@ -260,10 +261,10 @@ static int ptn3460_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
- if (!ptn_bridge) {
- return -ENOMEM;
- }
+ ptn_bridge = devm_drm_bridge_alloc(dev, struct ptn3460_bridge, bridge,
+ &ptn3460_bridge_funcs);
+ if (IS_ERR(ptn_bridge))
+ return PTR_ERR(ptn_bridge);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -299,7 +300,6 @@ static int ptn3460_probe(struct i2c_client *client)
return ret;
}
- ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
ptn_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
ptn_bridge->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ptn_bridge->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 258c85c83a28..184a8b7049a7 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -5,6 +5,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -58,6 +59,7 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
};
static int panel_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
@@ -81,7 +83,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
drm_panel_bridge_set_orientation(connector, bridge);
drm_connector_attach_encoder(&panel_bridge->connector,
- bridge->encoder);
+ encoder);
if (bridge->dev->registered) {
if (connector->funcs->reset)
@@ -286,18 +288,18 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
if (!panel)
return ERR_PTR(-EINVAL);
- panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge),
- GFP_KERNEL);
- if (!panel_bridge)
- return ERR_PTR(-ENOMEM);
+ panel_bridge = devm_drm_bridge_alloc(panel->dev, struct panel_bridge, bridge,
+ &panel_bridge_bridge_funcs);
+ if (IS_ERR(panel_bridge))
+ return (void *)panel_bridge;
panel_bridge->connector_type = connector_type;
panel_bridge->panel = panel;
- panel_bridge->bridge.funcs = &panel_bridge_bridge_funcs;
panel_bridge->bridge.of_node = panel->dev->of_node;
panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
panel_bridge->bridge.type = connector_type;
+ panel_bridge->bridge.pre_enable_prev_first = panel->prepare_prev_first;
drm_bridge_add(&panel_bridge->bridge);
@@ -326,7 +328,8 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
panel_bridge = drm_bridge_to_panel_bridge(bridge);
drm_bridge_remove(bridge);
- devm_kfree(panel_bridge->panel->dev, bridge);
+ /* TODO remove this after reworking panel_bridge lifetime */
+ devm_drm_put_bridge(panel_bridge->panel->dev, bridge);
}
EXPORT_SYMBOL(drm_panel_bridge_remove);
@@ -412,8 +415,6 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
return bridge;
}
- bridge->pre_enable_prev_first = panel->prepare_prev_first;
-
*ptr = bridge;
devres_add(dev, ptr);
@@ -455,8 +456,6 @@ struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
if (ret)
return ERR_PTR(ret);
- bridge->pre_enable_prev_first = panel->prepare_prev_first;
-
return bridge;
}
EXPORT_SYMBOL(drmm_panel_bridge_add);
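
Two consequences of the allocation move are visible in the panel.c hunks: the panel's prepare_prev_first flag is now copied to the bridge once in drm_panel_bridge_add_typed(), so the devm_/drmm_ wrappers drop their duplicated assignments, and removal goes through devm_drm_put_bridge(), because a bridge from devm_drm_bridge_alloc() is refcounted and must not be devm_kfree()d directly. In sketch form, for a hypothetical release path:

#include <drm/drm_bridge.h>

static void my_panel_bridge_release(struct device *dev,
				    struct drm_bridge *bridge)
{
	/* A devm_drm_bridge_alloc()ed bridge is refcounted: put, don't kfree. */
	devm_drm_put_bridge(dev, bridge);
}
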
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 13ada42a5514..f879a1df077d 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -418,6 +418,7 @@ static void ps8622_post_disable(struct drm_bridge *bridge)
}
static int ps8622_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
@@ -448,9 +449,10 @@ static int ps8622_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
- if (!ps8622)
- return -ENOMEM;
+ ps8622 = devm_drm_bridge_alloc(dev, struct ps8622_bridge, bridge,
+ &ps8622_bridge_funcs);
+ if (IS_ERR(ps8622))
+ return PTR_ERR(ps8622);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -508,7 +510,6 @@ static int ps8622_probe(struct i2c_client *client)
ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS;
}
- ps8622->bridge.funcs = &ps8622_bridge_funcs;
ps8622->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ps8622->bridge.of_node = dev->of_node;
drm_bridge_add(&ps8622->bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index a42138b33258..825777a5758f 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -494,6 +494,7 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -518,7 +519,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the panel-bridge to the dsi bridge */
- ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
if (ret)
goto err_bridge_attach;
@@ -635,9 +636,10 @@ static int ps8640_probe(struct i2c_client *client)
int ret;
u32 i;
- ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
- if (!ps_bridge)
- return -ENOMEM;
+ ps_bridge = devm_drm_bridge_alloc(dev, struct ps8640, bridge,
+ &ps8640_bridge_funcs);
+ if (IS_ERR(ps_bridge))
+ return PTR_ERR(ps_bridge);
mutex_init(&ps_bridge->aux_lock);
@@ -661,7 +663,6 @@ static int ps8640_probe(struct i2c_client *client)
if (IS_ERR(ps_bridge->gpio_reset))
return PTR_ERR(ps_bridge->gpio_reset);
- ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
ps_bridge->bridge.of_node = dev->of_node;
ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 54de6ed2fae8..b5dd71f6a990 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -14,11 +14,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/irq.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/units.h>
#include <video/mipi_display.h>
@@ -557,10 +559,6 @@ static void samsung_dsim_reset(struct samsung_dsim *dsi)
samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val);
}
-#ifndef MHZ
-#define MHZ (1000 * 1000)
-#endif
-
static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
unsigned long fin,
unsigned long fout,
@@ -574,8 +572,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
u16 _m, best_m;
u8 _s, best_s;
- p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * MHZ));
- p_max = fin / (driver_data->pll_fin_min * MHZ);
+ p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * HZ_PER_MHZ));
+ p_max = fin / (driver_data->pll_fin_min * HZ_PER_MHZ);
for (_p = p_min; _p <= p_max; ++_p) {
for (_s = 0; _s <= 5; ++_s) {
@@ -590,8 +588,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
tmp = (u64)_m * fin;
do_div(tmp, _p);
- if (tmp < driver_data->min_freq * MHZ ||
- tmp > driver_data->max_freq * MHZ)
+ if (tmp < driver_data->min_freq * HZ_PER_MHZ ||
+ tmp > driver_data->max_freq * HZ_PER_MHZ)
continue;
tmp = (u64)_m * fin;
@@ -634,7 +632,7 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
* limit.
*/
fin = clk_get_rate(clk_get_parent(dsi->pll_clk));
- while (fin > driver_data->pll_fin_max * MHZ)
+ while (fin > driver_data->pll_fin_max * HZ_PER_MHZ)
fin /= 2;
clk_set_rate(dsi->pll_clk, fin);
@@ -660,10 +658,11 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
if (driver_data->has_freqband) {
static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ 100 * HZ_PER_MHZ, 120 * HZ_PER_MHZ, 160 * HZ_PER_MHZ,
+ 200 * HZ_PER_MHZ, 270 * HZ_PER_MHZ, 320 * HZ_PER_MHZ,
+ 390 * HZ_PER_MHZ, 450 * HZ_PER_MHZ, 510 * HZ_PER_MHZ,
+ 560 * HZ_PER_MHZ, 640 * HZ_PER_MHZ, 690 * HZ_PER_MHZ,
+ 770 * HZ_PER_MHZ, 870 * HZ_PER_MHZ, 950 * HZ_PER_MHZ,
};
int band;
@@ -723,7 +722,7 @@ static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
esc_clk = byte_clk / esc_div;
- if (esc_clk > 20 * MHZ) {
+ if (esc_clk > 20 * HZ_PER_MHZ) {
++esc_div;
esc_clk = byte_clk / esc_div;
}
@@ -898,8 +897,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
* The user manual describes that following bits are ignored in
* command mode.
*/
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
- reg |= DSIM_MFLUSH_VS;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
reg |= DSIM_SYNC_INFORM;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
@@ -1095,7 +1092,7 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
bool first = !xfer->tx_done;
u32 reg;
- dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
+ dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
if (length > DSI_TX_FIFO_SIZE)
@@ -1235,43 +1232,34 @@ static void samsung_dsim_transfer_start(struct samsung_dsim *dsi)
{
unsigned long flags;
struct samsung_dsim_transfer *xfer;
- bool start = false;
-again:
spin_lock_irqsave(&dsi->transfer_lock, flags);
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return;
- }
-
- xfer = list_first_entry(&dsi->transfer_list,
- struct samsung_dsim_transfer, list);
+ while (!list_empty(&dsi->transfer_list)) {
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- if (xfer->packet.payload_length &&
- xfer->tx_done == xfer->packet.payload_length)
- /* waiting for RX */
- return;
+ if (xfer->packet.payload_length &&
+ xfer->tx_done == xfer->packet.payload_length)
+ /* waiting for RX */
+ return;
- samsung_dsim_send_to_fifo(dsi, xfer);
+ samsung_dsim_send_to_fifo(dsi, xfer);
- if (xfer->packet.payload_length || xfer->rx_len)
- return;
+ if (xfer->packet.payload_length || xfer->rx_len)
+ return;
- xfer->result = 0;
- complete(&xfer->completed);
+ xfer->result = 0;
+ complete(&xfer->completed);
- spin_lock_irqsave(&dsi->transfer_lock, flags);
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
+ list_del_init(&xfer->list);
+ }
spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (start)
- goto again;
}
static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
@@ -1293,7 +1281,7 @@ static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
spin_unlock_irqrestore(&dsi->transfer_lock, flags);
dev_dbg(dsi->dev,
- "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+ "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
xfer->rx_done);
@@ -1640,11 +1628,12 @@ static void samsung_dsim_mode_set(struct drm_bridge *bridge,
}
static int samsung_dsim_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge, bridge,
flags);
}
@@ -1935,9 +1924,9 @@ int samsung_dsim_probe(struct platform_device *pdev)
struct samsung_dsim *dsi;
int ret, i;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
@@ -2007,7 +1996,6 @@ int samsung_dsim_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
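
Beyond the bridge conversion, the samsung-dsim hunks make two standalone cleanups: the driver-local MHZ macro is dropped for HZ_PER_MHZ from <linux/units.h>, and samsung_dsim_transfer_start() trades its backwards goto for a while loop that keeps the same locking discipline (hold the spinlock only to peek at and unlink list entries, drop it around the FIFO work). A simplified sketch of that loop shape, with my_* types and a stubbed FIFO helper standing in for the driver's own:

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/units.h>	/* HZ_PER_MHZ replaces the local MHZ macro */

struct my_xfer {
	struct list_head list;
	struct completion completed;
};

struct my_dsi {
	spinlock_t transfer_lock;
	struct list_head transfer_list;
};

/* Pushes one transfer to the HW FIFO; returns true once it has completed. */
static bool my_send_to_fifo(struct my_dsi *dsi, struct my_xfer *xfer)
{
	return true;	/* stub for the sketch */
}

static void my_transfer_start(struct my_dsi *dsi)
{
	struct my_xfer *xfer;
	unsigned long flags;

	spin_lock_irqsave(&dsi->transfer_lock, flags);

	while (!list_empty(&dsi->transfer_list)) {
		xfer = list_first_entry(&dsi->transfer_list,
					struct my_xfer, list);

		spin_unlock_irqrestore(&dsi->transfer_lock, flags);

		if (!my_send_to_fifo(dsi, xfer))
			return;		/* still in flight; IRQ restarts us */

		complete(&xfer->completed);

		spin_lock_irqsave(&dsi->transfer_lock, flags);
		list_del_init(&xfer->list);
	}

	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
}
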
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 914a2609a685..d537b1d036fb 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -416,6 +416,7 @@ out:
}
static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -424,7 +425,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, sii902x->next_bridge,
+ return drm_bridge_attach(encoder, sii902x->next_bridge,
bridge, flags);
drm_connector_helper_add(&sii902x->connector,
@@ -452,12 +453,13 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sii902x->connector, encoder);
return 0;
}
-static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+sii902x_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -1134,10 +1136,10 @@ static int sii902x_init(struct sii902x *sii902x)
if (ret)
goto err_unreg_audio;
- sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ sii902x->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
if (sii902x->i2c->irq > 0)
sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD;
@@ -1168,9 +1170,9 @@ static int sii902x_probe(struct i2c_client *client)
return -EIO;
}
- sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
- if (!sii902x)
- return -ENOMEM;
+ sii902x = devm_drm_bridge_alloc(dev, struct sii902x, bridge, &sii902x_bridge_funcs);
+ if (IS_ERR(sii902x))
+ return PTR_ERR(sii902x);
sii902x->i2c = client;
sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
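
The sii902x hunk above also illustrates the second recurring signature change: ->detect() now takes the drm_connector doing the probing, and connector-side callers forward themselves through drm_bridge_detect(bridge, connector), as the simple-bridge and tfp410 hunks below do. A hedged sketch with illustrative names:

static enum drm_connector_status
foo_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
	/* Hardware HPD query elided in this sketch. */
	return connector_status_connected;
}

static enum drm_connector_status
foo_connector_detect(struct drm_connector *connector, bool force)
{
	struct foo_bridge *foo = connector_to_foo(connector);	/* hypothetical */

	/* Forward the probing connector to the bridge. */
	return drm_bridge_detect(foo->next_bridge, connector);
}
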
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index cd7837c9a6e0..bb1bed03eb5b 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -888,9 +888,10 @@ static int sii9234_probe(struct i2c_client *client)
struct device *dev = &client->dev;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii9234, bridge,
+ &sii9234_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -921,7 +922,6 @@ static int sii9234_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii9234_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 28a2e1ee04b2..9e48ad39e1cc 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2203,6 +2203,7 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
}
static int sii8620_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
@@ -2290,9 +2291,10 @@ static int sii8620_probe(struct i2c_client *client)
struct sii8620 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii8620, bridge,
+ &sii8620_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -2335,7 +2337,6 @@ static int sii8620_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii8620_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index ab0b0e36e97a..3d15ddd39470 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -90,7 +90,7 @@ simple_bridge_connector_detect(struct drm_connector *connector, bool force)
{
struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
- return drm_bridge_detect(sbridge->next_bridge);
+ return drm_bridge_detect(sbridge->next_bridge, connector);
}
static const struct drm_connector_funcs simple_bridge_con_funcs = {
@@ -103,12 +103,13 @@ static const struct drm_connector_funcs simple_bridge_con_funcs = {
};
static int simple_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, sbridge->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -127,7 +128,7 @@ static int simple_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(&sbridge->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sbridge->connector, encoder);
return 0;
}
@@ -167,9 +168,10 @@ static int simple_bridge_probe(struct platform_device *pdev)
struct simple_bridge *sbridge;
struct device_node *remote;
- sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
- if (!sbridge)
- return -ENOMEM;
+ sbridge = devm_drm_bridge_alloc(&pdev->dev, struct simple_bridge,
+ bridge, &simple_bridge_bridge_funcs);
+ if (IS_ERR(sbridge))
+ return PTR_ERR(sbridge);
sbridge->info = of_device_get_match_data(&pdev->dev);
@@ -203,7 +205,6 @@ static int simple_bridge_probe(struct platform_device *pdev)
"Unable to retrieve enable GPIO\n");
/* Register the bridge. */
- sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
sbridge->bridge.of_node = pdev->dev.of_node;
sbridge->bridge.timings = sbridge->info->timings;
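
simple-bridge also shows how a connector-owning bridge behaves under the new signature: the downstream chain is attached with DRM_BRIDGE_ATTACH_NO_CONNECTOR, and the locally created connector binds to the encoder argument. A sketch under those assumptions (foo_* names and the VGA connector type are illustrative):

static int foo_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
		      enum drm_bridge_attach_flags flags)
{
	struct foo_bridge *foo = bridge_to_foo(bridge);	/* hypothetical helper */
	int ret;

	/* The downstream bridge must never create a second connector. */
	ret = drm_bridge_attach(encoder, foo->next_bridge, bridge,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret < 0)
		return ret;

	/* If the caller creates the connector itself, we are done. */
	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return 0;

	ret = drm_connector_init(bridge->dev, &foo->connector,
				 &foo_connector_funcs, DRM_MODE_CONNECTOR_VGA);
	if (ret < 0)
		return ret;

	drm_connector_attach_encoder(&foo->connector, encoder);
	return 0;
}
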
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 6166f197e37b..39332c57f2c5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -8,6 +8,7 @@
*/
#include <linux/completion.h>
#include <linux/hdmi.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/module.h>
@@ -439,8 +440,8 @@ static void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned long lo
dw_hdmi_qp_set_cts_n(hdmi, cts, n);
}
-static int dw_hdmi_qp_audio_enable(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static int dw_hdmi_qp_audio_enable(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
@@ -450,8 +451,8 @@ static int dw_hdmi_qp_audio_enable(struct drm_connector *connector,
return 0;
}
-static int dw_hdmi_qp_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+static int dw_hdmi_qp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
{
@@ -496,8 +497,8 @@ static void dw_hdmi_qp_audio_disable_regs(struct dw_hdmi_qp *hdmi)
AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE);
}
-static void dw_hdmi_qp_audio_disable(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static void dw_hdmi_qp_audio_disable(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
@@ -875,7 +876,7 @@ static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge,
}
static enum drm_connector_status
-dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge)
+dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
@@ -1045,9 +1046,10 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(-ENODEV);
}
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi_qp, bridge,
+ &dw_hdmi_qp_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return ERR_CAST(hdmi);
hdmi->dev = dev;
@@ -1073,10 +1075,10 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(ret);
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_HPD;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
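
Besides the attach/detect updates, dw-hdmi-qp swaps its HDMI audio callbacks to the bridge-first argument order used elsewhere in drm_bridge_funcs and advertises DRM_BRIDGE_OP_HDMI_AUDIO. Sketch of the resulting hook shape (names illustrative; the register programming is elided):

static int foo_hdmi_audio_enable(struct drm_bridge *bridge,
				 struct drm_connector *connector)
{
	struct foo_hdmi *hdmi = bridge_to_foo_hdmi(bridge);	/* hypothetical */

	/* Unmute and route audio packets for this connector here. */
	return 0;
}
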
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0890add5f707..206b099a35e9 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/irq.h>
@@ -22,8 +23,8 @@
#include <media/cec-notifier.h>
-#include <uapi/linux/media-bus-format.h>
-#include <uapi/linux/videodev2.h>
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -2889,12 +2890,13 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
}
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ return drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
return dw_hdmi_connector_create(hdmi);
@@ -2976,7 +2978,8 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
mutex_unlock(&hdmi->mutex);
}
-static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+dw_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct dw_hdmi *hdmi = bridge->driver_private;
@@ -3332,9 +3335,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
u8 config0;
u8 config3;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi, bridge, &dw_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return hdmi;
hdmi->plat_data = plat_data;
hdmi->dev = dev;
@@ -3494,7 +3497,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
}
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.interlace_allowed = true;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 2b6e70a49f43..8fc2e282ff11 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/media-bus-format.h>
@@ -1072,15 +1073,16 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->panel_bridge, bridge,
flags);
}
@@ -1193,9 +1195,10 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
struct dw_mipi_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return ERR_PTR(-ENOMEM);
+ dsi = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi, bridge,
+ &dw_mipi_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return ERR_CAST(dsi);
dsi->dev = dev;
dsi->plat_data = plat_data;
@@ -1264,7 +1267,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
}
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &dw_mipi_dsi_bridge_funcs;
dsi->bridge.of_node = pdev->dev.of_node;
return dsi;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index 5fd7a459efdd..5926a3a05d79 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -870,15 +871,16 @@ dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi2->panel_bridge, bridge,
flags);
}
@@ -913,9 +915,10 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
struct dw_mipi_dsi2 *dsi2;
int ret;
- dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
- if (!dsi2)
- return ERR_PTR(-ENOMEM);
+ dsi2 = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi2, bridge,
+ &dw_mipi_dsi2_bridge_funcs);
+ if (IS_ERR(dsi2))
+ return ERR_CAST(dsi2);
dsi2->dev = dev;
dsi2->plat_data = plat_data;
@@ -980,7 +983,6 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
}
dsi2->bridge.driver_private = dsi2;
- dsi2->bridge.funcs = &dw_mipi_dsi2_bridge_funcs;
dsi2->bridge.of_node = pdev->dev.of_node;
return dsi2;
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 49c76027f831..98df3e667d4a 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -202,11 +202,12 @@ static void tc358762_enable(struct drm_bridge *bridge,
}
static int tc358762_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
bridge, flags);
}
@@ -264,9 +265,10 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
struct tc358762 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358762), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358762, bridge,
+ &tc358762_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -287,7 +289,6 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358762_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 3d3d135b4348..084e9d898e22 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -295,11 +295,12 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
}
static int tc358764_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
@@ -346,9 +347,10 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
struct tc358764 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358764), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358764, bridge,
+ &tc358764_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -367,7 +369,6 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358764_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 39e2d3a7a27d..4097fef4b86b 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -344,6 +344,14 @@
#define COLOR_BAR_MODE_BARS 2
#define PLL_DBG 0x0a04
+enum tc_mode {
+ mode_dpi_to_edp = BIT(1) | BIT(2),
+ mode_dpi_to_dp = BIT(1),
+ mode_dsi_to_edp = BIT(0) | BIT(2),
+ mode_dsi_to_dp = BIT(0),
+ mode_dsi_to_dpi = BIT(0) | BIT(1),
+};
+
static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);
@@ -1752,7 +1760,8 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
.get_modes = tc_connector_get_modes,
};
-static enum drm_connector_status tc_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+tc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct tc_data *tc = bridge_to_tc(bridge);
bool conn;
@@ -1777,7 +1786,7 @@ tc_connector_detect(struct drm_connector *connector, bool force)
struct tc_data *tc = connector_to_tc(connector);
if (tc->hpd_pin >= 0)
- return tc_bridge_detect(&tc->bridge);
+ return tc_bridge_detect(&tc->bridge, connector);
if (tc->panel_bridge)
return connector_status_connected;
@@ -1795,6 +1804,7 @@ static const struct drm_connector_funcs tc_connector_funcs = {
};
static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1807,6 +1817,7 @@ static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
}
static int tc_edp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
@@ -2325,7 +2336,6 @@ static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc)
if (bridge) {
tc->panel_bridge = bridge;
tc->bridge.type = DRM_MODE_CONNECTOR_DPI;
- tc->bridge.funcs = &tc_dpi_bridge_funcs;
return 0;
}
@@ -2358,7 +2368,6 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
tc->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
}
- tc->bridge.funcs = &tc_edp_bridge_funcs;
if (tc->hpd_pin >= 0)
tc->bridge.ops |= DRM_BRIDGE_OP_DETECT;
tc->bridge.ops |= DRM_BRIDGE_OP_EDID;
@@ -2366,17 +2375,11 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
return 0;
}
-static int tc_probe_bridge_endpoint(struct tc_data *tc)
+static enum tc_mode tc_probe_get_mode(struct device *dev)
{
- struct device *dev = tc->dev;
struct of_endpoint endpoint;
struct device_node *node = NULL;
- const u8 mode_dpi_to_edp = BIT(1) | BIT(2);
- const u8 mode_dpi_to_dp = BIT(1);
- const u8 mode_dsi_to_edp = BIT(0) | BIT(2);
- const u8 mode_dsi_to_dp = BIT(0);
- const u8 mode_dsi_to_dpi = BIT(0) | BIT(1);
- u8 mode = 0;
+ enum tc_mode mode = 0;
/*
* Determine bridge configuration.
@@ -2399,7 +2402,28 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return -EINVAL;
}
mode |= BIT(endpoint.port);
+ }
+
+ if (mode != mode_dpi_to_edp &&
+ mode != mode_dpi_to_dp &&
+ mode != mode_dsi_to_dpi &&
+ mode != mode_dsi_to_edp &&
+ mode != mode_dsi_to_dp) {
+ dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
+ return -EINVAL;
+ }
+
+ return mode;
+}
+static int tc_probe_bridge_endpoint(struct tc_data *tc, enum tc_mode mode)
+{
+ struct device *dev = tc->dev;
+ struct of_endpoint endpoint;
+ struct device_node *node = NULL;
+
+ for_each_endpoint_of_node(dev->of_node, node) {
+ of_graph_parse_endpoint(node, &endpoint);
if (endpoint.port == 2) {
of_property_read_u8_array(node, "toshiba,pre-emphasis",
tc->pre_emphasis,
@@ -2425,24 +2449,28 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return tc_probe_edp_bridge_endpoint(tc);
}
- dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
-
+ /* Should never happen, mode was validated by tc_probe_get_mode() */
return -EINVAL;
}
static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
+ const struct drm_bridge_funcs *funcs;
struct tc_data *tc;
+ int mode;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
+ mode = tc_probe_get_mode(dev);
+ funcs = (mode == mode_dsi_to_dpi) ? &tc_dpi_bridge_funcs : &tc_edp_bridge_funcs;
+
+ tc = devm_drm_bridge_alloc(dev, struct tc_data, bridge, funcs);
+ if (IS_ERR(tc))
+ return PTR_ERR(tc);
tc->dev = dev;
- ret = tc_probe_bridge_endpoint(tc);
+ ret = tc_probe_bridge_endpoint(tc, mode);
if (ret)
return ret;
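
Because devm_drm_bridge_alloc() wants the funcs table before the structure exists, tc358767 now derives the operating mode first and selects the DPI or (e)DP table from it. A condensed sketch of that ordering; the `mode < 0` guard is a defensive addition for illustration, not a line from the patch:

static int foo_probe(struct i2c_client *client)
{
	const struct drm_bridge_funcs *funcs;
	struct tc_data *tc;
	int mode;

	/* Parse the OF graph before allocating so funcs can be chosen. */
	mode = tc_probe_get_mode(&client->dev);
	if (mode < 0)
		return mode;

	funcs = (mode == mode_dsi_to_dpi) ? &tc_dpi_bridge_funcs
					  : &tc_edp_bridge_funcs;

	tc = devm_drm_bridge_alloc(&client->dev, struct tc_data, bridge, funcs);
	if (IS_ERR(tc))
		return PTR_ERR(tc);

	tc->dev = &client->dev;
	return tc_probe_bridge_endpoint(tc, mode);
}
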
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index ec79b0dd0e2c..fbdc44e16229 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -554,6 +554,7 @@ static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
};
static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -563,7 +564,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ return drm_bridge_attach(encoder, priv->output.bridge, bridge,
flags);
}
@@ -580,7 +581,8 @@ tc358768_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void tc358768_bridge_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -602,7 +604,8 @@ static void tc358768_bridge_disable(struct drm_bridge *bridge)
dev_warn(priv->dev, "Software disable failed: %d\n", ret);
}
-static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -682,13 +685,17 @@ static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
return (u32)div_u64(m, n);
}
-static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
struct mipi_dsi_device *dsi_dev = priv->output.dev;
unsigned long mode_flags = dsi_dev->mode_flags;
u32 val, val2, lptxcnt, hact, data_type;
s32 raw_val;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
const struct drm_display_mode *mode;
u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
u32 dsiclk, hsbyteclk;
@@ -719,7 +726,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
return;
}
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
ret = tc358768_setup_pll(priv, mode);
if (ret) {
dev_err(dev, "PLL setup failed: %d\n", ret);
@@ -1076,14 +1086,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
tc358768_write(priv, TC358768_DSI_CONFW, val);
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
-static void tc358768_bridge_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -1100,11 +1108,8 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge)
tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
#define MAX_INPUT_SEL_FORMATS 1
@@ -1166,10 +1171,10 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
.attach = tc358768_bridge_attach,
.mode_valid = tc358768_bridge_mode_valid,
.mode_fixup = tc358768_mode_fixup,
- .pre_enable = tc358768_bridge_pre_enable,
- .enable = tc358768_bridge_enable,
- .disable = tc358768_bridge_disable,
- .post_disable = tc358768_bridge_post_disable,
+ .atomic_pre_enable = tc358768_bridge_atomic_pre_enable,
+ .atomic_enable = tc358768_bridge_atomic_enable,
+ .atomic_disable = tc358768_bridge_atomic_disable,
+ .atomic_post_disable = tc358768_bridge_atomic_post_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -1282,9 +1287,10 @@ static int tc358768_i2c_probe(struct i2c_client *client)
if (!np)
return -ENODEV;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tc358768_priv, bridge,
+ &tc358768_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
priv->dev = dev;
@@ -1316,7 +1322,6 @@ static int tc358768_i2c_probe(struct i2c_client *client)
priv->dsi_host.dev = dev;
priv->dsi_host.ops = &tc358768_dsi_host_ops;
- priv->bridge.funcs = &tc358768_bridge_funcs;
priv->bridge.timings = &default_tc358768_timings;
priv->bridge.of_node = np;
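
With the legacy enable/disable hooks gone, tc358768 (like tc358775 and ti-sn65dsi86 further down) must recover the adjusted mode from the atomic state rather than through bridge->encoder->crtc->state. The lookup chain these patches use, as a standalone sketch:

static const struct drm_display_mode *
foo_get_new_adjusted_mode(struct drm_bridge *bridge,
			  struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;

	/* encoder -> new connector -> new connector state -> new CRTC state */
	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	conn_state = drm_atomic_get_new_connector_state(state, connector);
	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);

	return &crtc_state->adjusted_mode;
}
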
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index c89757bec4e6..366b12db0e7c 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -286,7 +286,8 @@ static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
return container_of(b, struct tc_data, bridge);
}
-static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -309,7 +310,8 @@ static void tc_bridge_pre_enable(struct drm_bridge *bridge)
usleep_range(10, 20);
}
-static void tc_bridge_post_disable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -368,30 +370,21 @@ static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val)
ret, addr);
}
-/* helper function to access bus_formats */
-static struct drm_connector *get_connector(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- return connector;
-
- return NULL;
-}
-
-static void tc_bridge_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2;
u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2;
u32 val = 0;
u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay;
- struct drm_display_mode *mode;
- struct drm_connector *connector = get_connector(bridge->encoder);
-
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
hback_porch = mode->htotal - mode->hsync_end;
hsync_len = mode->hsync_end - mode->hsync_start;
@@ -589,21 +582,25 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
}
static int tc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
+ return drm_bridge_attach(encoder, tc->panel_bridge,
&tc->bridge, flags);
}
static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
- .pre_enable = tc_bridge_pre_enable,
- .enable = tc_bridge_enable,
+ .atomic_pre_enable = tc_bridge_atomic_pre_enable,
+ .atomic_enable = tc_bridge_atomic_enable,
.mode_valid = tc_mode_valid,
- .post_disable = tc_bridge_post_disable,
+ .atomic_post_disable = tc_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};
static int tc_attach_host(struct tc_data *tc)
@@ -662,9 +659,10 @@ static int tc_probe(struct i2c_client *client)
struct tc_data *tc;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
+ tc = devm_drm_bridge_alloc(dev, struct tc_data, bridge,
+ &tc_bridge_funcs);
+ if (IS_ERR(tc))
+ return PTR_ERR(tc);
tc->dev = dev;
tc->i2c = client;
@@ -704,7 +702,6 @@ static int tc_probe(struct i2c_client *client)
return ret;
}
- tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
tc->bridge.pre_enable_prev_first = true;
drm_bridge_add(&tc->bridge);
diff --git a/drivers/gpu/drm/bridge/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index 20658258fb51..e636459d9185 100644
--- a/drivers/gpu/drm/bridge/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -751,7 +751,8 @@ tda998x_reset(struct tda998x_priv *priv)
*/
static void tda998x_edid_delay_done(struct timer_list *t)
{
- struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
+ struct tda998x_priv *priv = timer_container_of(priv, t,
+ edid_delay_timer);
priv->edid_delay_active = false;
wake_up(&priv->edid_delay_waitq);
@@ -1365,6 +1366,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
@@ -1780,9 +1782,9 @@ static int tda998x_create(struct device *dev)
u32 video;
int rev_lo, rev_hi, ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tda998x_priv, bridge, &tda998x_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -1947,7 +1949,6 @@ static int tda998x_create(struct device *dev)
tda998x_audio_codec_init(priv, &client->dev);
}
- priv->bridge.funcs = &tda998x_bridge_funcs;
#ifdef CONFIG_OF
priv->bridge.of_node = dev->of_node;
#endif
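
The first tda998x hunk is the from_timer() to timer_container_of() rename; behaviour is unchanged, it is still a container_of() keyed on the embedded timer_list member. Sketch with hypothetical names:

static void foo_timer_fn(struct timer_list *t)
{
	/* Resolve the structure embedding ->my_timer from the callback arg. */
	struct foo_priv *priv = timer_container_of(priv, t, my_timer);

	/* ... use priv ... */
}
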
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index bba10cf9b4f9..2cb7cd0c0608 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -43,11 +43,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
}
static int thc63_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
+ return drm_bridge_attach(encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
@@ -180,9 +181,10 @@ static int thc63_probe(struct platform_device *pdev)
struct thc63_dev *thc63;
int ret;
- thc63 = devm_kzalloc(&pdev->dev, sizeof(*thc63), GFP_KERNEL);
- if (!thc63)
- return -ENOMEM;
+ thc63 = devm_drm_bridge_alloc(&pdev->dev, struct thc63_dev, bridge,
+ &thc63_bridge_func);
+ if (IS_ERR(thc63))
+ return PTR_ERR(thc63);
thc63->dev = &pdev->dev;
platform_set_drvdata(pdev, thc63);
@@ -207,7 +209,6 @@ static int thc63_probe(struct platform_device *pdev)
thc63->bridge.driver_private = thc63;
thc63->bridge.of_node = pdev->dev.of_node;
- thc63->bridge.funcs = &thc63_bridge_func;
thc63->bridge.timings = &thc63->timings;
drm_bridge_add(&thc63->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index 85f2a0e74a1c..b07f7c9d5890 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -242,12 +242,12 @@ static void dlpc_mode_set(struct drm_bridge *bridge,
drm_mode_copy(&dlpc->mode, adjusted_mode);
}
-static int dlpc_attach(struct drm_bridge *bridge,
+static int dlpc_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
- return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, dlpc->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs dlpc_bridge_funcs = {
@@ -348,9 +348,10 @@ static int dlpc3433_probe(struct i2c_client *client)
struct dlpc *dlpc;
int ret;
- dlpc = devm_kzalloc(dev, sizeof(*dlpc), GFP_KERNEL);
- if (!dlpc)
- return -ENOMEM;
+ dlpc = devm_drm_bridge_alloc(dev, struct dlpc, bridge,
+ &dlpc_bridge_funcs);
+ if (IS_ERR(dlpc))
+ return PTR_ERR(dlpc);
dlpc->dev = dev;
@@ -365,7 +366,6 @@ static int dlpc3433_probe(struct i2c_client *client)
dev_set_drvdata(dev, dlpc);
i2c_set_clientdata(client, dlpc);
- dlpc->bridge.funcs = &dlpc_bridge_funcs;
dlpc->bridge.of_node = dev->of_node;
drm_bridge_add(&dlpc->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 95563aa1b450..033c44326552 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -40,7 +40,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_drv.h> /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */
+#include <drm/drm_bridge_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -290,11 +290,12 @@ static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
}
static int sn65dsi83_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
@@ -370,7 +371,6 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
{
- struct drm_device *dev = sn65dsi83->bridge.dev;
struct drm_modeset_acquire_ctx ctx;
int err;
@@ -385,26 +385,21 @@ static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
* Keep the lock during the whole operation to be atomic.
*/
- DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
-
- if (!sn65dsi83->bridge.encoder->crtc) {
- /*
- * No CRTC attached -> No CRTC active outputs to reset
- * This can happen when the SN65DSI83 is reset. Simply do
- * nothing without returning any errors.
- */
- err = 0;
- goto end;
- }
+ drm_modeset_acquire_init(&ctx, 0);
dev_warn(sn65dsi83->dev, "reset the pipe\n");
- err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx);
+retry:
+ err = drm_bridge_helper_reset_crtc(&sn65dsi83->bridge, &ctx);
+ if (err == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
-end:
- DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
- return err;
+ return 0;
}
static void sn65dsi83_reset_work(struct work_struct *ws)
@@ -946,9 +941,9 @@ static int sn65dsi83_probe(struct i2c_client *client)
struct sn65dsi83 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sn65dsi83, bridge, &sn65dsi83_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work);
@@ -988,7 +983,6 @@ static int sn65dsi83_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sn65dsi83_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
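
The sn65dsi83 reset path above is the standard modeset-lock retry idiom wrapped around the new drm_bridge_helper_reset_crtc(): retry on -EDEADLK after backing off, then drop the acquire context. As a reusable sketch (unlike the patch, which always returns 0, this version propagates the final error):

static int foo_reset_pipe(struct drm_bridge *bridge)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	err = drm_bridge_helper_reset_crtc(bridge, &ctx);
	if (err == -EDEADLK) {
		/* Another thread holds a lock we need: back off and retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return err;
}
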
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 01d456b955ab..464390372b34 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -35,6 +35,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#define SN_DEVICE_ID_REGS 0x00 /* up to 0x07 */
#define SN_DEVICE_REV_REG 0x08
#define SN_DPPLL_SRC_REG 0x0A
#define DPPLL_CLK_SRC_DSICLK BIT(0)
@@ -243,11 +244,26 @@ static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
}
-static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
+static struct drm_display_mode *
+get_new_adjusted_display_mode(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ return &crtc_state->adjusted_mode;
+}
+
+static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
u32 bit_rate_khz, clk_freq_khz;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
bit_rate_khz = mode->clock *
mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
@@ -274,7 +290,8 @@ static const u32 ti_sn_bridge_dsiclk_lut[] = {
460800000,
};
-static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
int i;
u32 refclk_rate;
@@ -287,7 +304,7 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
clk_prepare_enable(pdata->refclk);
} else {
- refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
+ refclk_rate = ti_sn_bridge_get_dsi_freq(pdata, state) * 1000;
refclk_lut = ti_sn_bridge_dsiclk_lut;
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
}
@@ -311,12 +328,13 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i];
}
-static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
+static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
mutex_lock(&pdata->comms_mutex);
/* configure bridge ref_clk */
- ti_sn_bridge_set_refclk_freq(pdata);
+ ti_sn_bridge_set_refclk_freq(pdata, state);
/*
* HPD on this bridge chip is a bit useless. This is an eDP bridge
@@ -330,12 +348,18 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
* 200 ms. We'll assume that the panel driver will have the hardcoded
* delay in its prepare and always disable HPD.
*
- * If HPD somehow makes sense on some future panel we'll have to
- * change this to be conditional on someone specifying that HPD should
- * be used.
+ * For DisplayPort bridge type, we need HPD. So we use the bridge type
+ * to conditionally disable HPD.
+ * NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms()
+ * can be called before. So for DisplayPort, HPD will be enabled once
+ * bridge type is set. We are using bridge type instead of "no-hpd"
+ * property because it is not used properly in devicetree description
+ * and hence is unreliable.
*/
- regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
- HPD_DISABLE);
+
+ if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort)
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+ HPD_DISABLE);
pdata->comms_enabled = true;
@@ -376,7 +400,7 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
* clock so reading early doesn't work.
*/
if (pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, NULL);
return ret;
}
@@ -423,57 +447,12 @@ static int status_show(struct seq_file *s, void *data)
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(status);
-static void ti_sn65dsi86_debugfs_remove(void *data)
-{
- debugfs_remove_recursive(data);
-}
-
-static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
-{
- struct device *dev = pdata->dev;
- struct dentry *debugfs;
- int ret;
-
- debugfs = debugfs_create_dir(dev_name(dev), NULL);
-
- /*
- * We might get an error back if debugfs wasn't enabled in the kernel
- * so let's just silently return upon failure.
- */
- if (IS_ERR_OR_NULL(debugfs))
- return;
-
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
- if (ret)
- return;
-
- debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
-}
-
/* -----------------------------------------------------------------------------
* Auxiliary Devices (*not* AUX)
*/
-static void ti_sn65dsi86_uninit_aux(void *data)
-{
- auxiliary_device_uninit(data);
-}
-
-static void ti_sn65dsi86_delete_aux(void *data)
-{
- auxiliary_device_delete(data);
-}
-
-static void ti_sn65dsi86_aux_device_release(struct device *dev)
-{
- struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
-
- kfree(aux);
-}
-
static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct auxiliary_device **aux_out,
const char *name)
@@ -481,34 +460,16 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct device *dev = pdata->dev;
const struct i2c_client *client = to_i2c_client(dev);
struct auxiliary_device *aux;
- int ret;
+ int id;
- aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ id = (client->adapter->nr << 10) | client->addr;
+ aux = __devm_auxiliary_device_create(dev, KBUILD_MODNAME, name,
+ NULL, id);
if (!aux)
- return -ENOMEM;
-
- aux->name = name;
- aux->id = (client->adapter->nr << 10) | client->addr;
- aux->dev.parent = dev;
- aux->dev.release = ti_sn65dsi86_aux_device_release;
- device_set_of_node_from_dev(&aux->dev, dev);
- ret = auxiliary_device_init(aux);
- if (ret) {
- kfree(aux);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
- if (ret)
- return ret;
-
- ret = auxiliary_device_add(aux);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
- if (!ret)
- *aux_out = aux;
+ return -ENODEV;
- return ret;
+ *aux_out = aux;
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -732,6 +693,7 @@ static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -748,7 +710,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
* Attach the next bridge.
* We never want the next bridge to *also* create a connector.
*/
- ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
+ ret = drm_bridge_attach(encoder, pdata->next_bridge,
&pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
goto err_initted_aux;
@@ -821,12 +783,13 @@ static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
}
-static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
unsigned int bit_rate_mhz, clk_freq_mhz;
unsigned int val;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* set DSIA clk frequency */
bit_rate_mhz = (mode->clock / 1000) *
@@ -856,12 +819,14 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state,
+ unsigned int bpp)
{
unsigned int bit_rate_khz, dp_rate_mhz;
unsigned int i;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* Calculate minimum bit rate based on our pixel clock. */
bit_rate_khz = mode->clock * bpp;
@@ -960,10 +925,11 @@ static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata)
return valid_rates;
}
-static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
u8 hsync_polarity = 0, vsync_polarity = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -1105,7 +1071,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
pdata->ln_polrs << LN_POLRS_OFFSET);
/* set dsi clk frequency value */
- ti_sn_bridge_set_dsi_rate(pdata);
+ ti_sn_bridge_set_dsi_rate(pdata, state);
/*
* The SN65DSI86 only supports ASSR Display Authentication method and
@@ -1140,7 +1106,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
valid_rates = ti_sn_bridge_read_valid_rates(pdata);
/* Train until we run out of rates */
- for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, state, bpp);
dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
dp_rate_idx++) {
if (!(valid_rates & BIT(dp_rate_idx)))
@@ -1156,7 +1122,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
}
/* config video parameters */
- ti_sn_bridge_set_video_timings(pdata);
+ ti_sn_bridge_set_video_timings(pdata, state);
/* enable video stream */
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
@@ -1171,7 +1137,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(pdata->dev);
if (!pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, state);
/* td7: min 100 us after enable before DSI data */
usleep_range(100, 110);
@@ -1195,14 +1161,20 @@ static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
pm_runtime_put_sync(pdata->dev);
}
-static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+ti_sn_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
int val = 0;
- pm_runtime_get_sync(pdata->dev);
+ /*
+ * Runtime reference is grabbed in ti_sn_bridge_hpd_enable()
+ * as the chip won't report HPD just after being powered on.
+ * HPD_DEBOUNCED_STATE reflects correct state only after the
+ * debounce time (~100-400 ms).
+ */
+
regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
- pm_runtime_put_autosuspend(pdata->dev);
return val & HPD_DEBOUNCED_STATE ? connector_status_connected
: connector_status_disconnected;
@@ -1216,6 +1188,35 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
return drm_edid_read_ddc(connector, &pdata->aux.ddc);
}
+static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ struct dentry *debugfs;
+
+ debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
+ debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+}
+
+static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+
+ /*
+ * Device needs to be powered on before reading the HPD state
+ * for reliable hpd detection in ti_sn_bridge_detect() due to
+ * the high debounce time.
+ */
+
+ pm_runtime_get_sync(pdata->dev);
+}
+
+static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+
+ pm_runtime_put_autosuspend(pdata->dev);
+}
+
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
@@ -1229,6 +1230,9 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .debugfs_init = ti_sn65dsi86_debugfs_init,
+ .hpd_enable = ti_sn_bridge_hpd_enable,
+ .hpd_disable = ti_sn_bridge_hpd_disable,
};
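
Note how ti-sn65dsi86 pairs the DETECT and HPD changes: the runtime-PM reference moves out of ->detect() into the new ->hpd_enable()/->hpd_disable() hooks, keeping the chip powered across the 100-400 ms HPD debounce window instead of waking it per detect call. Distilled (foo_* names illustrative):

static void foo_hpd_enable(struct drm_bridge *bridge)
{
	struct foo_bridge *foo = bridge_to_foo(bridge);	/* hypothetical */

	/* Held until hpd_disable() so HPD has time to debounce. */
	pm_runtime_get_sync(foo->dev);
}

static void foo_hpd_disable(struct drm_bridge *bridge)
{
	struct foo_bridge *foo = bridge_to_foo(bridge);

	pm_runtime_put_autosuspend(foo->dev);
}
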
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
@@ -1312,13 +1316,30 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
if (ret)
return ret;
- pdata->bridge.funcs = &ti_sn_bridge_funcs;
pdata->bridge.of_node = np;
pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
- if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort)
- pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
+ if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) {
+ pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_HPD;
+ /*
+ * If comms were already enabled they would have been enabled
+ * with the wrong value of HPD_DISABLE. Update it now. Comms
+ * could be enabled if anyone is holding a pm_runtime reference
+ * (like if a GPIO is in use). Note that in most cases nobody
+ * is doing AUX channel xfers before the bridge is added so
+ * HPD doesn't _really_ matter then. The only exception is in
+ * the eDP case where the panel wants to read the EDID before
+ * the bridge is added. We always consistently have HPD disabled
+ * for eDP.
+ */
+ mutex_lock(&pdata->comms_mutex);
+ if (pdata->comms_enabled)
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
+ HPD_DISABLE, 0);
+ mutex_unlock(&pdata->comms_mutex);
+ }
drm_bridge_add(&pdata->bridge);
@@ -1703,24 +1724,15 @@ static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(SN_GPIO_INPUT_SHIFT + offset));
}
-static void ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
- int ret;
-
- if (!test_bit(offset, pdata->gchip_output)) {
- dev_err(pdata->dev, "Ignoring GPIO set while input\n");
- return;
- }
val &= 1;
- ret = regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
- BIT(SN_GPIO_OUTPUT_SHIFT + offset),
- val << (SN_GPIO_OUTPUT_SHIFT + offset));
- if (ret)
- dev_warn(pdata->dev,
- "Failed to set bridge GPIO %u: %d\n", offset, ret);
+ return regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
+ BIT(SN_GPIO_OUTPUT_SHIFT + offset),
+ val << (SN_GPIO_OUTPUT_SHIFT + offset));
}
static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip,
@@ -1894,6 +1906,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ti_sn65dsi86 *pdata;
+ u8 id_buf[8];
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
@@ -1901,9 +1914,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return -ENODEV;
}
- pdata = devm_kzalloc(dev, sizeof(struct ti_sn65dsi86), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ pdata = devm_drm_bridge_alloc(dev, struct ti_sn65dsi86, bridge, &ti_sn_bridge_funcs);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
dev_set_drvdata(dev, pdata);
pdata->dev = dev;
@@ -1937,7 +1950,15 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (ret)
return ret;
- ti_sn65dsi86_debugfs_init(pdata);
+ pm_runtime_get_sync(dev);
+ ret = regmap_bulk_read(pdata->regmap, SN_DEVICE_ID_REGS, id_buf, ARRAY_SIZE(id_buf));
+ pm_runtime_put_autosuspend(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read device id\n");
+
+ /* The ID string is stored backwards */
+ if (strncmp(id_buf, "68ISD   ", ARRAY_SIZE(id_buf)))
+ return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");
/*
* Break ourselves up into a collection of aux devices. The only real
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index 22316382451f..27053d020df7 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -45,11 +45,13 @@ static void tdp158_disable(struct drm_bridge *bridge,
regulator_disable(tdp158->vcc);
}
-static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int tdp158_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct tdp158 *tdp158 = bridge->driver_private;
- return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags);
+ return drm_bridge_attach(encoder, tdp158->next, bridge, flags);
}
static const struct drm_bridge_funcs tdp158_bridge_funcs = {
@@ -66,9 +68,10 @@ static int tdp158_probe(struct i2c_client *client)
struct tdp158 *tdp158;
struct device *dev = &client->dev;
- tdp158 = devm_kzalloc(dev, sizeof(*tdp158), GFP_KERNEL);
- if (!tdp158)
- return -ENOMEM;
+ tdp158 = devm_drm_bridge_alloc(dev, struct tdp158, bridge,
+ &tdp158_bridge_funcs);
+ if (IS_ERR(tdp158))
+ return PTR_ERR(tdp158);
tdp158->next = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (IS_ERR(tdp158->next))
@@ -87,7 +90,6 @@ static int tdp158_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(tdp158->enable), "enable");
tdp158->bridge.of_node = dev->of_node;
- tdp158->bridge.funcs = &tdp158_bridge_funcs;
tdp158->bridge.driver_private = tdp158;
tdp158->dev = dev;
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 79ab5da827e1..b80ee089f880 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -89,7 +89,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- return drm_bridge_detect(dvi->next_bridge);
+ return drm_bridge_detect(dvi->next_bridge, connector);
}
static const struct drm_connector_funcs tfp410_con_funcs = {
@@ -120,12 +120,13 @@ static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
}
static int tfp410_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, dvi->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -159,7 +160,7 @@ static int tfp410_attach(struct drm_bridge *bridge,
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, encoder);
return 0;
}
@@ -340,14 +341,14 @@ static int tfp410_init(struct device *dev, bool i2c)
return -ENXIO;
}
- dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
- if (!dvi)
- return -ENOMEM;
+ dvi = devm_drm_bridge_alloc(dev, struct tfp410, bridge,
+ &tfp410_bridge_funcs);
+ if (IS_ERR(dvi))
+ return PTR_ERR(dvi);
dvi->dev = dev;
dev_set_drvdata(dev, dvi);
- dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
dvi->bridge.timings = &dvi->timings;
dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index 47b74cb25b14..dcf686c4e73d 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -38,6 +38,7 @@ static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
}
static int tpd12s015_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -46,7 +47,7 @@ static int tpd12s015_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ ret = drm_bridge_attach(encoder, tpd->next_bridge,
bridge, flags);
if (ret < 0)
return ret;
@@ -76,6 +77,12 @@ static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge)
return connector_status_disconnected;
}
+static enum drm_connector_status
+tpd12s015_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
+{
+ return tpd12s015_detect(bridge);
+}
+
static void tpd12s015_hpd_enable(struct drm_bridge *bridge)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -93,7 +100,7 @@ static void tpd12s015_hpd_disable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs tpd12s015_bridge_funcs = {
.attach = tpd12s015_attach,
.detach = tpd12s015_detach,
- .detect = tpd12s015_detect,
+ .detect = tpd12s015_bridge_detect,
.hpd_enable = tpd12s015_hpd_enable,
.hpd_disable = tpd12s015_hpd_disable,
};
@@ -115,13 +122,13 @@ static int tpd12s015_probe(struct platform_device *pdev)
struct gpio_desc *gpio;
int ret;
- tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
- if (!tpd)
- return -ENOMEM;
+ tpd = devm_drm_bridge_alloc(&pdev->dev, struct tpd12s015_device,
+ bridge, &tpd12s015_bridge_funcs);
+ if (IS_ERR(tpd))
+ return PTR_ERR(tpd);
platform_set_drvdata(pdev, tpd);
- tpd->bridge.funcs = &tpd12s015_bridge_funcs;
tpd->bridge.of_node = pdev->dev.of_node;
tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index a8fca079921b..fddfbd4d2493 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -193,6 +193,8 @@ CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
CONFIG_REGULATOR_DA9211=y
CONFIG_DRM_ANALOGIX_ANX7625=y
+CONFIG_PHY_MTK_HDMI=y
+CONFIG_PHY_MTK_MIPI_DSI=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 19fe01257ab9..ac5e7ed195cf 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -98,14 +98,14 @@ done
make ${KERNEL_IMAGE_NAME}
-mkdir -p /lava-files/
+mkdir -p /kernel/
for image in ${KERNEL_IMAGE_NAME}; do
- cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
+ cp arch/${KERNEL_ARCH}/boot/${image} /kernel/.
done
if [[ -n ${DEVICE_TREES} ]]; then
make dtbs
- cp ${DEVICE_TREES} /lava-files/.
+ cp ${DEVICE_TREES} /kernel/.
fi
make modules
@@ -113,19 +113,8 @@ mkdir -p install/modules/
INSTALL_MOD_PATH=install/modules/ make modules_install
if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
- make Image.lzma
- mkimage \
- -f auto \
- -A arm \
- -O linux \
- -d arch/arm64/boot/Image.lzma \
- -C lzma\
- -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
- /lava-files/cheza-kernel
- KERNEL_IMAGE_NAME+=" cheza-kernel"
-
# Make a gzipped copy of the Image for db410c.
- gzip -k /lava-files/Image
+ gzip -k /kernel/Image
KERNEL_IMAGE_NAME+=" Image.gz"
fi
@@ -139,7 +128,7 @@ cp -rfv drivers/gpu/drm/ci/* install/.
. .gitlab-ci/container/container_post_build.sh
if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
- xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz
+ xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /kernel/vmlinux.xz
FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz"
if [[ -n $DEVICE_TREES ]]; then
@@ -148,7 +137,7 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
ls -l "${S3_JWT_FILE}"
for f in $FILES_TO_UPLOAD; do
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/$f \
+ ci-fairy s3cp --token-file "${S3_JWT_FILE}" /kernel/$f \
https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
done
@@ -165,7 +154,7 @@ ln -s common artifacts/install/ci-common
cp .config artifacts/${CI_JOB_NAME}_config
for image in ${KERNEL_IMAGE_NAME}; do
- cp /lava-files/$image artifacts/install/.
+ cp /kernel/$image artifacts/install/.
done
tar -C artifacts -cf artifacts/install.tar install
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 274f118533a7..af27ff5de369 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -13,7 +13,7 @@
- .build
- .use-debian/arm64_build
tags:
- - aarch64
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
variables:
DEFCONFIG: "arch/arm/configs/multi_v7_defconfig"
KERNEL_IMAGE_NAME: "zImage"
@@ -24,7 +24,7 @@
- .build
- .use-debian/arm64_build
tags:
- - aarch64
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
variables:
DEFCONFIG: "arch/arm64/configs/defconfig"
KERNEL_IMAGE_NAME: "Image"
@@ -44,16 +44,22 @@
igt:arm32:
extends: .build:arm32
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
igt:arm64:
extends: .build:arm64
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
igt:x86_64:
extends: .build:x86_64
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
@@ -67,7 +73,7 @@ testing:arm32:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm.config
@@ -79,7 +85,7 @@ testing:arm64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm64.config
@@ -91,7 +97,7 @@ testing:x86_64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: x86_64.config
@@ -143,6 +149,10 @@ debian-arm64-release:
rules:
- when: never
+debian-arm64-ubsan:
+ rules:
+ - when: never
+
debian-build-testing:
rules:
- when: never
@@ -183,6 +193,10 @@ debian-testing-msan:
rules:
- when: never
+debian-testing-ubsan:
+ rules:
+ - when: never
+
debian-vulkan:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/check-devicetrees.yml b/drivers/gpu/drm/ci/check-devicetrees.yml
new file mode 100644
index 000000000000..727bd56018b8
--- /dev/null
+++ b/drivers/gpu/drm/ci/check-devicetrees.yml
@@ -0,0 +1,50 @@
+.dt-check-base:
+ stage: static-checks
+ timeout: "30m"
+ variables:
+ GIT_DEPTH: 1
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ SCHEMA: "display:gpu"
+ VENV_PATH: "/tmp/dtcheck-venv"
+ before_script:
+ - apt-get update -qq
+ # Minimum supported version of LLVM for building x86 kernels is 15.0.0.
+ # In mesa-ci containers, LLVM_VERSION is defined as a container-level property and is currently set to 19.
+ - apt-get install -y --no-install-recommends clang-${LLVM_VERSION} lld-${LLVM_VERSION} llvm-${LLVM_VERSION} python3-dev python3-venv python3-pip yamllint
+ - python3 -m venv "${VENV_PATH}"
+ - source "${VENV_PATH}/bin/activate"
+ - pip3 install dtschema
+ script:
+ - drivers/gpu/drm/ci/${SCRIPT_NAME}
+ artifacts:
+ when: on_failure
+ paths:
+ - ${ARTIFACT_FILE}
+ allow_failure:
+ exit_codes:
+ - 102
+
+dtbs-check:arm32:
+ extends:
+ - .build:arm32
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dtbs-check.sh"
+ ARTIFACT_FILE: "dtbs-check.log"
+
+dtbs-check:arm64:
+ extends:
+ - .build:arm64
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dtbs-check.sh"
+ ARTIFACT_FILE: "dtbs-check.log"
+
+dt-binding-check:
+ extends:
+ - .build
+ - .use-debian/x86_64_build
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dt-binding-check.sh"
+ ARTIFACT_FILE: "dt-binding-check.log"
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 07dc13ff865d..5f90508578a3 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -20,15 +20,15 @@ debian/arm64_build:
EXTRA_LOCAL_PACKAGES: "jq libasound2 libcairo2 libdw1 libglib2.0-0 libjson-c5"
# Disable container jobs that we won't use
-alpine/x86_64_build:
+debian/arm64_test-vk:
rules:
- when: never
-debian/arm64_test-gl:
+debian/baremetal_arm32_test-gl:
rules:
- when: never
-debian/arm64_test-vk:
+debian/baremetal_arm64_test-vk:
rules:
- when: never
@@ -40,19 +40,27 @@ debian/s390x_build:
rules:
- when: never
-debian/x86_64_test-vk:
+debian/x86_32_build:
rules:
- when: never
-fedora/x86_64_build:
+debian/x86_64_test-android:
rules:
- when: never
-debian/android_build:
+debian/x86_64_test-video:
+ rules:
+ - when: never
+
+debian/x86_64_test-vk:
rules:
- when: never
-.debian/x86_64_test-android:
+fedora/x86_64_build:
+ rules:
+ - when: never
+
+debian/android_build:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/dt-binding-check.sh b/drivers/gpu/drm/ci/dt-binding-check.sh
new file mode 100755
index 000000000000..99e1c0df84b7
--- /dev/null
+++ b/drivers/gpu/drm/ci/dt-binding-check.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+VENV_PATH="${VENV_PATH:-/tmp/dtschema-venv}"
+source "${VENV_PATH}/bin/activate"
+
+if ! make -j"${FDO_CI_CONCURRENT:-4}" dt_binding_check \
+ DT_SCHEMA_FILES="${SCHEMA:-}" 2>dt-binding-check.log; then
+ echo "ERROR: 'make dt_binding_check' failed. Please check dt-binding-check.log for details."
+ exit 1
+fi
+
+if [[ -s dt-binding-check.log ]]; then
+ echo "WARNING: dt_binding_check reported warnings. Please check dt-binding-check.log" \
+ "for details."
+ exit 102
+fi
diff --git a/drivers/gpu/drm/ci/dtbs-check.sh b/drivers/gpu/drm/ci/dtbs-check.sh
new file mode 100755
index 000000000000..57842c452439
--- /dev/null
+++ b/drivers/gpu/drm/ci/dtbs-check.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+: "${KERNEL_ARCH:?ERROR: KERNEL_ARCH must be set}"
+: "${LLVM_VERSION:?ERROR: LLVM_VERSION must be set}"
+
+./drivers/gpu/drm/ci/setup-llvm-links.sh
+
+make LLVM=1 ARCH="${KERNEL_ARCH}" defconfig
+
+if ! make -j"${FDO_CI_CONCURRENT:-4}" ARCH="${KERNEL_ARCH}" LLVM=1 dtbs_check \
+ DT_SCHEMA_FILES="${SCHEMA:-}" 2>dtbs-check.log; then
+ echo "ERROR: 'make dtbs_check' failed. Please check dtbs-check.log for details."
+ exit 1
+fi
+
+if [[ -s dtbs-check.log ]]; then
+ echo "WARNING: dtbs_check reported warnings. Please check dtbs-check.log for details."
+ exit 102
+fi
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index f04aabe8327c..d502d146b177 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,17 +1,17 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 7d3062470f3ccc6cb40540e772e902c7e2248024
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 02337aec715c25dae7ff2479d986f831c77fe536
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 33adea9ebafd059ac88a5ccfec60536394f36c7c
+ IGT_VERSION: 129d5b10baaadea1d6cd6377341c4cb42e7ee6fd
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs
- MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
+ MESA_TEMPLATES_COMMIT: &ci-templates-commit c6aeb16f86e32525fa630fb99c66c4f3e62fc3cb
DRM_CI_PROJECT_URL: https://gitlab.freedesktop.org/${DRM_CI_PROJECT_PATH}
CI_PRE_CLONE_SCRIPT: |-
set -o xtrace
@@ -36,7 +36,11 @@ variables:
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
LAVA_TAGS: subset-1-gfx
- LAVA_JOB_PRIORITY: 30
+ # Default priority for non-merge pipelines
+ FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: "" # Empty tags are ignored by gitlab
+ FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: kvm
+ FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: aarch64
+ JOB_PRIORITY: 30
ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
# Python scripts for structured logger
PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
@@ -71,15 +75,12 @@ default:
include:
- project: 'freedesktop/ci-templates'
- ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
- file:
- - '/templates/ci-fairy.yml'
- - project: 'freedesktop/ci-templates'
ref: *ci-templates-commit
file:
- '/templates/alpine.yml'
- '/templates/debian.yml'
- '/templates/fedora.yml'
+ - '/templates/ci-fairy.yml'
- project: *drm-ci-project-path
ref: *drm-ci-commit-sha
file:
@@ -103,20 +104,26 @@ include:
- '/src/microsoft/ci/gitlab-ci-inc.yml'
- '/src/nouveau/ci/gitlab-ci-inc.yml'
- '/src/virtio/ci/gitlab-ci-inc.yml'
+ - 'docs/gitlab-ci.yml'
- drivers/gpu/drm/ci/image-tags.yml
- drivers/gpu/drm/ci/container.yml
- drivers/gpu/drm/ci/static-checks.yml
- drivers/gpu/drm/ci/build.yml
- drivers/gpu/drm/ci/test.yml
+ - drivers/gpu/drm/ci/check-devicetrees.yml
+ - drivers/gpu/drm/ci/kunit.yml
- 'https://gitlab.freedesktop.org/gfx-ci/lab-status/-/raw/main/lab-status.yml'
stages:
- sanity
- container
+ - deploy
- git-archive
- build-for-tests
- build-only
+ - static-checks
+ - kunit
- code-validation
- amdgpu
- i915
@@ -143,11 +150,11 @@ stages:
# Pre-merge pipeline
- if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
# Push to a branch on a fork
- - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
+ - if: &is-fork-push $CI_PIPELINE_SOURCE == "push"
# nightly pipeline
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
# pipeline for direct pushes that bypassed the CI
- - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
+ - if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
# Rules applied to every job in the pipeline
@@ -170,29 +177,48 @@ stages:
- !reference [.disable-farm-mr-rules, rules]
# Never run immediately after merging, as we just ran everything
- !reference [.never-post-merge-rules, rules]
- # Build everything in merge pipelines, if any files affecting the pipeline
- # were changed
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- changes: &all_paths
- - drivers/gpu/drm/ci/**/*
when: on_success
# Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- changes:
- *all_paths
when: manual
- # Skip everything for pre-merge and merge pipelines which don't change
- # anything in the build
+ # Build everything after someone bypassed the CI
+ - if: *is-direct-push
+ when: manual
+ # Build everything in scheduled pipelines
+ - if: *is-scheduled-pipeline
+ when: on_success
+ # Allow building everything in fork pipelines, but build nothing unless
+ # manually triggered
+ - when: manual
+
+
+# Repeat of the above but with `when: on_success` replaced with
+# `when: delayed` + `start_in:`, for build-only jobs.
+# Note: make sure the branches in this list are the same as in
+# `.container+build-rules` above.
+.build-only-delayed-rules:
+ rules:
+ - !reference [.common-rules, rules]
+ # Run when re-enabling a disabled farm, but not when disabling it
+ - !reference [.disable-farm-mr-rules, rules]
+ # Never run immediately after merging, as we just ran everything
+ - !reference [.never-post-merge-rules, rules]
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- when: never
+ when: delayed
+ start_in: &build-delay 5 minutes
+ # Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- when: never
+ when: manual
# Build everything after someone bypassed the CI
- if: *is-direct-push
- when: on_success
+ when: manual
# Build everything in scheduled pipelines
- if: *is-scheduled-pipeline
- when: on_success
+ when: delayed
+ start_in: *build-delay
# Allow building everything in fork pipelines, but build nothing unless
# manually triggered
- when: manual
@@ -211,16 +237,20 @@ stages:
- _build/meson-logs/strace
+python-artifacts:
+ variables:
+ GIT_DEPTH: 10
+
+
# Git archive
-make git archive:
+make-git-archive:
extends:
- .fdo.ci-fairy
stage: git-archive
rules:
- !reference [.scheduled_pipeline-rules, rules]
- # ensure we are running on packet
tags:
- - packet.net
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
script:
# Remove drm-ci files we just added
- rm -rf .gitlab-ci.*
@@ -240,6 +270,8 @@ sanity:
extends:
- .fdo.ci-fairy
stage: sanity
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
rules:
- if: *is-pre-merge
when: on_success
@@ -258,7 +290,6 @@ sanity:
DEBIAN_BUILD_TAG
DEBIAN_PYUTILS_TAG
DEBIAN_TEST_GL_TAG
- KERNEL_ROOTFS_TAG
KERNEL_TAG
PKG_REPO_REV
)
@@ -274,14 +305,14 @@ sanity:
when: on_failure
reports:
junit: check-*.xml
- tags:
- - placeholder-job
mr-label-maker-test:
extends:
- .fdo.ci-fairy
stage: sanity
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
rules:
- !reference [.mr-label-maker-rules, rules]
variables:
@@ -304,3 +335,15 @@ mr-label-maker-test:
optional: true
- job: toml-lint
optional: true
+
+deploy-docs:
+ rules:
+ - when: never
+
+linkcheck-docs:
+ rules:
+ - when: never
+
+test-docs:
+ rules:
+ - when: never
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 68b042e43b7f..b24d4bc53cda 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -19,6 +19,7 @@ set +e
cat /sys/kernel/debug/dri/*/state
set -e
+mkdir -p /lib/modules
case "$DRIVER_NAME" in
amdgpu|vkms)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
@@ -85,5 +86,16 @@ deqp-runner junit \
--limit 50 \
--template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
+# Check if /proc/lockdep_stats exists
+if [ -f /proc/lockdep_stats ]; then
+    # If debug_locks is 0, lockdep has detected an issue and turned itself off.
+ debug_locks=$(grep 'debug_locks:' /proc/lockdep_stats | awk '{print $2}')
+ if [ "$debug_locks" -eq 0 ] && [ "$ret" -eq 0 ]; then
+ echo "Warning: LOCKDEP issue detected. Please check dmesg logs for more information."
+ cat /proc/lockdep_stats
+ ret=101
+ fi
+fi
+
cd $oldpath
exit $ret
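
The added block maps a tripped lockdep onto its own exit code: once lockdep reports a problem it sets debug_locks to 0 and stops checking, so a run that would otherwise pass is remapped to 101, which the .allow_failure_lockdep rules added to test.yml below treat as an allowed failure instead of a hard one. For illustration only, the same check expressed in C; this helper is not part of the patch:

#include <stdio.h>

/* Return 101 if lockdep tripped and disabled itself, 0 otherwise. */
int lockdep_tripped(void)
{
	FILE *f = fopen("/proc/lockdep_stats", "r");
	char line[256];
	int v, ret = 0;

	if (!f)
		return 0;	/* lockdep not built in: nothing to check */

	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, " debug_locks: %d", &v) == 1) {
			ret = v ? 0 : 101;
			break;
		}
	}
	fclose(f);
	return ret;
}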
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 20049f3626b2..7acc2e2a8eaa 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,22 +1,18 @@
variables:
- CONTAINER_TAG: "20250204-mesa-uprev"
- DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
+ CONTAINER_TAG: "20250502-mesa-uprev"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
-
- DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
- KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
+ DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
# default kernel for rootfs before injecting the current kernel tree
- KERNEL_TAG: "v6.13-rc4-mesa-5e77"
+ KERNEL_TAG: "v6.14-mesa-0bdd"
KERNEL_REPO: "gfx-ci/linux"
- PKG_REPO_REV: "bca9635d"
+ PKG_REPO_REV: "95bf62c"
- DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
- DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
- DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
-
- DEBIAN_PYUTILS_IMAGE: "debian/x86_64_pyutils"
DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
+ ALPINE_X86_64_BUILD_TAG: "${CONTAINER_TAG}"
ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
+
+ CONDITIONAL_BUILD_ANGLE_TAG: 384145a4023315dae658259bee07c43a
+ CONDITIONAL_BUILD_PIGLIT_TAG: a19e424b8a3f020dbf1b9dd29f220a4f
diff --git a/drivers/gpu/drm/ci/kunit.sh b/drivers/gpu/drm/ci/kunit.sh
new file mode 100755
index 000000000000..7a1052fd3f17
--- /dev/null
+++ b/drivers/gpu/drm/ci/kunit.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+: "${KERNEL_ARCH:?ERROR: KERNEL_ARCH must be set}"
+: "${LLVM_VERSION:?ERROR: LLVM_VERSION must be set}"
+
+./drivers/gpu/drm/ci/setup-llvm-links.sh
+
+export PATH="/usr/bin:$PATH"
+
+./tools/testing/kunit/kunit.py run \
+ --arch "${KERNEL_ARCH}" \
+ --make_options LLVM=1 \
+ --kunitconfig=drivers/gpu/drm/tests
diff --git a/drivers/gpu/drm/ci/kunit.yml b/drivers/gpu/drm/ci/kunit.yml
new file mode 100644
index 000000000000..0d5b2c4433d2
--- /dev/null
+++ b/drivers/gpu/drm/ci/kunit.yml
@@ -0,0 +1,37 @@
+.kunit-packages: &kunit-packages
+ - apt-get update -qq
+ # Minimum supported version of LLVM for building x86 kernels is 15.0.0.
+ # In mesa-ci containers, LLVM_VERSION is defined as a container-level property and is currently set to 19.
+ - apt-get install -y --no-install-recommends clang-${LLVM_VERSION} lld-${LLVM_VERSION} llvm-${LLVM_VERSION}
+
+.kunit-base:
+ stage: kunit
+ timeout: "30m"
+ variables:
+ GIT_DEPTH: 1
+ script:
+ - drivers/gpu/drm/ci/kunit.sh
+
+kunit:arm32:
+ extends:
+ - .build:arm32
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-arm
+
+kunit:arm64:
+ extends:
+ - .build:arm64
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-aarch64
+
+kunit:x86_64:
+ extends:
+ - .build:x86_64
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-x86
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 6e5ac51e8c0a..a295102c3468 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -41,14 +41,14 @@ section_start prepare_rootfs "Preparing root filesystem"
set -ex
-section_switch rootfs "Assembling root filesystem"
ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
[ $? != 1 ] || exit 1
rm -rf results
mkdir -p results/job-rootfs-overlay/
-artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
+artifacts/ci-common/export-gitlab-job-env-for-dut.sh \
+ > results/job-rootfs-overlay/set-job-env-vars.sh
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
@@ -63,6 +63,9 @@ section_switch lava_submit "Submitting job for scheduling"
touch results/lava.log
tail -f results/lava.log &
+# Ensure that we are printing the commands that are being executed,
+# making it easier to debug the job in case it fails.
+set -x
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
--farm "${FARM}" \
--device-type "${DEVICE_TYPE}" \
diff --git a/drivers/gpu/drm/ci/setup-llvm-links.sh b/drivers/gpu/drm/ci/setup-llvm-links.sh
new file mode 100755
index 000000000000..ace33af82a3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/setup-llvm-links.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MIT
+set -euo pipefail
+
+ln -svf "$(which clang++-${LLVM_VERSION})" /usr/bin/clang++
+ln -svf "$(which clang-${LLVM_VERSION})" /usr/bin/clang
+ln -svf "$(which ld.lld-${LLVM_VERSION})" /usr/bin/ld.lld
+ln -svf "$(which lld-${LLVM_VERSION})" /usr/bin/lld
+ln -svf "$(which llvm-ar-${LLVM_VERSION})" /usr/bin/llvm-ar
+ln -svf "$(which llvm-nm-${LLVM_VERSION})" /usr/bin/llvm-nm
+ln -svf "$(which llvm-objcopy-${LLVM_VERSION})" /usr/bin/llvm-objcopy
+ln -svf "$(which llvm-readelf-${LLVM_VERSION})" /usr/bin/llvm-readelf
+ln -svf "$(which llvm-strip-${LLVM_VERSION})" /usr/bin/llvm-strip
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 6a1e059858e5..81147e86bfd0 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -1,6 +1,14 @@
+.allow_failure_lockdep:
+ variables:
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ allow_failure:
+ exit_codes:
+ - 101
+
.lava-test:
extends:
- .container+build-rules
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -16,7 +24,7 @@
.lava-igt:arm32:
extends:
- - .lava-test:arm32
+ - .lava-arm32-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "armhf"
@@ -25,15 +33,14 @@
- testing:arm32
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_arm32
- - debian/x86_64_build
+ - debian/arm32_test-gl
- python-artifacts
- testing:arm32
- igt:arm32
.lava-igt:arm64:
extends:
- - .lava-test:arm64
+ - .lava-arm64-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "arm64"
@@ -42,15 +49,14 @@
- testing:arm64
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_arm64
- - debian/x86_64_build
+ - debian/arm64_test-gl
- python-artifacts
- testing:arm64
- igt:arm64
.lava-igt:x86_64:
extends:
- - .lava-test:x86_64
+ - .lava-x86_64-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "amd64"
@@ -59,16 +65,16 @@
- testing:x86_64
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_x86_64
- - debian/x86_64_build
+ - debian/x86_64_test-gl
- python-artifacts
- testing:x86_64
- igt:x86_64
.baremetal-igt-arm64:
extends:
- - .baremetal-test-arm64
- - .use-debian/baremetal_arm64_test
+ - .baremetal-test-arm64-gl
+ - .use-debian/baremetal_arm64_test-gl
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -82,13 +88,37 @@
BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
FARM: google
needs:
- - debian/baremetal_arm64_test
+ - debian/baremetal_arm64_test-gl
- job: testing:arm64
artifacts: false
- igt:arm64
tags:
- $RUNNER_TAG
+.software-driver:
+ stage: software-driver
+ extends:
+ - .test-gl
+ - .allow_failure_lockdep
+ timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM
+ before_script:
+ - !reference [default, before_script]
+ - rm -rf install
+ - tar -xf artifacts/install.tar
+ script:
+ - ln -sf $CI_PROJECT_DIR/install /install
+ - mv install/bzImage /kernel/bzImage
+ - install/crosvm-runner.sh install/igt_runner.sh
+ needs:
+ - debian/x86_64_test-gl
+ - testing:x86_64
+ - igt:x86_64
+
.msm-sc7180:
extends:
- .lava-igt:arm64
@@ -133,7 +163,7 @@ msm:apq8016:
BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
RUNNER_TAG: google-freedreno-db410c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:apq8096:
extends:
@@ -147,21 +177,7 @@ msm:apq8096:
GPU_VERSION: apq8096
RUNNER_TAG: google-freedreno-db820c
script:
- - ./install/bare-metal/fastboot.sh
-
-msm:sdm845:
- extends:
- - .baremetal-igt-arm64
- stage: msm
- parallel: 6
- variables:
- DEVICE_TYPE: sdm845-cheza-r3
- DRIVER_NAME: msm
- BM_KERNEL: https://${PIPELINE_ARTIFACTS_BASE}/arm64/cheza-kernel
- GPU_VERSION: sdm845
- RUNNER_TAG: google-freedreno-cheza
- script:
- - ./install/bare-metal/cros-servo.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:sm8350-hdk:
extends:
@@ -292,7 +308,7 @@ i915:cml:
variables:
DEVICE_TYPE: asus-C436FA-Flip-hatch
GPU_VERSION: cml
- RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-flip-hatch
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-Flip-hatch
i915:tgl:
extends:
@@ -440,47 +456,16 @@ panfrost:g12b:
- .panfrost-gpu
virtio_gpu:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - install/crosvm-runner.sh install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
vkms:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - mkdir -p /lib/modules
- - ./install/crosvm-runner.sh ./install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index 75374085f40f..f44dbce3151a 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -14,16 +14,10 @@ amdgpu/amd_plane@mpo-scale-nv12,Fail
amdgpu/amd_plane@mpo-scale-p010,Fail
amdgpu/amd_plane@mpo-scale-rgb,Crash
amdgpu/amd_plane@mpo-swizzle-toggle,Fail
-amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Crash
+amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail
kms_addfb_basic@bad-pitch-65536,Fail
kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@too-high,Fail
-kms_async_flips@alternate-sync-async-flip,Fail
-kms_async_flips@alternate-sync-async-flip-atomic,Fail
-kms_async_flips@test-cursor,Fail
-kms_async_flips@test-cursor-atomic,Fail
-kms_async_flips@test-time-stamp,Fail
-kms_async_flips@test-time-stamp-atomic,Fail
kms_atomic_transition@plane-all-modeset-transition-internal-panels,Fail
kms_atomic_transition@plane-all-transition,Fail
kms_atomic_transition@plane-all-transition-nonblocking,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index 3879c4812a22..902d54027506 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -14,6 +14,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index a29cea4f234c..8e2b5504004e 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -1,22 +1,18 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_flip@dpms-off-confusion-interruptible,Timeout
-kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@@ -31,12 +27,18 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -44,8 +46,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 2ef1dc35a7fa..922327632eff 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
index ee11999e3da1..7353ab11e940 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -1,4 +1,3 @@
-core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -16,6 +15,7 @@ kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
@@ -30,7 +30,6 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_pm_backlight@basic-brightness,Fail
-kms_pm_backlight@brightness-with-dpms,Crash
kms_pm_backlight@fade,Fail
kms_pm_backlight@fade-with-dpms,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
@@ -43,8 +42,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
index 4f50e0240ff4..80bf2741866c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index 47b3f1d42bb6..6fef7c1e56ea 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -8,8 +8,9 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip@busy-flip,Timeout
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -34,17 +35,22 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
+kms_pipe_stress@stress-xrgb8888-untiled,Fail
+kms_pipe_stress@stress-xrgb8888-ytiled,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
kms_psr2_sf@plane-move-sf-dmg-area,Fail
-kms_psr2_sf@pr-cursor-plane-update-sf,Timeout
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_sf@psr2-cursor-plane-update-sf,Fail
@@ -57,6 +63,7 @@ kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
kms_psr2_su@page_flip-P010,Fail
kms_setmode@basic,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -65,6 +72,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index c87ff8b40e99..c393a138b8a6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -16,7 +17,6 @@ gem_.*
# Hangs the machine and timeout occurs
i915_pm_rc6_residency.*
i915_suspend.*
-xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
i915_pm_rpm.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index 843c363b42f5..8adf5f0a6e80 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,39 +1,47 @@
-core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
gen9_exec_parse@unaligned-access,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
-kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
+kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
+sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index 219ae839323a..2e4ef9f35654 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
index 0e08fff741aa..57453e340040 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
@@ -3,12 +3,13 @@ i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
kms_flip@dpms-off-confusion,Fail
+kms_flip@nonexisting-fb,Fail
kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset,Fail
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,UnexpectedImprovement(Skip)
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
@@ -28,7 +29,6 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-blt,Timeout
kms_lease@lease-uevent,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_rotation_crc@bad-pixel-format,Fail
@@ -37,13 +37,10 @@ kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
+perf_pmu@most-busy-idle-check-all,Fail
perf_pmu@rc6,Crash
+prime_busy@before-wait,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
index 1a3d87c0ca6e..8dec57da1bb3 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index d4fba4f55ec1..117098bc95d9 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -21,8 +21,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index dc722d6a774e..e287462a491a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 93d42b146df9..462c050a8b2d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,13 +1,14 @@
api_intel_allocator@reopen,Timeout
api_intel_bb@destroy-bb,Timeout
core_hotunplug@hotrebind-lateclose,Timeout
-drm_read@short-buffer-block,Timeout
dumb_buffer@map-valid,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rps@engine-order,Timeout
+i915_pm_rps@waitboost,Fail
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
@@ -16,6 +17,7 @@ perf_pmu@enable-race,Timeout
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
perf_pmu@semaphore-wait-idle,Timeout
+prime_busy@before,Fail
prime_mmap@test_refcounting,Timeout
sriov_basic@enable-vfs-bind-unbind-each-numvfs-all,Timeout
syncobj_basic@illegal-fd-to-handle,Timeout
@@ -26,8 +28,3 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index 938377896841..429dc3c731df 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 1cb6978c86dc..0f167cfd503c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -6,10 +6,9 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -30,13 +29,19 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -45,8 +50,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
index 29bff8922ae1..7e7374ebf3d1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 4f176c04ec4e..592d7d69e6fc 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -17,8 +17,28 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail
kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 2956567c3048..443596d9e662 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -46,3 +46,10 @@ kms_prop_blob@invalid-set-prop
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/d25442b9-0b6b-433c-8e23-997840fad305@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@flip-vs-wf_vblank-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 5a063361d7f2..184d0cccc318 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,12 +1,38 @@
-core_setmaster@master-drop-set-user,Fail
dumb_buffer@create-clear,Crash
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
index df7e5ce7a036..0c67fec92450 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -18,3 +18,24 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
fbdev@write
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/a520d1d6-95b3-4573-b8f2-689f05bc2230@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@basic-flip-vs-modeset
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/ca960a82-00fc-4183-b983-998f7ac2fbb5@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/da578eed-224f-4374-853a-1ff0aa20d03b@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-toggle-modeset-transition
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
index 8198e06344a3..9fd44a4b962a 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 7752adff05c1..72c469021b66 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -1,8 +1,4 @@
kms_3d,Fail
-kms_cursor_legacy@forked-bo,Fail
-kms_cursor_legacy@forked-move,Fail
-kms_cursor_legacy@single-bo,Fail
-kms_cursor_legacy@torture-bo,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
index 1674c8e214d6..87724413174c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
index 5550be5486ed..a4d2f2a7963a 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index 8910afb6acf2..d270af1cca52 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -32,3 +32,10 @@ kms_lease@page-flip-implicit-plane
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/73
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-position-covered
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index 478d7c161616..154b047787b2 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -28,3 +29,11 @@ kms_cursor_crc@cursor-random-max-size
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# It causes other tests to fail, so skip it.
+kms_invalid_mode@overflow-vrefresh
+
+# sc7180 does not have APRIV, so memptrs is not protected.
+# (Preemption is not supported on devices that do not have
+# APRIV, so this is ok)
+msm/msm_mapping@memptrs
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
index cd3d3b0befe4..cafc802cecea 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
@@ -11,3 +11,10 @@ msm/msm_mapping@shadow
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_lease@page-flip-implicit-plane
+
+# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/74
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_cursor_crc@cursor-random-128x128
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index ef9318afcd89..a9bb3e1ad75c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -22,3 +23,8 @@ core_hotunplug.*
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# sc7180 does not have APRIV, so memptrs is not protected.
+# (Preemption is not supported on devices that do not have
+# APRIV, so this is ok)
+msm/msm_mapping@memptrs
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
deleted file mode 100644
index 7a2ab58b706f..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-kms_color@ctm-0-25,Fail
-kms_color@ctm-0-50,Fail
-kms_color@ctm-0-75,Fail
-kms_color@ctm-blue-to-red,Fail
-kms_color@ctm-green-to-red,Fail
-kms_color@ctm-negative,Fail
-kms_color@ctm-red-to-blue,Fail
-kms_color@ctm-signed,Fail
-kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
-kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size,Fail
-kms_cursor_legacy@cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@cursor-vs-flip-toggle,Fail
-kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
-kms_cursor_legacy@flip-vs-cursor-atomic,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
-kms_cursor_legacy@flip-vs-cursor-legacy,Fail
-kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning-vs-hang,Fail
-kms_lease@lease-uevent,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@coverage-7efc,Fail
-kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@overlay,Fail
-kms_plane_cursor@viewport,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
deleted file mode 100644
index 38ec0305c1f4..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ /dev/null
@@ -1,132 +0,0 @@
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-atomic
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-legacy
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-before-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-msm/msm_shrink@copy-gpu-32
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-msm/msm_shrink@copy-gpu-oom-32
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-kms_cursor_legacy@short-flip-before-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-kms_cursor_legacy@flip-vs-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-msm/msm_shrink@copy-mmap-oom-8
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
-# Failure Rate: 50
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc2
-kms_lease@page-flip-implicit-plane
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
-# Failure Rate: 50
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc5
-kms_flip@flip-vs-expired-vblank
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
deleted file mode 100644
index 2ce7f7e23a01..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-# Hangs machine
-kms_bw.*
-
-# Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches
-# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e
-# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1
-msm/msm_mapping@*
-
-# Skip driver specific tests
-^amdgpu.*
-nouveau_.*
-^panfrost.*
-^v3d.*
-^vc4.*
-^vmwgfx*
-
-# Skip intel specific tests
-gem_.*
-i915_.*
-tools_test.*
-
-# Currently fails and causes coverage loss for other tests
-# since core_getversion also fails.
-core_hotunplug.*
-
-# Whole machine hangs
-kms_cursor_crc.*
-
-# IGT test crash
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc2
-kms_content_protection@uevent
-
-# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
-# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
-kms_display_modes@extended-mode-basic
-kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
index 329770c520d9..9450f2a002fd 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index ba9160d4d8eb..61122ea7f008 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -5,6 +5,5 @@ core_setmaster_vs_auth,Crash
dumb_buffer@create-clear,Crash
fbdev@pan,Crash
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
-kms_flip@flip-vs-modeset-vs-hang,Crash
kms_prop_blob@invalid-set-prop,Crash
kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
index eb16b29dee48..71418ea35a17 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 2803d0d80192..45dd8d493f6e 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -2,7 +2,6 @@ dumb_buffer@create-clear,Crash
kms_atomic_transition@modeset-transition,Fail
kms_atomic_transition@modeset-transition-fencing,Fail
kms_atomic_transition@plane-toggle-modeset-transition,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_color@gamma,Fail
kms_color@legacy-gamma,Fail
kms_cursor_crc@cursor-alpha-opaque,Fail
@@ -55,6 +54,7 @@ kms_flip@plain-flip-ts-check,Fail
kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 348b4ce7eb4b..b467991d4094 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -75,58 +75,72 @@ kms_bw@linear-tiling-2-displays-2160x1440p
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f944dd08-c88c-49ae-aff0-274374550a93@collabora.com/T/#u
# Failure Rate: 40
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-2160x1440p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/afa2d3bf-29f2-488d-8cc9-f30d461444b0@collabora.com/T/#u
# Failure Rate: 80
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane_multiple@tiling-none
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/6fdaa97f-c1a5-4216-831f-dbb7c5f90498@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/616aa015-9574-4527-9d07-d8d698bbcc3c@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/7a1b888f-d7db-4ed7-96cd-3975ace837fb@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-absolute-wf_vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f17fffb6-abc4-464e-8465-395311b01f6a@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-blocking-wf-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/9b590b26-1bf9-4951-b6a3-ef6c67e6a1c6@collabora.com/T/#u
# Failure Rate: 60
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-2-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/059545fa-65b1-4f5c-a13e-4d2898679f51@collabora.com/T/#u
# Failure Rate: 20
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@modeset-vs-vblank-race-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/eece9a80-42f3-41f4-86cc-69d8a51b976a@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/63dfd5b7-8a54-44a3-9530-f8dcd77a21d1@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@linear-tiling-1-displays-3840x2160p
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index e8e994d92557..b83ec75161b2 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index c72fee70e739..9749ddb75121 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -157,6 +157,7 @@ kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index adbcdd0f28d2..28e37185bac0 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -19,6 +19,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
index 62428f3c8f31..e3ca6da8cde7 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
@@ -88,3 +88,31 @@ kms_flip@flip-vs-expired-vblank
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_pipe_crc_basic@nonblocking-crc-frame-sequence
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/2364a6bf-e6bc-4741-8c78-cea8bdb06e03@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@modeset-vs-vblank-race
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/f7d72ed9-a783-46d7-b75d-54072bda32a3@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_pipe_crc_basic@suspend-read-crc
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/98d3ba54-bcb9-41ab-adb1-a18ba61ee2e4@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-panning-bottom-right-suspend
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/b58d15eb-094d-4ac2-aad3-83e518c2f55d@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_vblank@ts-continuation-dpms-suspend
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index 319789806271..716d2d4e452d 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -1,5 +1,6 @@
# keeps printing vkms_vblank_simulate: vblank timer overrun and never ends
kms_invalid_mode@int-max-clock
+kms_invalid_mode@overflow-vrefresh
# kernel panic seen with kms_cursor_crc tests
kms_cursor_crc.*
@@ -802,6 +803,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index e17265039ca8..72480db1f00d 100644
--- a/drivers/gpu/drm/clients/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
@@ -31,6 +34,10 @@ MODULE_PARM_DESC(active,
*/
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_dbg(dev, "driver does not support mode-setting, skipping DRM clients\n");
+ return;
+ }
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (!strcmp(drm_client_default, "fbdev")) {
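
For illustration, a minimal sketch of what the new DRIVER_MODESET guard means for callers; the "foo" names below are hypothetical and not part of this patch. A render-only driver can now call drm_client_setup() unconditionally and gets the debug message above instead of an fbdev client:

	/* Hypothetical render-only driver: DRIVER_MODESET is absent, so
	 * drm_client_setup() degrades to a drm_dbg() and returns early. */
	static const struct drm_driver foo_render_driver = {
		.driver_features = DRIVER_GEM | DRIVER_RENDER,
		.name = "foo",
	};

	static int foo_load(struct drm_device *drm)
	{
		/* Safe for every driver now, KMS or not. */
		drm_client_setup(drm, NULL);
		return 0;
	}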
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 8d22b7627d41..df09cf9a8ca1 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -8,6 +8,7 @@ config DRM_DISPLAY_DP_AUX_BUS
config DRM_DISPLAY_HELPER
tristate
depends on DRM
+ select CEC_CORE if DRM_DISPLAY_DP_AUX_CEC || DRM_DISPLAY_HDMI_CEC_HELPER || CEC_NOTIFIER
help
DRM helpers for display adapters.
@@ -16,6 +17,7 @@ if DRM_DISPLAY_HELPER
config DRM_BRIDGE_CONNECTOR
bool
select DRM_DISPLAY_HDMI_AUDIO_HELPER
+ select DRM_DISPLAY_HDMI_CEC_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
help
DRM connector implementation terminating DRM bridge chains.
@@ -23,7 +25,6 @@ config DRM_BRIDGE_CONNECTOR
config DRM_DISPLAY_DP_AUX_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
select DRM_DISPLAY_DP_HELPER
- select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
DisplayPort/USB-C to HDMI adapters.
@@ -82,6 +83,16 @@ config DRM_DISPLAY_HDMI_AUDIO_HELPER
DRM display helpers for HDMI Audio functionality (generic HDMI Codec
implementation).
+config DRM_DISPLAY_HDMI_CEC_HELPER
+ bool
+ help
+ DRM display helpers for HDMI CEC implementation.
+
+config DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER
+ def_bool CEC_NOTIFIER
+ help
+ DRM display helpers for HDMI CEC notifiers implementation.
+
config DRM_DISPLAY_HDMI_HELPER
bool
help
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index b17879b957d5..0ff4a1ad0222 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -16,6 +16,10 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_AUDIO_HELPER) += \
drm_hdmi_audio_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_HELPER) += \
+ drm_hdmi_cec_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER) += \
+ drm_hdmi_cec_notifier_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 30c736fc0067..5eb7e9bfe361 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -3,6 +3,7 @@
* Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -20,6 +21,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -98,6 +100,28 @@ struct drm_bridge_connector {
* HDMI connector infrastructure, if any (see &DRM_BRIDGE_OP_HDMI).
*/
struct drm_bridge *bridge_hdmi;
+ /**
+ * @bridge_hdmi_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * HDMI Audio infrastructure, if any (see &DRM_BRIDGE_OP_HDMI_AUDIO).
+ */
+ struct drm_bridge *bridge_hdmi_audio;
+ /**
+ * @bridge_dp_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * DisplayPort Audio infrastructure, if any (see
+ * &DRM_BRIDGE_OP_DP_AUDIO).
+ */
+ struct drm_bridge *bridge_dp_audio;
+ /**
+ * @bridge_hdmi_cec:
+ *
+	 * The bridge in the chain that implements CEC support, if any (see
+	 * DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER and DRM_BRIDGE_OP_HDMI_CEC_ADAPTER).
+ */
+ struct drm_bridge *bridge_hdmi_cec;
};
#define to_drm_bridge_connector(x) \
@@ -186,7 +210,7 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
if (detect) {
- status = detect->funcs->detect(detect);
+ status = detect->funcs->detect(detect, connector);
if (hdmi)
drm_atomic_helper_connector_hdmi_hotplug(connector, status);
@@ -433,14 +457,25 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
- if (!bridge->funcs->hdmi_audio_startup)
- return 0;
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(bridge, connector);
+ }
- return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ if (!bridge->funcs->dp_audio_startup)
+ return 0;
+
+ return bridge->funcs->dp_audio_startup(bridge, connector);
+ }
+
+ return -EINVAL;
}
static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
@@ -451,11 +486,19 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ return bridge->funcs->hdmi_audio_prepare(bridge, connector, fmt, hparms);
+ }
- return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ return bridge->funcs->dp_audio_prepare(bridge, connector, fmt, hparms);
+ }
+
+ return -EINVAL;
}
static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
@@ -464,11 +507,15 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+ bridge->funcs->hdmi_audio_shutdown(bridge, connector);
+ }
- bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+ bridge->funcs->dp_audio_shutdown(bridge, connector);
+ }
}
static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
@@ -478,15 +525,27 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ if (!bridge->funcs->hdmi_audio_mute_stream)
+ return -ENOTSUPP;
- if (bridge->funcs->hdmi_audio_mute_stream)
- return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
+ return bridge->funcs->hdmi_audio_mute_stream(bridge, connector,
enable, direction);
- else
- return -ENOTSUPP;
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ if (!bridge->funcs->dp_audio_mute_stream)
+ return -ENOTSUPP;
+
+ return bridge->funcs->dp_audio_mute_stream(bridge, connector,
+ enable, direction);
+ }
+
+ return -EINVAL;
}
static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
@@ -496,6 +555,65 @@ static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
.mute_stream = drm_bridge_connector_audio_mute_stream,
};
+static int drm_bridge_connector_hdmi_cec_enable(struct drm_connector *connector, bool enable)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_enable(bridge, enable);
+}
+
+static int drm_bridge_connector_hdmi_cec_log_addr(struct drm_connector *connector, u8 logical_addr)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_log_addr(bridge, logical_addr);
+}
+
+static int drm_bridge_connector_hdmi_cec_transmit(struct drm_connector *connector,
+ u8 attempts,
+ u32 signal_free_time,
+ struct cec_msg *msg)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_transmit(bridge, attempts,
+ signal_free_time,
+ msg);
+}
+
+static int drm_bridge_connector_hdmi_cec_init(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ if (!bridge->funcs->hdmi_cec_init)
+ return 0;
+
+ return bridge->funcs->hdmi_cec_init(bridge, connector);
+}
+
+static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_funcs = {
+ .init = drm_bridge_connector_hdmi_cec_init,
+ .enable = drm_bridge_connector_hdmi_cec_enable,
+ .log_addr = drm_bridge_connector_hdmi_cec_log_addr,
+ .transmit = drm_bridge_connector_hdmi_cec_transmit,
+};
+
/* -----------------------------------------------------------------------------
* Bridge Connector Initialisation
*/
@@ -576,6 +694,61 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc = bridge->max_bpc;
}
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_AUDIO) {
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_hdmi_audio = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) {
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->dp_audio_prepare ||
+ !bridge->funcs->dp_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_dp_audio = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = bridge;
+
+ if (!bridge->funcs->hdmi_cec_enable ||
+ !bridge->funcs->hdmi_cec_log_addr ||
+ !bridge->funcs->hdmi_cec_transmit)
+ return ERR_PTR(-EINVAL);
+ }
+
if (!drm_bridge_get_next_bridge(bridge))
connector_type = bridge->type;
@@ -611,22 +784,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc);
if (ret)
return ERR_PTR(ret);
-
- if (bridge->hdmi_audio_max_i2s_playback_channels ||
- bridge->hdmi_audio_spdif_playback) {
- if (!bridge->funcs->hdmi_audio_prepare ||
- !bridge->funcs->hdmi_audio_shutdown)
- return ERR_PTR(-EINVAL);
-
- ret = drm_connector_hdmi_audio_init(connector,
- bridge->hdmi_audio_dev,
- &drm_bridge_connector_hdmi_audio_funcs,
- bridge->hdmi_audio_max_i2s_playback_channels,
- bridge->hdmi_audio_spdif_playback,
- bridge->hdmi_audio_dai_port);
- if (ret)
- return ERR_PTR(ret);
- }
} else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
@@ -635,6 +792,48 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(ret);
}
+ if (bridge_connector->bridge_hdmi_audio ||
+ bridge_connector->bridge_dp_audio) {
+ struct device *dev;
+ struct drm_bridge *bridge;
+
+ if (bridge_connector->bridge_hdmi_audio)
+ bridge = bridge_connector->bridge_hdmi_audio;
+ else
+ bridge = bridge_connector->bridge_dp_audio;
+
+ dev = bridge->hdmi_audio_dev;
+
+ ret = drm_connector_hdmi_audio_init(connector, dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_i2s_formats,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+		struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;
+
+		ret = drmm_connector_hdmi_cec_notifier_register(connector,
+								NULL,
+								bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+		struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;
+
+		ret = drmm_connector_hdmi_cec_register(connector,
+						       &drm_bridge_connector_hdmi_cec_funcs,
+						       bridge->hdmi_cec_adapter_name,
+						       bridge->hdmi_cec_available_las,
+						       bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
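
A usage sketch of the new CEC plumbing; every "foo" name here is invented for illustration, with callback signatures taken from the wrappers above. A bridge that owns a CEC adapter advertises DRM_BRIDGE_OP_HDMI_CEC_ADAPTER, fills the hdmi_cec_* fields, and must provide the enable/log_addr/transmit hooks that the validation above insists on (hdmi_cec_init stays optional):

	static int foo_cec_init(struct drm_bridge *bridge,
				struct drm_connector *connector);
	static int foo_cec_enable(struct drm_bridge *bridge, bool enable);
	static int foo_cec_log_addr(struct drm_bridge *bridge, u8 logical_addr);
	static int foo_cec_transmit(struct drm_bridge *bridge, u8 attempts,
				    u32 signal_free_time, struct cec_msg *msg);

	static const struct drm_bridge_funcs foo_bridge_funcs = {
		/* ...the usual attach/atomic hooks... */
		.hdmi_cec_init = foo_cec_init,		/* optional */
		.hdmi_cec_enable = foo_cec_enable,	/* required */
		.hdmi_cec_log_addr = foo_cec_log_addr,	/* required */
		.hdmi_cec_transmit = foo_cec_transmit,	/* required */
	};

	static void foo_bridge_setup(struct drm_bridge *bridge, struct device *dev)
	{
		bridge->funcs = &foo_bridge_funcs;
		bridge->ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
		bridge->hdmi_cec_dev = dev;
		bridge->hdmi_cec_adapter_name = "foo-cec";
		bridge->hdmi_cec_available_las = CEC_MAX_LOG_ADDRS;
	}

A notifier-only bridge instead sets DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER and only needs hdmi_cec_dev; drm_bridge_connector_init() then registers a CEC notifier rather than a full adapter.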
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index ec7eac6b595f..2d279e82922f 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -12,6 +12,7 @@
* to perform transactions on that bus.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -57,7 +58,7 @@ static int dp_aux_ep_probe(struct device *dev)
container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep);
int ret;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
if (ret)
return dev_err_probe(dev, ret, "Failed to attach to PM Domain\n");
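
The boolean argument of dev_pm_domain_attach() has been replaced by a flags word; PD_FLAG_ATTACH_POWER_ON requests the same power-on-during-attach behaviour that passing "true" used to. A sketch of the call-site pattern, assuming only the flag used by this patch:

	#include <linux/pm_domain.h>

	/* Power the PM domain on while attaching, matching the old
	 * dev_pm_domain_attach(dev, true) semantics. */
	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to attach to PM Domain\n");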
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 56a4965e518c..3b50d817c839 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -5,6 +5,7 @@
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -96,7 +97,7 @@ static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
ssize_t err = 0;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
return (enable && err < 0) ? err : 0;
}
@@ -112,7 +113,7 @@ static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
mask[0] = la_mask & 0xff;
mask[1] = la_mask >> 8;
- err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
}
@@ -123,15 +124,14 @@ static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
unsigned int retries = min(5, attempts - 1);
ssize_t err;
- err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
- msg->msg, msg->len);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
if (err < 0)
return err;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
- (msg->len - 1) | (retries << 4) |
- DP_CEC_TX_MESSAGE_SEND);
- return err < 0 ? err : 0;
+ return drm_dp_dpcd_write_byte(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
}
static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
@@ -144,13 +144,13 @@ static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
return 0;
- err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
- if (err >= 0) {
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (!err) {
if (enable)
val |= DP_CEC_SNOOPING_ENABLE;
else
val &= ~DP_CEC_SNOOPING_ENABLE;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
}
return (enable && err < 0) ? err : 0;
}
@@ -194,7 +194,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
u8 rx_msg_info;
ssize_t err;
- err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
if (err < 0)
return err;
@@ -202,7 +202,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
return 0;
msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
- err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ err = drm_dp_dpcd_read_data(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
if (err < 0)
return err;
@@ -215,7 +215,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
struct cec_adapter *adap = aux->cec.adap;
u8 flags;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
return;
if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
@@ -230,7 +230,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
(DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES);
- drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+ drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}
/**
@@ -253,13 +253,13 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux)
if (!aux->cec.adap)
goto unlock;
- ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
- &cec_irq);
+ ret = drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
goto unlock;
drm_dp_cec_handle_irq(aux);
- drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+ drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
unlock:
mutex_unlock(&aux->cec.lock);
}
@@ -269,7 +269,7 @@ static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
{
u8 cap = 0;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) < 0 ||
!(cap & DP_CEC_TUNNELING_CAPABLE))
return false;
if (cec_cap)
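
These conversions are not purely mechanical: the legacy drm_dp_dpcd_readb()/drm_dp_dpcd_writeb() helpers return the number of bytes transferred (1 on success), while the new _read_byte/_write_byte helpers return 0 on success or a negative error code, which is why "err >= 0" becomes "!err" and "!= 1" becomes "< 0" above. A minimal contrast, as a sketch:

	u8 val;

	/* Legacy helper: returns a byte count, so success is exactly 1. */
	if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val) != 1)
		return -EIO;

	/* New helper: returns 0 on success, negative error code on failure. */
	if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val) < 0)
		return -EIO;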
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index dbce1c3f4969..1ecc3df7e316 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/dynamic_debug.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/iopoll.h>
@@ -327,7 +328,7 @@ static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SI
if (offset < DP_RECEIVER_CAP_SIZE) {
rd_interval = dpcd[offset];
} else {
- if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, offset, &rd_interval) < 0) {
drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* arbitrary default delay */
@@ -358,7 +359,7 @@ int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux)
int unit;
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) < 0) {
drm_err(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* default to max */
@@ -692,6 +693,34 @@ void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
/**
+ * drm_dp_dpcd_set_probe() - Set whether probing before DPCD access is done
+ * @aux: DisplayPort AUX channel
+ * @enable: Enable the probing if required
+ */
+void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable)
+{
+ WRITE_ONCE(aux->dpcd_probe_disabled, !enable);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_set_probe);
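
A usage sketch of the new opt-out; the quirk condition below is hypothetical:

	/* Skip the throw-away wake-up read on sinks known not to need the
	 * HP ZR24w workaround (hypothetical quirk flag). */
	if (sink_has_no_dpcd_wakeup_quirk)
		drm_dp_dpcd_set_probe(&dp->aux, false);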
+
+static bool dpcd_access_needs_probe(struct drm_dp_aux *aux)
+{
+ /*
+ * HP ZR24w corrupts the first DPCD access after entering power save
+ * mode. Eg. on a read, the entire buffer will be filled with the same
+	 * mode. E.g. on a read, the entire buffer will be filled with the same
+ * about. Afterwards things will work correctly until the monitor
+ * gets woken up and subsequently re-enters power save mode.
+ *
+ * The user pressing any button on the monitor is enough to wake it
+ * up, so there is no particularly good place to do the workaround.
+ * We just have to do it before any DPCD access and hope that the
+ * monitor doesn't power down exactly after the throw away read.
+ */
+ return !aux->is_remote && !READ_ONCE(aux->dpcd_probe_disabled);
+}
+
+/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -704,26 +733,16 @@ EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_read_data() instead.
*/
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
int ret;
- /*
- * HP ZR24w corrupts the first DPCD access after entering power save
- * mode. Eg. on a read, the entire buffer will be filled with the same
- * byte. Do a throw away read to avoid corrupting anything we care
- * about. Afterwards things will work correctly until the monitor
- * gets woken up and subsequently re-enters power save mode.
- *
- * The user pressing any button on the monitor is enough to wake it
- * up, so there is no particularly good place to do the workaround.
- * We just have to do it before any DPCD access and hope that the
- * monitor doesn't power down exactly after the throw away read.
- */
- if (!aux->is_remote) {
- ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
+ if (dpcd_access_needs_probe(aux)) {
+ ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET);
if (ret < 0)
return ret;
}
@@ -752,6 +771,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_write_data() instead.
*/
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -774,14 +795,13 @@ EXPORT_SYMBOL(drm_dp_dpcd_write);
* @aux: DisplayPort AUX channel
* @status: buffer to store the link status in (must be at least 6 bytes)
*
- * Returns the number of bytes transferred on success or a negative error
- * code on failure.
+ * Returns a negative error code on failure or 0 on success.
*/
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE])
{
- return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
- DP_LINK_STATUS_SIZE);
+ return drm_dp_dpcd_read_data(aux, DP_LANE0_1_STATUS, status,
+ DP_LINK_STATUS_SIZE);
}
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
@@ -804,30 +824,20 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
{
int ret;
- if (dp_phy == DP_PHY_DPRX) {
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS,
- link_status,
- DP_LINK_STATUS_SIZE);
+ if (dp_phy == DP_PHY_DPRX)
+ return drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE);
- if (ret < 0)
- return ret;
-
- WARN_ON(ret != DP_LINK_STATUS_SIZE);
-
- return 0;
- }
-
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
- link_status,
- DP_LINK_STATUS_SIZE - 1);
+ ret = drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
+ link_status,
+ DP_LINK_STATUS_SIZE - 1);
if (ret < 0)
return ret;
- WARN_ON(ret != DP_LINK_STATUS_SIZE - 1);
-
/* Convert the LTTPR to the sink PHY link status layout */
memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1],
&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS],
@@ -838,12 +848,81 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+	 * Control Field", register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_up);
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_down);
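
Callers are expected to pass the cached DPCD revision, so pre-1.1 sinks without DP_SET_POWER are silently skipped. A sketch of the intended pairing (the "dp" device is hypothetical):

	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	int ret;

	ret = drm_dp_read_dpcd_caps(&dp->aux, dpcd);
	if (ret < 0)
		return ret;

	ret = drm_dp_link_power_up(&dp->aux, dpcd[DP_DPCD_REV]);
	if (ret < 0)
		return ret;

	/* ...link training, scanout... */

	drm_dp_link_power_down(&dp->aux, dpcd[DP_DPCD_REV]);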
+
static int read_payload_update_status(struct drm_dp_aux *aux)
{
int ret;
u8 status;
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0)
return ret;
@@ -870,21 +949,21 @@ int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
int ret;
int retries = 0;
- drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
+ drm_dp_dpcd_write_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
payload_alloc[0] = vcpid;
payload_alloc[1] = start_time_slot;
payload_alloc[2] = time_slot_count;
- ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
+ ret = drm_dp_dpcd_write_data(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
goto fail;
}
retry:
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
goto fail;
@@ -1040,15 +1119,15 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
{
u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
- if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
- if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_TEST_REQUEST, &link_edid_read) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_TEST_REQUEST);
return false;
@@ -1061,23 +1140,23 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
return false;
}
- if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
/* send back checksum for the last edid extension block data */
- if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
- &real_edid_checksum, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_EDID_CHECKSUM,
+ real_edid_checksum) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_EDID_CHECKSUM);
return false;
}
test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
- if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_RESPONSE, test_resp) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_RESPONSE);
return false;
@@ -1114,12 +1193,10 @@ static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
return 0;
- ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext,
- sizeof(dpcd_ext));
+ ret = drm_dp_dpcd_read_data(aux, DP_DP13_DPCD_REV, &dpcd_ext,
+ sizeof(dpcd_ext));
if (ret < 0)
return ret;
- if (ret != sizeof(dpcd_ext))
- return -EIO;
if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
drm_dbg_kms(aux->drm_dev,
@@ -1156,10 +1233,10 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
{
int ret;
- ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
if (ret < 0)
return ret;
- if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0)
+ if (dpcd[DP_DPCD_REV] == 0)
return -EIO;
ret = drm_dp_read_extended_dpcd_caps(aux, dpcd);
@@ -1209,11 +1286,9 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;
- ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
+ ret = drm_dp_dpcd_read_data(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
if (ret < 0)
return ret;
- if (ret != len)
- return -EIO;
drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports);
@@ -1570,7 +1645,7 @@ EXPORT_SYMBOL(drm_dp_downstream_mode);
*/
int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
{
- return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+ return drm_dp_dpcd_read_data(aux, DP_BRANCH_ID, id, 6);
}
EXPORT_SYMBOL(drm_dp_downstream_id);
@@ -1635,13 +1710,13 @@ void drm_dp_downstream_debug(struct seq_file *m,
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+ if (!len)
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_SW_REV, rev, 2);
+ if (!len)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
if (detailed_cap_info) {
@@ -1779,11 +1854,9 @@ int drm_dp_read_sink_count(struct drm_dp_aux *aux)
u8 count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
+ ret = drm_dp_dpcd_read_byte(aux, DP_SINK_COUNT, &count);
if (ret < 0)
return ret;
- if (ret != 1)
- return -EIO;
return DP_GET_SINK_COUNT(count);
}
@@ -2081,14 +2154,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
- drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
- /* Send a bare address packet to start the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.buffer = NULL;
- msg.size = 0;
- err = drm_dp_i2c_do_msg(aux, &msg);
+
+ if (!aux->no_zero_sized) {
+ drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+ /* Send a bare address packet to start the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.buffer = NULL;
+ msg.size = 0;
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ }
/*
	 * Reset msg.request in case it got
@@ -2107,6 +2183,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.buffer = msgs[i].buf + j;
msg.size = min(transfer_size, msgs[i].len - j);
+ if (j + msg.size == msgs[i].len && aux->no_zero_sized)
+ msg.request &= ~DP_AUX_I2C_MOT;
err = drm_dp_i2c_drain_msg(aux, &msg);
/*
@@ -2124,15 +2202,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
if (err >= 0)
err = num;
- /* Send a bare address packet to close out the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.request &= ~DP_AUX_I2C_MOT;
- msg.buffer = NULL;
- msg.size = 0;
- (void)drm_dp_i2c_do_msg(aux, &msg);
+ if (!aux->no_zero_sized) {
+ /* Send a bare address packet to close out the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.request &= ~DP_AUX_I2C_MOT;
+ msg.buffer = NULL;
+ msg.size = 0;
+ (void)drm_dp_i2c_do_msg(aux, &msg);
+ }
return err;
}
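
Controllers whose AUX hardware cannot emit zero-sized (bare address) transactions can now opt out: the helper then skips the opening and closing bare-address packets and instead clears DP_AUX_I2C_MOT on the final data chunk to end the transaction. The opt-out is a flag set at AUX init time, sketched here with a hypothetical driver:

	/* Hypothetical AUX setup: this hardware rejects zero-sized
	 * i2c-over-AUX transfers. */
	dp->aux.name = "foo dp aux";
	dp->aux.transfer = foo_aux_transfer;
	dp->aux.no_zero_sized = true;
	drm_dp_aux_init(&dp->aux);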
@@ -2172,13 +2252,13 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
u8 buf, count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
WARN_ON(!(buf & DP_TEST_SINK_START));
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK_MISC, &buf);
if (ret < 0)
return ret;
@@ -2192,11 +2272,7 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
* At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes
* per component (RGB or CrYCb).
*/
- ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_read_data(aux, DP_TEST_CRC_R_CR, crc, 6);
}
static void drm_dp_aux_crc_work(struct work_struct *work)
@@ -2395,11 +2471,11 @@ int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2422,11 +2498,11 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2512,11 +2588,7 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
struct drm_dp_dpcd_ident *ident)
{
- int ret;
-
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
-
- return ret < 0 ? ret : 0;
+ return drm_dp_dpcd_read_data(aux, offset, ident, sizeof(*ident));
}
static void drm_dp_dump_desc(struct drm_dp_aux *aux,
@@ -2774,13 +2846,11 @@ static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
int ret;
for (offset = 0; offset < buf_size; offset += block_size) {
- ret = drm_dp_dpcd_read(aux,
- address + offset,
- &buf[offset], block_size);
+ ret = drm_dp_dpcd_read_data(aux,
+ address + offset,
+ &buf[offset], block_size);
if (ret < 0)
return ret;
-
- WARN_ON(ret != block_size);
}
return 0;
@@ -2995,12 +3065,12 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
int err;
u8 rate, lanes;
- err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LINK_RATE, &rate);
if (err < 0)
return err;
data->link_rate = drm_dp_bw_code_to_link_rate(rate);
- err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LANE_COUNT, &lanes);
if (err < 0)
return err;
data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
@@ -3008,22 +3078,22 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
if (lanes & DP_ENHANCED_FRAME_CAP)
data->enhanced_frame_cap = true;
- err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+ err = drm_dp_dpcd_read_byte(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
if (err < 0)
return err;
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
- &data->custom80, sizeof(data->custom80));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ &data->custom80, sizeof(data->custom80));
if (err < 0)
return err;
break;
case DP_PHY_TEST_PATTERN_CP2520:
- err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
- &data->hbr2_reset,
- sizeof(data->hbr2_reset));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+ &data->hbr2_reset,
+ sizeof(data->hbr2_reset));
if (err < 0)
return err;
}
@@ -3050,15 +3120,15 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
DP_LINK_QUAL_PATTERN_11_MASK;
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux, DP_TRAINING_PATTERN_SET,
+ test_pattern);
if (err < 0)
return err;
} else {
for (i = 0; i < data->num_lanes; i++) {
- err = drm_dp_dpcd_writeb(aux,
- DP_LINK_QUAL_LANE0_SET + i,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux,
+ DP_LINK_QUAL_LANE0_SET + i,
+ test_pattern);
if (err < 0)
return err;
}
@@ -3265,8 +3335,8 @@ bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_C
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
- &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev,
"Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
return false;
@@ -3290,7 +3360,7 @@ bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
return false;
}
@@ -3421,16 +3491,13 @@ EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
*/
int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
{
- int ret;
u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
DP_PCON_ENABLE_LINK_FRL_MODE;
if (enable_frl_ready_hpd)
buf |= DP_PCON_ENABLE_HPD_READY;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
-
- return ret;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
@@ -3445,7 +3512,7 @@ bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3474,7 +3541,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
@@ -3509,11 +3576,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
return -EINVAL;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
@@ -3539,7 +3602,7 @@ int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
else
buf &= ~DP_PCON_FRL_LINK_TRAIN_EXTENDED;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
@@ -3555,13 +3618,7 @@ EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
*/
int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
}
EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
@@ -3576,7 +3633,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
int ret;
u8 buf = 0;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
@@ -3585,11 +3642,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
return -EINVAL;
}
buf |= DP_PCON_ENABLE_HDMI_LINK;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
@@ -3604,7 +3657,7 @@ bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3629,7 +3682,7 @@ int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
int mode;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
if (ret < 0)
return ret;
@@ -3658,7 +3711,7 @@ void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
for (i = 0; i < hdmi->max_lanes; i++) {
- if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
return;
error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
@@ -3793,7 +3846,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3804,11 +3857,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
buf |= pps_buf_config << 2;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
/**
@@ -3820,13 +3869,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
*/
int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_default);
@@ -3842,15 +3885,11 @@ int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
- if (ret < 0)
- return ret;
-
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, pps_buf, 128);
if (ret < 0)
return ret;
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
@@ -3867,21 +3906,17 @@ int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
- if (ret < 0)
- return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
if (ret < 0)
return ret;
-
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
if (ret < 0)
return ret;
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
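A hypothetical usage sketch of the parameter-override path above; the numeric values are illustrative assumptions only, with each 2-byte field stored little-endian as implied by the write order:

    #include <drm/display/drm_dp_helper.h>

    static int my_pcon_set_pps(struct drm_dp_aux *aux)
    {
            u8 pps_param[6] = {
                    108, 0,         /* slice height = 108 */
                    64, 1,          /* slice width  = 320 (64 + 1 * 256) */
                    128, 0,         /* bits-per-pixel field = 128 */
            };

            return drm_dp_pcon_pps_override_param(aux, pps_param);
    }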
@@ -3897,7 +3932,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3906,11 +3941,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
else
buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
@@ -3926,28 +3957,36 @@ EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
* Returns: %0 on success, negative error code on failure
*/
int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level)
+ u32 level)
{
int ret;
- u8 buf[2] = { 0 };
+ unsigned int offset = DP_EDP_BACKLIGHT_BRIGHTNESS_MSB;
+ u8 buf[3] = { 0 };
/* The panel uses the PWM for controlling brightness levels */
- if (!bl->aux_set)
+ if (!(bl->aux_set || bl->luminance_set))
return 0;
- if (bl->lsb_reg_used) {
+ if (bl->luminance_set) {
+ level = level * 1000;
+ level &= 0xffffff;
+ buf[0] = (level & 0x0000ff);
+ buf[1] = (level & 0x00ff00) >> 8;
+ buf[2] = (level & 0xff0000) >> 16;
+ offset = DP_EDP_PANEL_TARGET_LUMINANCE_VALUE;
+ } else if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
} else {
buf[0] = level;
}
- ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_write_data(aux, offset, buf, sizeof(buf));
+ if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
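The luminance branch above packs the target brightness as a 24-bit little-endian value in millinits. A standalone sketch of that encoding, assuming the level arrives in nits (the helper name is hypothetical):

    #include <linux/types.h>

    /* Hypothetical helper mirroring the packing above: nits to a 24-bit
     * little-endian millinits buffer for DP_EDP_PANEL_TARGET_LUMINANCE_VALUE.
     */
    static void edp_pack_target_luminance(u32 nits, u8 buf[3])
    {
            u32 mnits = (nits * 1000) & 0xffffff;   /* nits -> millinits */

            buf[0] = mnits & 0xff;
            buf[1] = (mnits >> 8) & 0xff;
            buf[2] = (mnits >> 16) & 0xff;
    }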
@@ -3965,22 +4004,22 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
if (!bl->aux_enable)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (enable)
buf |= DP_EDP_BACKLIGHT_ENABLE;
else
buf &= ~DP_EDP_BACKLIGHT_ENABLE;
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
@@ -4005,7 +4044,7 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
* Returns: %0 on success, negative error code on failure.
*/
int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- const u16 level)
+ const u32 level)
{
int ret;
u8 dpcd_buf;
@@ -4015,16 +4054,20 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
else
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
+ if (bl->luminance_set)
+ dpcd_buf |= DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+
if (bl->pwmgen_bit_count) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
}
if (bl->pwm_freq_pre_divider) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_FREQ_SET,
+ bl->pwm_freq_pre_divider);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev,
"%s: Failed to write aux backlight frequency: %d\n",
aux->name, ret);
@@ -4032,8 +4075,8 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
@@ -4088,8 +4131,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
if (!bl->aux_set)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
aux->name, ret);
return -ENODEV;
@@ -4122,14 +4165,14 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
* - FxP is within 25% of desired value.
* Note: 25% is an arbitrary value and may need some tweaking.
*/
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
aux->name, ret);
return 0;
}
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
aux->name, ret);
return 0;
@@ -4154,8 +4197,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
break;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
return 0;
@@ -4177,11 +4220,11 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
u8 *current_mode)
{
int ret;
- u8 buf[2];
+ u8 buf[3];
u8 mode_reg;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
@@ -4194,17 +4237,37 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
- ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
- if (ret != size) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n",
- aux->name, ret);
- return ret < 0 ? ret : -EIO;
- }
+ if (bl->luminance_set) {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ buf, sizeof(buf));
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Failed to read backlight level: %d\n",
+ aux->name, ret);
+ return ret;
+ }
- if (bl->lsb_reg_used)
- return (buf[0] << 8) | buf[1];
- else
- return buf[0];
+ /*
+ * In case luminance is set we want to send the value back in nits, but
+ * since DP_EDP_PANEL_TARGET_LUMINANCE_VALUE stores values in millinits
+ * we need to divide by 1000.
+ */
+ return (buf[0] | buf[1] << 8 | buf[2] << 16) / 1000;
+ } else {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ buf, size);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Failed to read backlight level: %d\n",
+ aux->name, ret);
+ return ret;
+ }
+
+ if (bl->lsb_reg_used)
+ return (buf[0] << 8) | buf[1];
+ else
+ return buf[0];
+ }
}
/*
@@ -4219,10 +4282,12 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
* interface.
* @aux: The DP aux device to use for probing
* @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight
+ * @max_luminance: maximum luminance in nits, used when @need_luminance is %true
* @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz
* @edp_dpcd: A cached copy of the eDP DPCD
* @current_level: Where to store the probed brightness level, if any
* @current_mode: Where to store the currently set backlight control mode
+ * @need_luminance: %true if the backlight should be controlled using luminance values (nits)
*
* Initializes a &drm_edp_backlight_info struct by probing @aux for its backlight
* capabilities, along with the current and maximum supported brightness levels.
@@ -4234,8 +4299,9 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
*/
int
drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u32 max_luminance,
u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
- u16 *current_level, u8 *current_mode)
+ u32 *current_level, u8 *current_mode, bool need_luminance)
{
int ret;
@@ -4245,18 +4311,26 @@ drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl
bl->aux_set = true;
if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
bl->lsb_reg_used = true;
+ if ((edp_dpcd[0] & DP_EDP_15) &&
+ (edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) &&
+ need_luminance)
+ bl->luminance_set = true;
/* Sanity check caps */
- if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
+ if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP) &&
+ !bl->luminance_set) {
drm_dbg_kms(aux->drm_dev,
- "%s: Panel supports neither AUX or PWM brightness control? Aborting\n",
+ "%s: Panel does not support AUX, PWM or luminance-based brightness control. Aborting\n",
aux->name);
return -EINVAL;
}
- ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
- if (ret < 0)
- return ret;
+ if (bl->luminance_set) {
+ bl->max = max_luminance;
+ } else {
+ ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
+ if (ret < 0)
+ return ret;
+ }
ret = drm_edp_backlight_probe_state(aux, bl, current_mode);
if (ret < 0)
@@ -4335,7 +4409,7 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
{
struct dp_aux_backlight *bl;
struct backlight_properties props = { 0 };
- u16 current_level;
+ u32 current_level;
u8 current_mode;
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
int ret;
@@ -4343,8 +4417,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
if (!panel || !panel->dev || !aux)
return -EINVAL;
- ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
- EDP_DISPLAY_CTL_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd,
+ EDP_DISPLAY_CTL_CAP_SIZE);
if (ret < 0)
return ret;
@@ -4359,8 +4433,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
bl->aux = aux;
- ret = drm_edp_backlight_init(aux, &bl->info, 0, edp_dpcd,
- &current_level, &current_mode);
+ ret = drm_edp_backlight_init(aux, &bl->info, 0, 0, edp_dpcd,
+ &current_level, &current_mode, false);
if (ret < 0)
return ret;
@@ -4385,8 +4459,9 @@ EXPORT_SYMBOL(drm_panel_dp_aux_backlight);
#endif
/* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */
-static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
- int symbol_size, bool is_mst)
+static int drm_dp_link_data_symbol_cycles(int lane_count, int pixels,
+ int bpp_x16, int symbol_size,
+ bool is_mst)
{
int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count);
int align = is_mst ? 4 / lane_count : 1;
@@ -4394,22 +4469,42 @@ static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
return ALIGN(cycles, align);
}
-static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count,
- int bpp_x16, int symbol_size, bool is_mst)
+/**
+ * drm_dp_link_symbol_cycles - calculate the link symbol count with or without DSC
+ * @lane_count: DP link lane count
+ * @pixels: number of pixels in a scanline
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
+ * @bpp_x16: bits per pixel in .4 binary fixed format
+ * @symbol_size: DP symbol size
+ * @is_mst: %true for MST and %false for SST
+ *
+ * Calculate the link symbol cycles for both the DSC (@dsc_slice_count != 0)
+ * and non-DSC (@dsc_slice_count == 0) cases and return the count.
+ */
+int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count,
+ int bpp_x16, int symbol_size, bool is_mst)
{
+ int slice_count = dsc_slice_count ? : 1;
int slice_pixels = DIV_ROUND_UP(pixels, slice_count);
- int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels,
- bpp_x16, symbol_size, is_mst);
- int slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
+ int slice_data_cycles = drm_dp_link_data_symbol_cycles(lane_count,
+ slice_pixels,
+ bpp_x16,
+ symbol_size,
+ is_mst);
+ int slice_eoc_cycles = 0;
+
+ if (dsc_slice_count)
+ slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
return slice_count * (slice_data_cycles + slice_eoc_cycles);
}
+EXPORT_SYMBOL(drm_dp_link_symbol_cycles);
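A worked example of the now-exported helper, following its own arithmetic: 1920 active pixels, 4 lanes, 24 bpp (bpp_x16 = 24 * 16 = 384), 8b/10b symbols (symbol_size = 8), SST, no DSC gives DIV_ROUND_UP(1920 * 384, 16 * 8 * 4) = 737280 / 512 = 1440 cycles, with no MST alignment padding and no end-of-chunk symbols since dsc_slice_count is 0:

    int cycles = drm_dp_link_symbol_cycles(4, 1920, 0, 24 * 16, 8, false);
    /* cycles == 1440 */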
/**
* drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream
* @lane_count: DP link lane count
* @hactive: pixel count of the active period in one scanline of the stream
- * @dsc_slice_count: DSC slice count if @flags/DRM_DP_LINK_BW_OVERHEAD_DSC is set
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
* @bpp_x16: bits per pixel in .4 binary fixed point
* @flags: DRM_DP_OVERHEAD_x flags
*
@@ -4423,7 +4518,7 @@ static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_c
* as well as the stream's
* - @hactive timing
* - @bpp_x16 color depth
- * - compression mode (@flags / %DRM_DP_OVERHEAD_DSC).
+ * - compression mode (@dsc_slice_count != 0)
* Note that this overhead doesn't account for the 8b/10b, 128b/132b
* channel coding efficiency, for that see
* @drm_dp_link_bw_channel_coding_efficiency().
@@ -4478,15 +4573,10 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) &&
(flags & DRM_DP_BW_OVERHEAD_FEC));
- if (flags & DRM_DP_BW_OVERHEAD_DSC)
- symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive,
- dsc_slice_count,
- bpp_x16, symbol_size,
- is_mst);
- else
- symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
- bpp_x16, symbol_size,
- is_mst);
+ symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
+ dsc_slice_count,
+ bpp_x16, symbol_size,
+ is_mst);
return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count,
overhead * 16),
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 3a1f1ffc7b55..64e5c176d5cc 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -23,6 +23,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -2192,24 +2193,20 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
guid_copy(&mstb->guid, guid);
if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ struct drm_dp_aux *aux;
u8 buf[UUID_SIZE];
export_guid(buf, &mstb->guid);
- if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- mstb->port_parent,
- DP_GUID, sizeof(buf), buf);
- } else {
- ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, buf, sizeof(buf));
- }
- }
+ if (mstb->port_parent)
+ aux = &mstb->port_parent->aux;
+ else
+ aux = mstb->mgr->aux;
- if (ret < 16 && ret > 0)
- return -EPROTO;
+ ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf));
+ }
- return ret == 16 ? 0 : ret;
+ return ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2744,14 +2741,13 @@ retry:
do {
tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
- ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
- &msg[offset],
- tosend);
- if (ret != tosend) {
- if (ret == -EIO && retries < 5) {
- retries++;
- goto retry;
- }
+ ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ } else if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
return -EIO;
@@ -3624,7 +3620,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
return DRM_DP_SST;
- if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+ if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
return DRM_DP_SST;
if (mstm_cap & DP_MST_CAP)
@@ -3679,10 +3675,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0)
goto out_unlock;
@@ -3697,7 +3693,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mstb = mgr->mst_primary;
mgr->mst_primary = NULL;
/* this can fail if the device is gone */
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
@@ -3763,8 +3759,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
@@ -3813,18 +3809,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
goto out_fail;
}
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
@@ -3883,8 +3879,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
*mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
@@ -3922,9 +3918,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
- replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
len, ret);
return false;
@@ -4881,9 +4877,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
int i;
for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
- if (drm_dp_dpcd_read(mgr->aux,
- DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
- &buf[i], 16) != 16)
+ if (drm_dp_dpcd_read_data(mgr->aux,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+ &buf[i], 16) < 0)
return false;
}
return true;
@@ -4972,23 +4968,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
}
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret != 2) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret < 0) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret < 0) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
- ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
+ DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret < 0) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
@@ -6112,14 +6109,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
/* Endpoint decompression with DP-to-DP peer device */
@@ -6157,8 +6154,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_dpcd_read(immediate_upstream_aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(immediate_upstream_aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
@@ -6180,11 +6177,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
* therefore the endpoint needs to be
* both DSC and FEC capable.
*/
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE))
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 90fe07a89260..43f13a7c79b9 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/export.h>
#include <linux/ref_tracker.h>
#include <linux/types.h>
@@ -222,7 +223,7 @@ static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *r
while ((len = next_reg_area(&offset))) {
int address = DP_TUNNELING_BASE + offset;
- if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+ if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
return -EIO;
offset += len;
@@ -913,7 +914,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
goto out_err;
if (enable)
@@ -921,7 +922,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
else
val &= ~mask;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
goto out_err;
tunnel->bw_alloc_enabled = enable;
@@ -1039,7 +1040,7 @@ static int clear_bw_req_state(struct drm_dp_aux *aux)
{
u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
- if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
+ if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
return -EIO;
return 0;
@@ -1052,7 +1053,7 @@ static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
u8 val;
int err;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
*status_changed = val & status_change_mask;
@@ -1095,7 +1096,7 @@ static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
if (err)
goto out;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
err = -EIO;
goto out;
}
@@ -1196,13 +1197,13 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
goto out_err;
val &= mask;
if (val) {
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
goto out_err;
return 1;
@@ -1215,7 +1216,7 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
* Check for estimated BW changes explicitly to account for lost
* BW change notifications.
*/
- if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
goto out_err;
if (val * tunnel->bw_granularity != tunnel->estimated_bw)
@@ -1300,7 +1301,7 @@ int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *a
{
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
@@ -1920,7 +1921,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
}
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
- ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
+ ref_tracker_dir_init(&mgr->ref_tracker, 16, "drm_dptun");
#endif
for (i = 0; i < max_group_count; i++) {
diff --git a/drivers/gpu/drm/display/drm_dsc_helper.c b/drivers/gpu/drm/display/drm_dsc_helper.c
index 6900f4dac520..05996c526a8a 100644
--- a/drivers/gpu/drm/display/drm_dsc_helper.c
+++ b/drivers/gpu/drm/display/drm_dsc_helper.c
@@ -6,6 +6,7 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
index 05afc9f0bdd6..7d78b02c1446 100644
--- a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -3,6 +3,7 @@
* Copyright (c) 2024 Linaro Ltd
*/
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@@ -103,7 +104,8 @@ static int drm_connector_hdmi_audio_hook_plugged_cb(struct device *dev,
connector->hdmi_audio.plugged_cb = fn;
connector->hdmi_audio.plugged_cb_dev = codec_dev;
- fn(codec_dev, connector->hdmi_audio.last_state);
+ if (fn)
+ fn(codec_dev, connector->hdmi_audio.last_state);
mutex_unlock(&connector->hdmi_audio.lock);
@@ -142,6 +144,7 @@ static const struct hdmi_codec_ops drm_connector_hdmi_audio_ops = {
* @hdmi_codec_dev: device to be used as a parent for the HDMI Codec
* @funcs: callbacks for this HDMI Codec
* @max_i2s_playback_channels: maximum number of playback I2S channels
+ * @i2s_formats: set of I2S formats (use 0 for a bus-specific set)
* @spdif_playback: set if HDMI codec has S/PDIF playback port
* @dai_port: sound DAI port, -1 if it is not enabled
*
@@ -154,6 +157,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
struct device *hdmi_codec_dev,
const struct drm_connector_hdmi_audio_funcs *funcs,
unsigned int max_i2s_playback_channels,
+ u64 i2s_formats,
bool spdif_playback,
int dai_port)
{
@@ -161,6 +165,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
.ops = &drm_connector_hdmi_audio_ops,
.max_i2s_channels = max_i2s_playback_channels,
.i2s = !!max_i2s_playback_channels,
+ .i2s_formats = i2s_formats,
.spdif = spdif_playback,
.no_i2s_capture = true,
.no_spdif_capture = true,
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
new file mode 100644
index 000000000000..3651ad0f76e0
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+
+struct drm_connector_hdmi_cec_data {
+ struct cec_adapter *adapter;
+ const struct drm_connector_hdmi_cec_funcs *funcs;
+};
+
+static int drm_connector_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->enable(connector, enable);
+}
+
+static int drm_connector_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->log_addr(connector, logical_addr);
+}
+
+static int drm_connector_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->transmit(connector, attempts, signal_free_time, msg);
+}
+
+static const struct cec_adap_ops drm_connector_hdmi_cec_adap_ops = {
+ .adap_enable = drm_connector_hdmi_cec_adap_enable,
+ .adap_log_addr = drm_connector_hdmi_cec_adap_log_addr,
+ .adap_transmit = drm_connector_hdmi_cec_adap_transmit,
+};
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_invalidate(struct drm_connector *connector)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_phys_addr_invalidate(data->adapter);
+}
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_s_phys_addr(data->adapter, addr, false);
+}
+
+static void drm_connector_hdmi_cec_adapter_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_unregister_adapter(data->adapter);
+
+ if (data->funcs->uninit)
+ data->funcs->uninit(connector);
+
+ kfree(data);
+ connector->cec.data = NULL;
+}
+
+static struct drm_connector_cec_funcs drm_connector_hdmi_cec_adapter_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_adapter_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_adapter_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_register(struct drm_connector *connector,
+ const struct drm_connector_hdmi_cec_funcs *funcs,
+ const char *name,
+ u8 available_las,
+ struct device *dev)
+{
+ struct drm_connector_hdmi_cec_data *data;
+ struct cec_connector_info conn_info;
+ struct cec_adapter *cec_adap;
+ int ret;
+
+ if (!funcs->init || !funcs->enable || !funcs->log_addr || !funcs->transmit)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->funcs = funcs;
+
+ cec_adap = cec_allocate_adapter(&drm_connector_hdmi_cec_adap_ops, connector, name,
+ CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO,
+ available_las ? : CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(cec_adap);
+ if (ret < 0)
+ goto err_free;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+ cec_s_conn_info(cec_adap, &conn_info);
+
+ data->adapter = cec_adap;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = data;
+ connector->cec.funcs = &drm_connector_hdmi_cec_adapter_funcs;
+
+ ret = funcs->init(connector);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ /*
+ * NOTE: the CEC adapter will be unregistered by drmm cleanup from
+ * drm_managed_release(), which is called from drm_dev_release()
+ * during device unbind.
+ *
+ * However, the CEC framework cleans up the CEC adapter only when the
+ * last user has closed its file descriptor, so we don't need to handle
+ * it in DRM.
+ *
+ * Before that, the CEC framework makes sure that even if userspace
+ * still holds the CEC device open, all calls will be short-circuited
+ * via cec_is_registered(), ensuring that there is no access to the
+ * freed memory.
+ */
+ ret = cec_register_adapter(cec_adap, dev);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_adapter_unregister,
+ connector);
+
+err_delete_adapter:
+ cec_delete_adapter(cec_adap);
+
+ connector->cec.data = NULL;
+
+ mutex_unlock(&connector->cec.mutex);
+
+err_free:
+ kfree(data);
+
+ return ret;
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_register);
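A hypothetical driver-side sketch of registering a CEC adapter through the new helper; the my_* callbacks are stubs standing in for real hardware programming:

    #include <drm/drm_connector.h>
    #include <drm/display/drm_hdmi_cec_helper.h>
    #include <media/cec.h>

    static int my_cec_init(struct drm_connector *connector) { return 0; }
    static int my_cec_enable(struct drm_connector *connector, bool enable) { return 0; }
    static int my_cec_log_addr(struct drm_connector *connector, u8 logical_addr) { return 0; }
    static int my_cec_transmit(struct drm_connector *connector, u8 attempts,
                               u32 signal_free_time, struct cec_msg *msg) { return 0; }

    static const struct drm_connector_hdmi_cec_funcs my_cec_funcs = {
            .init = my_cec_init,            /* all four hooks are mandatory */
            .enable = my_cec_enable,
            .log_addr = my_cec_log_addr,
            .transmit = my_cec_transmit,
    };

    static int my_connector_cec_setup(struct drm_connector *connector,
                                      struct device *dev)
    {
            /* available_las of 0 falls back to CEC_MAX_LOG_ADDRS */
            return drmm_connector_hdmi_cec_register(connector, &my_cec_funcs,
                                                    "my-cec", 0, dev);
    }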
+
+void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector,
+ struct cec_msg *msg)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_received_msg(data->adapter, msg);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_received_msg);
+
+void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector,
+ u8 status)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_attempt_done(data->adapter, status);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_attempt_done);
+
+void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector,
+ u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt,
+ u8 low_drive_cnt, u8 error_cnt)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_done(data->adapter, status,
+ arb_lost_cnt, nack_cnt, low_drive_cnt, error_cnt);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_done);
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
new file mode 100644
index 000000000000..31b8e4a93e24
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_invalidate(struct drm_connector *connector)
+{
+ cec_notifier_phys_addr_invalidate(connector->cec.data);
+}
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ cec_notifier_set_phys_addr(connector->cec.data, addr);
+}
+
+static void drm_connector_hdmi_cec_notifier_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+
+ cec_notifier_conn_unregister(connector->cec.data);
+ connector->cec.data = NULL;
+}
+
+static const struct drm_connector_cec_funcs drm_connector_cec_notifier_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_notifier_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_notifier_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev)
+{
+ struct cec_connector_info conn_info;
+ struct cec_notifier *notifier;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ notifier = cec_notifier_conn_register(dev, port_name, &conn_info);
+ if (!notifier)
+ return -ENOMEM;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = notifier;
+ connector->cec.funcs = &drm_connector_cec_notifier_funcs;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_notifier_unregister,
+ connector);
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_notifier_register);
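For drivers that only need to hand the physical address to an external CEC adapter, the notifier variant suffices. A hypothetical usage sketch (a NULL port name is assumed to denote a single-port device):

    #include <drm/drm_connector.h>
    #include <drm/display/drm_hdmi_cec_helper.h>

    static int my_connector_cec_notifier_setup(struct drm_connector *connector,
                                               struct device *dev)
    {
            return drmm_connector_hdmi_cec_notifier_register(connector, NULL, dev);
    }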
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 74dd4d01dd9b..a237dc55805d 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -44,7 +45,7 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
/* Sink EOTF is Bit map while infoframe is absolute values */
if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
- connector->hdr_sink_metadata.hdmi_type1.eotf))
+ connector->display_info.hdr_sink_metadata.hdmi_type1.eotf))
DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
err = hdmi_drm_infoframe_init(frame);
@@ -256,3 +257,171 @@ drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
return DIV_ROUND_CLOSEST_ULL(clock * bpc, 8);
}
EXPORT_SYMBOL(drm_hdmi_compute_mode_clock);
+
+struct drm_hdmi_acr_n_cts_entry {
+ unsigned int n;
+ unsigned int cts;
+};
+
+struct drm_hdmi_acr_data {
+ unsigned long tmds_clock_khz;
+ struct drm_hdmi_acr_n_cts_entry n_cts_32k,
+ n_cts_44k1,
+ n_cts_48k;
+};
+
+static const struct drm_hdmi_acr_data hdmi_acr_n_cts[] = {
+ {
+ /* "Other" entry */
+ .n_cts_32k = { .n = 4096, },
+ .n_cts_44k1 = { .n = 6272, },
+ .n_cts_48k = { .n = 6144, },
+ }, {
+ .tmds_clock_khz = 25175,
+ .n_cts_32k = { .n = 4576, .cts = 28125, },
+ .n_cts_44k1 = { .n = 7007, .cts = 31250, },
+ .n_cts_48k = { .n = 6864, .cts = 28125, },
+ }, {
+ .tmds_clock_khz = 25200,
+ .n_cts_32k = { .n = 4096, .cts = 25200, },
+ .n_cts_44k1 = { .n = 6272, .cts = 28000, },
+ .n_cts_48k = { .n = 6144, .cts = 25200, },
+ }, {
+ .tmds_clock_khz = 27000,
+ .n_cts_32k = { .n = 4096, .cts = 27000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30000, },
+ .n_cts_48k = { .n = 6144, .cts = 27000, },
+ }, {
+ .tmds_clock_khz = 27027,
+ .n_cts_32k = { .n = 4096, .cts = 27027, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30030, },
+ .n_cts_48k = { .n = 6144, .cts = 27027, },
+ }, {
+ .tmds_clock_khz = 54000,
+ .n_cts_32k = { .n = 4096, .cts = 54000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60000, },
+ .n_cts_48k = { .n = 6144, .cts = 54000, },
+ }, {
+ .tmds_clock_khz = 54054,
+ .n_cts_32k = { .n = 4096, .cts = 54054, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60060, },
+ .n_cts_48k = { .n = 6144, .cts = 54054, },
+ }, {
+ .tmds_clock_khz = 74176,
+ .n_cts_32k = { .n = 11648, .cts = 210937, }, /* and 210938 */
+ .n_cts_44k1 = { .n = 17836, .cts = 234375, },
+ .n_cts_48k = { .n = 11648, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 74250,
+ .n_cts_32k = { .n = 4096, .cts = 74250, },
+ .n_cts_44k1 = { .n = 6272, .cts = 82500, },
+ .n_cts_48k = { .n = 6144, .cts = 74250, },
+ }, {
+ .tmds_clock_khz = 148352,
+ .n_cts_32k = { .n = 11648, .cts = 421875, },
+ .n_cts_44k1 = { .n = 8918, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 148500,
+ .n_cts_32k = { .n = 4096, .cts = 148500, },
+ .n_cts_44k1 = { .n = 6272, .cts = 165000, },
+ .n_cts_48k = { .n = 6144, .cts = 148500, },
+ }, {
+ .tmds_clock_khz = 296703,
+ .n_cts_32k = { .n = 5824, .cts = 421875, },
+ .n_cts_44k1 = { .n = 4459, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 281250, },
+ }, {
+ .tmds_clock_khz = 297000,
+ .n_cts_32k = { .n = 3072, .cts = 222750, },
+ .n_cts_44k1 = { .n = 4704, .cts = 247500, },
+ .n_cts_48k = { .n = 5120, .cts = 247500, },
+ }, {
+ .tmds_clock_khz = 593407,
+ .n_cts_32k = { .n = 5824, .cts = 843750, },
+ .n_cts_44k1 = { .n = 8918, .cts = 937500, },
+ .n_cts_48k = { .n = 5824, .cts = 562500, },
+ }, {
+ .tmds_clock_khz = 594000,
+ .n_cts_32k = { .n = 3072, .cts = 445500, },
+ .n_cts_44k1 = { .n = 9408, .cts = 990000, },
+ .n_cts_48k = { .n = 6144, .cts = 594000, },
+ },
+};
+
+static int drm_hdmi_acr_find_tmds_entry(unsigned long tmds_clock_khz)
+{
+ int i;
+
+ /* skip the "other" entry */
+ for (i = 1; i < ARRAY_SIZE(hdmi_acr_n_cts); i++) {
+ if (hdmi_acr_n_cts[i].tmds_clock_khz == tmds_clock_khz)
+ return i;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_hdmi_acr_get_n_cts() - get N and CTS values for Audio Clock Regeneration
+ *
+ * @tmds_char_rate: TMDS clock (char rate) as used by the HDMI connector
+ * @sample_rate: audio sample rate
+ * @out_n: a pointer to write the N value
+ * @out_cts: a pointer to write the CTS value
+ *
+ * Get the N and CTS values (either by calculating them or by returning data
+ * from the tables). This follows HDMI 1.4b, Section 7.2 "Audio Sample Clock
+ * Capture and Regeneration".
+ *
+ * Note: @sample_rate corresponds to the Fs value; see Sections 7.2.4 - 7.2.6
+ * for how to select Fs for non-L-PCM formats.
+ */
+void
+drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
+ unsigned int sample_rate,
+ unsigned int *out_n,
+ unsigned int *out_cts)
+{
+ /* be a bit more tolerant, especially for the 1.001 entries */
+ unsigned long tmds_clock_khz = DIV_ROUND_CLOSEST_ULL(tmds_char_rate, 1000);
+ const struct drm_hdmi_acr_n_cts_entry *entry;
+ unsigned int n, cts, mult;
+ int tmds_idx;
+
+ tmds_idx = drm_hdmi_acr_find_tmds_entry(tmds_clock_khz);
+
+ /*
+ * Don't change the order: 192 kHz is divisible by both 48k and 32k, but it
+ * should use the 48k entry.
+ */
+ if (sample_rate % 48000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_48k;
+ mult = sample_rate / 48000;
+ } else if (sample_rate % 44100 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_44k1;
+ mult = sample_rate / 44100;
+ } else if (sample_rate % 32000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_32k;
+ mult = sample_rate / 32000;
+ } else {
+ entry = NULL;
+ }
+
+ if (entry) {
+ n = entry->n * mult;
+ cts = entry->cts;
+ } else {
+ /* Recommended optimal value, HDMI 1.4b, Section 7.2.1 */
+ n = 128 * sample_rate / 1000;
+ cts = 0;
+ }
+
+ if (!cts)
+ cts = DIV_ROUND_CLOSEST_ULL(tmds_char_rate * n,
+ 128 * sample_rate);
+
+ *out_n = n;
+ *out_cts = cts;
+}
+EXPORT_SYMBOL(drm_hdmi_acr_get_n_cts);
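A worked check against the 148500 kHz table entry above: for 48 kHz audio, N = 6144 and CTS = 148500, which satisfies the HDMI ACR relation 128 * fs = f_TMDS * N / CTS, since 148500000 * 6144 / 148500 = 6144000 = 128 * 48000.

    unsigned int n, cts;

    drm_hdmi_acr_get_n_cts(148500000ULL, 48000, &n, &cts);
    /* n == 6144, cts == 148500 */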
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index c205f37da1e1..a561f124be99 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -1,15 +1,311 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
/**
+ * DOC: hdmi helpers
+ *
+ * These functions contain an implementation of the HDMI specification
+ * in the form of KMS helpers.
+ *
+ * It contains TMDS character rate computation, automatic selection of
+ * output formats, infoframes generation, etc.
+ *
+ * Infoframes Compliance
+ * ~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Drivers using the helpers will expose the various infoframes
+ * generated according to the HDMI specification in debugfs.
+ *
+ * Compliance can then be tested using ``edid-decode`` from the ``v4l-utils`` project
+ * (https://git.linuxtv.org/v4l-utils.git/). A sample run would look like:
+ *
+ * .. code-block:: bash
+ *
+ * # edid-decode \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/avi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdmi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/spd \
+ * /sys/class/drm/card1-HDMI-A-1/edid \
+ * -c
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 1e 6d f4 5b 1e ef 06 00
+ * 07 20 01 03 80 2f 34 78 ea 24 05 af 4f 42 ab 25
+ * 0f 50 54 21 08 00 d1 c0 61 40 45 40 01 01 01 01
+ * 01 01 01 01 01 01 98 d0 00 40 a1 40 d4 b0 30 20
+ * 3a 00 d1 0b 12 00 00 1a 00 00 00 fd 00 3b 3d 1e
+ * b2 31 00 0a 20 20 20 20 20 20 00 00 00 fc 00 4c
+ * 47 20 53 44 51 48 44 0a 20 20 20 20 00 00 00 ff
+ * 00 32 30 37 4e 54 52 4c 44 43 34 33 30 0a 01 46
+ *
+ * 02 03 42 72 23 09 07 07 4d 01 03 04 90 12 13 1f
+ * 22 5d 5e 5f 60 61 83 01 00 00 6d 03 0c 00 10 00
+ * b8 3c 20 00 60 01 02 03 67 d8 5d c4 01 78 80 03
+ * e3 0f 00 18 e2 00 6a e3 05 c0 00 e6 06 05 01 52
+ * 52 51 11 5d 00 a0 a0 40 29 b0 30 20 3a 00 d1 0b
+ * 12 00 00 1a 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 c3
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: GSM
+ * Model: 23540
+ * Serial Number: 454430 (0x0006ef1e)
+ * Made in: week 7 of 2022
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 47 cm x 52 cm
+ * Gamma: 2.20
+ * DPMS levels: Standby Suspend Off
+ * RGB color display
+ * First detailed timing is the preferred timing
+ * Color Characteristics:
+ * Red : 0.6835, 0.3105
+ * Green: 0.2587, 0.6679
+ * Blue : 0.1445, 0.0585
+ * White: 0.3134, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * Standard Timings:
+ * DMT 0x52: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * Detailed Timing Descriptors:
+ * DTD 1: 2560x2880 59.966580 Hz 8:9 185.417 kHz 534.000000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 240 Hpol P
+ * Vfront 3 Vsync 10 Vback 199 Vpol N
+ * Display Range Limits:
+ * Monitor ranges (GTF): 59-61 Hz V, 30-178 kHz H, max dotclock 490 MHz
+ * Display Product Name: 'LG SDQHD'
+ * Display Product Serial Number: '207NTRLDC430'
+ * Extension blocks: 1
+ * Checksum: 0x46
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Basic audio support
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 2
+ * Audio Data Block:
+ * Linear PCM:
+ * Max channels: 2
+ * Supported sample rates (kHz): 48 44.1 32
+ * Supported sample sizes (bits): 24 20 16
+ * Video Data Block:
+ * VIC 1: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * VIC 3: 720x480 59.940060 Hz 16:9 31.469 kHz 27.000000 MHz
+ * VIC 4: 1280x720 60.000000 Hz 16:9 45.000 kHz 74.250000 MHz
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 18: 720x576 50.000000 Hz 16:9 31.250 kHz 27.000000 MHz
+ * VIC 19: 1280x720 50.000000 Hz 16:9 37.500 kHz 74.250000 MHz
+ * VIC 31: 1920x1080 50.000000 Hz 16:9 56.250 kHz 148.500000 MHz
+ * VIC 34: 1920x1080 30.000000 Hz 16:9 33.750 kHz 74.250000 MHz
+ * VIC 93: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * VIC 94: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Speaker Allocation Data Block:
+ * FL/FR - Front Left/Right
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * Supports_AI
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 300 MHz
+ * Extended HDMI video details:
+ * HDMI VICs:
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * HDMI VIC 2: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * HDMI VIC 3: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 600 MHz
+ * SCDC Present
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Video Capability Data Block:
+ * YCbCr quantization: No Data
+ * RGB quantization: Selectable (via AVI Q)
+ * PT scan behavior: Always Underscanned
+ * IT scan behavior: Always Underscanned
+ * CE scan behavior: Always Underscanned
+ * Colorimetry Data Block:
+ * BT2020YCC
+ * BT2020RGB
+ * HDR Static Metadata Data Block:
+ * Electro optical transfer functions:
+ * Traditional gamma - SDR luminance range
+ * SMPTE ST2084
+ * Supported static metadata descriptors:
+ * Static metadata type 1
+ * Desired content max luminance: 82 (295.365 cd/m^2)
+ * Desired content max frame-average luminance: 82 (295.365 cd/m^2)
+ * Desired content min luminance: 81 (0.298 cd/m^2)
+ * Detailed Timing Descriptors:
+ * DTD 2: 2560x2880 29.986961 Hz 8:9 87.592 kHz 238.250000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 80 Hpol P
+ * Vfront 3 Vsync 10 Vback 28 Vpol N
+ * Checksum: 0xc3 Unused space in Extension Block: 43 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.29.0-5346
+ * edid-decode SHA: c363e9aa6d70 2025-03-11 11:41:18
+ *
+ * Warnings:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * IT Video Formats are overscanned by default, but normally this should be underscanned.
+ * Video Data Block: VIC 1 and the first DTD are not identical. Is this intended?
+ * Video Data Block: All VICs are in ascending order, and the first (preferred) VIC <= 4, is that intended?
+ * Video Capability Data Block: Set Selectable YCbCr Quantization to avoid interop issues.
+ * Video Capability Data Block: S_PT is equal to S_IT and S_CE, so should be set to 0 instead.
+ * Colorimetry Data Block: Set the sRGB colorimetry bit to avoid interop issues.
+ * Display Product Serial Number is set, so the Serial Number in the Base EDID should be 0.
+ * EDID:
+ * Base EDID: Some timings are out of range of the Monitor Ranges:
+ * Vertical Freq: 24.000 - 60.317 Hz (Monitor: 59.000 - 61.000 Hz)
+ * Horizontal Freq: 31.250 - 185.416 kHz (Monitor: 30.000 - 178.000 kHz)
+ * Maximum Clock: 594.000 MHz (Monitor: 490.000 MHz)
+ *
+ * Failures:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Video Capability Data Block: IT video formats are always underscanned, but bit 7 of Byte 3 of the CTA-861 Extension header is set to overscanned.
+ * EDID:
+ * CTA-861: Native progressive timings are a mix of several resolutions.
+ *
+ * EDID conformity: FAIL
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 82 02 0d 31 12 28 04 00 00 00 00 00 00 00 00 00
+ * 00
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x31
+ *
+ * AVI InfoFrame
+ * Version: 2
+ * Length: 13
+ * Y: Color Component Sample Format: RGB
+ * A: Active Format Information Present: Yes
+ * B: Bar Data Present: Bar Data not present
+ * S: Scan Information: Composed for an underscanned display
+ * C: Colorimetry: No Data
+ * M: Picture Aspect Ratio: 16:9
+ * R: Active Portion Aspect Ratio: 8
+ * ITC: IT Content: No Data
+ * EC: Extended Colorimetry: xvYCC601
+ * Q: RGB Quantization Range: Limited Range
+ * SC: Non-Uniform Picture Scaling: No Known non-uniform scaling
+ * YQ: YCC Quantization Range: Limited Range
+ * CN: IT Content Type: Graphics
+ * PR: Pixel Data Repetition Count: 0
+ * Line Number of End of Top Bar: 0
+ * Line Number of Start of Bottom Bar: 0
+ * Pixel Number of End of Left Bar: 0
+ * Pixel Number of Start of Right Bar: 0
+ *
+ * ----------------
+ *
+ * AVI InfoFrame conformity: PASS
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 81 01 05 49 03 0c 00 20 01
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x49
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03
+ * Version: 1
+ * Length: 5
+ * HDMI Video Format: HDMI_VIC is present
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ *
+ * ----------------
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 conformity: PASS
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 83 01 19 93 42 72 6f 61 64 63 6f 6d 56 69 64 65
+ * 6f 63 6f 72 65 00 00 00 00 00 00 00 09
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x93
+ *
+ * Source Product Description InfoFrame
+ * Version: 1
+ * Length: 25
+ * Vendor Name: 'Broadcom'
+ * Product Description: 'Videocore'
+ * Source Information: PC general
+ *
+ * ----------------
+ *
+ * Source Product Description InfoFrame conformity: PASS
+ *
+ * Testing
+ * ~~~~~~~
+ *
+ * The helpers come with unit tests, which can be run using kunit:
+ *
+ * .. code-block:: bash
+ *
+ * $ ./tools/testing/kunit/kunit.py run \
+ * --kunitconfig=drivers/gpu/drm/tests \
+ * drm_atomic_helper_connector_hdmi_*
+ */
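A minimal, hypothetical hookup sketch: drivers opt into these helpers by
initializing their connector through drmm_connector_hdmi_init() and providing
a &struct drm_connector_hdmi_funcs. All foo_* names, the drm and foo pointers,
and the exact parameter order are assumptions; check
include/drm/drm_connector.h before relying on them.

.. code-block:: c

   /* Made-up infoframe/TMDS callbacks wired into the helper machinery. */
   static const struct drm_connector_hdmi_funcs foo_hdmi_funcs = {
           .tmds_char_rate_valid = foo_tmds_char_rate_valid,
           .write_infoframe = foo_write_infoframe,
           .clear_infoframe = foo_clear_infoframe,
   };

   /* In the driver's init path: */
   ret = drmm_connector_hdmi_init(drm, &foo->connector,
                                  "Vendor", "Product",
                                  &foo_connector_funcs, &foo_hdmi_funcs,
                                  DRM_MODE_CONNECTOR_HDMIA, foo->ddc,
                                  BIT(HDMI_COLORSPACE_RGB) |
                                  BIT(HDMI_COLORSPACE_YUV420),
                                  12);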
+
+/**
* __drm_atomic_helper_connector_hdmi_reset() - Initializes all HDMI @drm_connector_state resources
* @connector: DRM connector
* @new_conn_state: connector state to reset
@@ -115,6 +411,11 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return false;
}
+ if (drm_mode_is_420_only(info, mode) && format != HDMI_COLORSPACE_YUV420) {
+ drm_dbg_kms(dev, "Mode can be only supported in YUV420 format.\n");
+ return false;
+ }
+
switch (format) {
case HDMI_COLORSPACE_RGB:
drm_dbg_kms(dev, "RGB Format, checking the constraints.\n");
@@ -145,9 +446,36 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return true;
case HDMI_COLORSPACE_YUV420:
- /* TODO: YUV420 is unsupported at the moment. */
- drm_dbg_kms(dev, "YUV420 format isn't supported yet.\n");
- return false;
+ drm_dbg_kms(dev, "YUV420 format, checking the constraints.\n");
+
+ if (!(info->color_formats & DRM_COLOR_FORMAT_YCBCR420)) {
+ drm_dbg_kms(dev, "Sink doesn't support YUV420.\n");
+ return false;
+ }
+
+ if (!drm_mode_is_420(info, mode)) {
+ drm_dbg_kms(dev, "Mode cannot be supported in YUV420 format.\n");
+ return false;
+ }
+
+ if (bpc == 10 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)) {
+ drm_dbg_kms(dev, "10 BPC but sink doesn't support Deep Color 30.\n");
+ return false;
+ }
+
+ if (bpc == 12 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)) {
+ drm_dbg_kms(dev, "12 BPC but sink doesn't support Deep Color 36.\n");
+ return false;
+ }
+
+ if (bpc == 16 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)) {
+ drm_dbg_kms(dev, "16 BPC but sink doesn't support Deep Color 48.\n");
+ return false;
+ }
+
+ drm_dbg_kms(dev, "YUV420 format supported in that configuration.\n");
+
+ return true;
case HDMI_COLORSPACE_YUV422:
drm_dbg_kms(dev, "YUV422 format, checking the constraints.\n");
@@ -253,8 +581,9 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
struct drm_device *dev = connector->dev;
int ret;
- drm_dbg_kms(dev, "Trying %s output format\n",
- drm_hdmi_connector_get_output_format_name(fmt));
+ drm_dbg_kms(dev, "Trying %s output format with %u bpc\n",
+ drm_hdmi_connector_get_output_format_name(fmt),
+ bpc);
if (!sink_supports_format_bpc(connector, info, mode, fmt, bpc)) {
drm_dbg_kms(dev, "%s output format not supported with %u bpc\n",
@@ -271,7 +600,7 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
return false;
}
- drm_dbg_kms(dev, "%s output format supported with %u (TMDS char rate: %llu Hz)\n",
+ drm_dbg_kms(dev, "%s output format supported with %u bpc (TMDS char rate: %llu Hz)\n",
drm_hdmi_connector_get_output_format_name(fmt),
bpc, conn_state->hdmi.tmds_char_rate);
@@ -279,23 +608,35 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
}
static int
-hdmi_compute_format(const struct drm_connector *connector,
- struct drm_connector_state *conn_state,
- const struct drm_display_mode *mode,
- unsigned int bpc)
+hdmi_compute_format_bpc(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state,
+ const struct drm_display_mode *mode,
+ unsigned int max_bpc, enum hdmi_colorspace fmt)
{
struct drm_device *dev = connector->dev;
+ unsigned int bpc;
+ int ret;
+
+ for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
+ ret = hdmi_try_format_bpc(connector, conn_state, mode, bpc, fmt);
+ if (!ret)
+ continue;
+
+ conn_state->hdmi.output_bpc = bpc;
+ conn_state->hdmi.output_format = fmt;
+
+ drm_dbg_kms(dev,
+ "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
+ conn_state->hdmi.output_bpc,
+ drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
+ conn_state->hdmi.tmds_char_rate);
- /*
- * TODO: Add support for YCbCr420 output for HDMI 2.0 capable
- * devices, for modes that only support YCbCr420.
- */
- if (hdmi_try_format_bpc(connector, conn_state, mode, bpc, HDMI_COLORSPACE_RGB)) {
- conn_state->hdmi.output_format = HDMI_COLORSPACE_RGB;
return 0;
}
- drm_dbg_kms(dev, "Failed. No Format Supported for that bpc count.\n");
+ drm_dbg_kms(dev, "Failed. %s output format not supported for any bpc count.\n",
+ drm_hdmi_connector_get_output_format_name(fmt));
return -EINVAL;
}
@@ -305,33 +646,29 @@ hdmi_compute_config(const struct drm_connector *connector,
struct drm_connector_state *conn_state,
const struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
unsigned int max_bpc = clamp_t(unsigned int,
conn_state->max_bpc,
8, connector->max_bpc);
- unsigned int bpc;
int ret;
- for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
- drm_dbg_kms(dev, "Trying with a %d bpc output\n", bpc);
-
- ret = hdmi_compute_format(connector, conn_state, mode, bpc);
- if (ret)
- continue;
-
- conn_state->hdmi.output_bpc = bpc;
-
- drm_dbg_kms(dev,
- "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
- mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
- conn_state->hdmi.output_bpc,
- drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
- conn_state->hdmi.tmds_char_rate);
-
- return 0;
+ ret = hdmi_compute_format_bpc(connector, conn_state, mode, max_bpc,
+ HDMI_COLORSPACE_RGB);
+ if (ret) {
+ if (connector->ycbcr_420_allowed) {
+ ret = hdmi_compute_format_bpc(connector, conn_state,
+ mode, max_bpc,
+ HDMI_COLORSPACE_YUV420);
+ if (ret)
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format doesn't work.\n");
+ } else {
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format not allowed for connector.\n");
+ ret = -EINVAL;
+ }
}
- return -EINVAL;
+ return ret;
}
static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
@@ -506,12 +843,12 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
if (!new_conn_state->crtc || !new_conn_state->best_encoder)
return 0;
- new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
-
ret = hdmi_compute_config(connector, new_conn_state, mode);
if (ret)
return ret;
+ new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
+
ret = hdmi_generate_infoframes(connector, new_conn_state);
if (ret)
return ret;
@@ -789,9 +1126,10 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
const struct drm_edid *drm_edid;
if (status == connector_status_disconnected) {
- // TODO: also handle CEC and scramber, HDMI sink disconnected.
+ // TODO: also handle scrambler, HDMI sink disconnected.
drm_connector_hdmi_audio_plugged_notify(connector, false);
drm_edid_connector_update(connector, NULL);
+ drm_connector_cec_phys_addr_invalidate(connector);
return;
}
@@ -805,8 +1143,9 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
drm_edid_free(drm_edid);
if (status == connector_status_connected) {
- // TODO: also handle CEC and scramber, HDMI sink is now connected.
+ // TODO: also handle scrambler, HDMI sink is now connected.
drm_connector_hdmi_audio_plugged_notify(connector, true);
+ drm_connector_cec_phys_addr_set(connector);
}
}
@@ -816,7 +1155,7 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
* @status: Connection status
*
* This function should be called as a part of the .detect() / .detect_ctx()
- * callbacks, updating the HDMI-specific connector's data.
+ * callbacks for all status changes.
*/
void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
enum drm_connector_status status)
diff --git a/drivers/gpu/drm/display/drm_scdc_helper.c b/drivers/gpu/drm/display/drm_scdc_helper.c
index 6d2f244e5830..df878aad4a36 100644
--- a/drivers/gpu/drm/display/drm_scdc_helper.c
+++ b/drivers/gpu/drm/display/drm_scdc_helper.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9ea2611770f4..cd15cf52f0c9 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -26,7 +26,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-
+#include <linux/export.h>
#include <linux/sync_file.h>
#include <drm/drm_atomic.h>
@@ -933,6 +933,9 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
* state). This is especially true in enable hooks because the pipeline has
* changed.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The old connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -967,6 +970,9 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
* attached to @encoder vs ones that do (and to inspect their state). This is
* especially true in disable hooks because the pipeline will change.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The new connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -988,6 +994,59 @@ drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
/**
+ * drm_atomic_get_connector_for_encoder - Get connector currently assigned to an encoder
+ * @encoder: The encoder to find the connector of
+ * @ctx: Modeset locking context
+ *
+ * This function finds and returns the connector currently assigned to
+ * an @encoder.
+ *
+ * It is similar to the drm_atomic_get_old_connector_for_encoder() and
+ * drm_atomic_get_new_connector_for_encoder() helpers, but doesn't
+ * require access to the atomic state. If you do have access to the
+ * state, prefer those helpers instead. This one is typically useful in
+ * situations where no atomic state is available, like detect, link
+ * repair, threaded interrupt handlers, or hooks from other frameworks
+ * (ALSA, CEC, etc.).
+ *
+ * Returns:
+ * The connector connected to @encoder, or an error pointer otherwise.
+ * When the error is EDEADLK, a deadlock has been detected and the
+ * sequence must be restarted.
+ */
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *out_connector = ERR_PTR(-EINVAL);
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (!connector->state)
+ continue;
+
+ if (encoder == connector->state->best_encoder) {
+ out_connector = connector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return out_connector;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_for_encoder);
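A hypothetical caller in a detect-style path, where no atomic state is at
hand, would drive the -EDEADLK backoff roughly like this (inspect_connector()
is a made-up consumer):

.. code-block:: c

   struct drm_modeset_acquire_ctx ctx;
   struct drm_connector *connector;

   drm_modeset_acquire_init(&ctx, 0);

retry:
   connector = drm_atomic_get_connector_for_encoder(encoder, &ctx);
   if (connector == ERR_PTR(-EDEADLK)) {
           drm_modeset_backoff(&ctx);
           goto retry;
   }

   if (!IS_ERR(connector))
           inspect_connector(connector);

   drm_modeset_drop_locks(&ctx);
   drm_modeset_acquire_fini(&ctx);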
+
+
+/**
* drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the crtc state for
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5302ab324898..ef56b474acf5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -25,6 +25,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/ktime.h>
@@ -1160,11 +1161,10 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
}
static void
-disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
+encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
- struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
@@ -1224,9 +1224,15 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
else if (funcs->dpms)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
-
- drm_atomic_bridge_chain_post_disable(bridge, state);
}
+}
+
+static void
+crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
@@ -1274,6 +1280,68 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
}
}
+static void
+encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ /*
+ * Shut down everything that's in the changeset and currently
+ * still on. So need to check the old, saved state.
+ */
+ if (!old_conn_state->crtc)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
+
+ if (new_conn_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_conn_state->crtc);
+ else
+ new_crtc_state = NULL;
+
+ if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
+ !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+ continue;
+
+ encoder = old_conn_state->best_encoder;
+
+ /*
+ * We shouldn't get this far if we didn't previously have an
+ * encoder, but WARN_ON() rather than explode.
+ */
+ if (WARN_ON(!encoder))
+ continue;
+
+ drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call disable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_post_disable(bridge, state);
+ }
+}
+
+static void
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ encoder_bridge_disable(dev, state);
+
+ crtc_disable(dev, state);
+
+ encoder_bridge_post_disable(dev, state);
+}
+
/**
* drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
* @dev: DRM device
@@ -1483,28 +1551,44 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
}
}
-/**
- * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
- * @dev: DRM device
- * @state: atomic state object being committed
- *
- * This function enables all the outputs with the new configuration which had to
- * be turned off for the update.
- *
- * For compatibility with legacy CRTC helpers this should be called after
- * drm_atomic_helper_commit_planes(), which is what the default commit function
- * does. But drivers with different needs can group the modeset commits together
- * and do the plane commits at the end. This is useful for drivers doing runtime
- * PM since planes updates then only happen when the CRTC is actually enabled.
- */
-void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
- struct drm_atomic_state *state)
+static void
+encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ if (!new_conn_state->best_encoder)
+ continue;
+
+ if (!new_conn_state->crtc->state->active ||
+ !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
+ continue;
+
+ encoder = new_conn_state->best_encoder;
+
+ drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call enable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_pre_enable(bridge, state);
+ }
+}
+
+static void
+crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
- struct drm_connector *connector;
- struct drm_connector_state *new_conn_state;
int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -1528,6 +1612,14 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs->commit(crtc);
}
}
+}
+
+static void
+encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
@@ -1552,7 +1644,6 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* it away), so we won't call enable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_pre_enable(bridge, state);
if (funcs) {
if (funcs->atomic_enable)
@@ -1565,6 +1656,30 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_atomic_bridge_chain_enable(bridge, state);
}
+}
+
+/**
+ * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy CRTC helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
+ */
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ encoder_bridge_pre_enable(dev, state);
+
+ crtc_enable(dev, state);
+
+ encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_writebacks(dev, state);
}
@@ -3409,6 +3524,9 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all);
* This implies a reset of all active components available between the CRTC and
* connectors.
*
+ * A bridge-oriented variant of this function exists in
+ * drm_bridge_helper_reset_crtc().
+ *
* NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
* For drivers which optimize out unnecessary modesets this will result in
* a no-op commit, achieving nothing.
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 519228eb1095..7142e163e618 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -37,6 +37,7 @@
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c2726af6698e..ecc73d52bfae 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -36,6 +36,7 @@
#include <drm/drm_writeback.h>
#include <drm/drm_vblank.h>
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/uaccess.h>
#include <linux/sync_file.h>
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 22aa015df387..a2556d16bed6 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -28,6 +28,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_auth.h>
@@ -95,7 +96,7 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
struct drm_auth *auth = data;
int ret = 0;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!file_priv->magic) {
ret = idr_alloc(&file_priv->master->magic_map, file_priv,
1, 0, GFP_KERNEL);
@@ -103,7 +104,6 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->magic = ret;
}
auth->magic = file_priv->magic;
- mutex_unlock(&dev->master_mutex);
drm_dbg_core(dev, "%u\n", auth->magic);
@@ -118,13 +118,12 @@ int drm_authmagic(struct drm_device *dev, void *data,
drm_dbg_core(dev, "%u\n", auth->magic);
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
file = idr_find(&file_priv->master->magic_map, auth->magic);
if (file) {
file->authenticated = 1;
idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
}
- mutex_unlock(&dev->master_mutex);
return file ? 0 : -EINVAL;
}
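The conversions above rely on the scope-based lock guards from
linux/cleanup.h: guard(mutex)() acquires the mutex at the declaration and
releases it automatically on every return path, which is what allows the
early returns. A self-contained sketch of the pattern:

.. code-block:: c

   #include <linux/cleanup.h>
   #include <linux/mutex.h>

   static int update_counter(struct mutex *lock, int *counter)
   {
           guard(mutex)(lock);

           if (*counter < 0)
                   return -EINVAL; /* lock dropped here... */

           (*counter)++;
           return 0; /* ...and here */
   }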
@@ -248,41 +247,33 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
if (drm_is_current_master_locked(file_priv))
- goto out_unlock;
+ return ret;
- if (dev->master) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ if (dev->master)
+ return -EBUSY;
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!file_priv->master)
+ return -EINVAL;
- if (!file_priv->is_master) {
- ret = drm_new_set_master(dev, file_priv);
- goto out_unlock;
- }
+ if (!file_priv->is_master)
+ return drm_new_set_master(dev, file_priv);
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to set lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_set_master(dev, file_priv, false);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -299,33 +290,27 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
- if (!drm_is_current_master_locked(file_priv)) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!drm_is_current_master_locked(file_priv))
+ return -EINVAL;
- if (!dev->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!dev->master)
+ return -EINVAL;
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to drop lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_drop_master(dev, file_priv);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -337,7 +322,7 @@ int drm_master_open(struct drm_file *file_priv)
/* if there is no current master make this fd it, but do not create
* any master object for render clients
*/
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!dev->master) {
ret = drm_new_set_master(dev, file_priv);
} else {
@@ -345,7 +330,6 @@ int drm_master_open(struct drm_file *file_priv)
file_priv->master = drm_master_get(dev->master);
spin_unlock(&file_priv->master_lookup_lock);
}
- mutex_unlock(&dev->master_mutex);
return ret;
}
@@ -355,7 +339,7 @@ void drm_master_release(struct drm_file *file_priv)
struct drm_device *dev = file_priv->minor->dev;
struct drm_master *master;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
@@ -376,7 +360,6 @@ out:
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
- mutex_unlock(&dev->master_mutex);
}
/**
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 6e74de833466..6852d73c931c 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -75,6 +75,12 @@
* the currently visible vertical area of the &drm_crtc.
* FB_ID:
* Mode object ID of the &drm_framebuffer this plane should scan out.
+ *
+ * When a KMS client is performing front-buffer rendering, it should set
+ * FB_ID to the same front-buffer FB on each atomic commit. This signals
+ * to the driver that the same FB must be scanned out again. Otherwise,
+ * drivers which do not employ continuously repeated scanout cycles might
+ * not update the screen, as sketched below.
* CRTC_ID:
* Mode object ID of the &drm_crtc this plane should be connected to.
*
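A hypothetical userspace sketch of the front-buffer rule above, using
libdrm's atomic API; fd, plane_id, fb_id and the FB_ID property id
fb_id_prop are all assumed to have been obtained earlier:

.. code-block:: c

   /* Re-commit the same front-buffer FB to request another scanout. */
   drmModeAtomicReq *req = drmModeAtomicAlloc();

   drmModeAtomicAddProperty(req, plane_id, fb_id_prop, fb_id);
   drmModeAtomicCommit(fd, req, 0, NULL);
   drmModeAtomicFree(req);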
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index fa2794217a90..4bde00083047 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -21,7 +21,9 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -198,13 +200,106 @@
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
+static void __drm_bridge_free(struct kref *kref)
+{
+ struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+
+ if (bridge->funcs->destroy)
+ bridge->funcs->destroy(bridge);
+ kfree(bridge->container);
+}
+
+/**
+ * drm_bridge_get - Acquire a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function increments the bridge's refcount.
+ *
+ * Returns:
+ * Pointer to @bridge.
+ */
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_get(&bridge->refcount);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drm_bridge_get);
+
+/**
+ * drm_bridge_put - Release a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function decrements the bridge's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_bridge_put(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_put(&bridge->refcount, __drm_bridge_free);
+}
+EXPORT_SYMBOL(drm_bridge_put);
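The pair follows the usual kref get/put convention; a minimal sketch, with
inspect_bridge() standing in for any consumer that must keep the bridge
alive:

.. code-block:: c

   /* Hold a reference across a window where the bridge may go away. */
   struct drm_bridge *b = drm_bridge_get(bridge);

   inspect_bridge(b);

   drm_bridge_put(b);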
+
+/**
+ * drm_bridge_put_void - wrapper around drm_bridge_put() taking a void pointer
+ *
+ * @data: pointer to a &struct drm_bridge, cast to a void pointer
+ *
+ * Wrapper around drm_bridge_put() for use where a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_bridge_put_void(void *data)
+{
+ struct drm_bridge *bridge = (struct drm_bridge *)data;
+
+ drm_bridge_put(bridge);
+}
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs)
+{
+ void *container;
+ struct drm_bridge *bridge;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ bridge = container + offset;
+ bridge->container = container;
+ bridge->funcs = funcs;
+ kref_init(&bridge->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
+ if (err)
+ return ERR_PTR(err);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_bridge_alloc);
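Drivers are expected to reach this through the devm_drm_bridge_alloc()
macro, which derives the size/offset pair from the container type; a
hypothetical probe sketch (all foo_* names are made up, check
include/drm/drm_bridge.h for the exact macro):

.. code-block:: c

   struct foo_bridge {
           struct drm_bridge bridge;
           void __iomem *regs;
   };

   static const struct drm_bridge_funcs foo_bridge_funcs = {
           /* ... */
   };

   static int foo_probe(struct platform_device *pdev)
   {
           struct foo_bridge *fb;

           /* Allocation is refcounted; devm drops the initial reference. */
           fb = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge,
                                      bridge, &foo_bridge_funcs);
           if (IS_ERR(fb))
                   return PTR_ERR(fb);

           drm_bridge_add(&fb->bridge);
           return 0;
   }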
+
/**
* drm_bridge_add - add the given bridge to the global bridge list
*
* @bridge: bridge control structure
+ *
+ * The bridge to be added must have been allocated by
+ * devm_drm_bridge_alloc().
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
+ if (!bridge->container)
+ DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
+
+ drm_bridge_get(bridge);
+
mutex_init(&bridge->hpd_mutex);
if (bridge->ops & DRM_BRIDGE_OP_HDMI)
@@ -252,6 +347,8 @@ void drm_bridge_remove(struct drm_bridge *bridge)
mutex_unlock(&bridge_lock);
mutex_destroy(&bridge->hpd_mutex);
+
+ drm_bridge_put(bridge);
}
EXPORT_SYMBOL(drm_bridge_remove);
@@ -280,6 +377,11 @@ static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
+static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
+{
+ return bridge->funcs->atomic_reset != NULL;
+}
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
@@ -312,11 +414,17 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
if (!encoder || !bridge)
return -EINVAL;
- if (previous && (!previous->dev || previous->encoder != encoder))
- return -EINVAL;
+ drm_bridge_get(bridge);
- if (bridge->dev)
- return -EBUSY;
+ if (previous && (!previous->dev || previous->encoder != encoder)) {
+ ret = -EINVAL;
+ goto err_put_bridge;
+ }
+
+ if (bridge->dev) {
+ ret = -EBUSY;
+ goto err_put_bridge;
+ }
bridge->dev = encoder->dev;
bridge->encoder = encoder;
@@ -327,12 +435,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge, flags);
+ ret = bridge->funcs->attach(bridge, encoder, flags);
if (ret < 0)
goto err_reset_bridge;
}
- if (bridge->funcs->atomic_reset) {
+ if (drm_bridge_is_atomic(bridge)) {
struct drm_bridge_state *state;
state = bridge->funcs->atomic_reset(bridge);
@@ -365,6 +473,8 @@ err_reset_bridge:
"failed to attach bridge %pOF to encoder %s\n",
bridge->of_node, encoder->name);
+err_put_bridge:
+ drm_bridge_put(bridge);
return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
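Note that the attach callback now receives the encoder explicitly. A
hypothetical implementation matching the updated signature (bridge_to_foo()
and the next_bridge member are made up):

.. code-block:: c

   static int foo_bridge_attach(struct drm_bridge *bridge,
                                struct drm_encoder *encoder,
                                enum drm_bridge_attach_flags flags)
   {
           struct foo_bridge *foo = bridge_to_foo(bridge);

           if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
                   return -EINVAL;

           /* Chain the next bridge onto the same encoder. */
           return drm_bridge_attach(encoder, foo->next_bridge, bridge,
                                    flags);
   }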
@@ -377,7 +487,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
- if (bridge->funcs->atomic_reset)
+ if (drm_bridge_is_atomic(bridge))
drm_atomic_private_obj_fini(&bridge->base);
if (bridge->funcs->detach)
@@ -385,6 +495,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
list_del(&bridge->chain_node);
bridge->dev = NULL;
+ drm_bridge_put(bridge);
}
/**
@@ -1116,6 +1227,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
/**
* drm_bridge_detect - check if anything is attached to the bridge output
* @bridge: bridge control structure
+ * @connector: attached connector
*
* If the bridge supports output detection, as reported by the
* DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
@@ -1126,12 +1238,13 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
* The detection status on success, or connector_status_unknown if the bridge
* doesn't support output detection.
*/
-enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
+enum drm_connector_status
+drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
return connector_status_unknown;
- return bridge->funcs->detect(bridge);
+ return bridge->funcs->detect(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_detect);
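&drm_bridge_funcs.detect likewise gains the connector argument. A
hypothetical implementation (bridge_to_foo() and the hpd_asserted flag are
made up):

.. code-block:: c

   static enum drm_connector_status
   foo_bridge_detect(struct drm_bridge *bridge,
                     struct drm_connector *connector)
   {
           struct foo_bridge *foo = bridge_to_foo(bridge);

           return foo->hpd_asserted ? connector_status_connected
                                    : connector_status_disconnected;
   }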
@@ -1300,6 +1413,92 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+/**
+ * devm_drm_put_bridge - Release a bridge reference obtained via devm
+ * @dev: device that got the bridge via devm
+ * @bridge: pointer to a struct drm_bridge obtained via devm
+ *
+ * Same as drm_bridge_put() for bridge pointers obtained via devm functions
+ * such as devm_drm_bridge_alloc().
+ *
+ * This function is a temporary workaround and MUST NOT be used. Manual
+ * handling of bridge lifetime is inherently unsafe.
+ */
+void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
+{
+ devm_release_action(dev, drm_bridge_put_void, bridge);
+}
+EXPORT_SYMBOL(devm_drm_put_bridge);
+
+static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
+ struct drm_bridge *bridge,
+ unsigned int idx)
+{
+ drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
+ drm_printf(p, "\ttype: [%d] %s\n",
+ bridge->type,
+ drm_get_connector_type_name(bridge->type));
+
+ if (bridge->of_node)
+ drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
+
+ drm_printf(p, "\tops: [0x%x]", bridge->ops);
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ drm_puts(p, " detect");
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ drm_puts(p, " edid");
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_puts(p, " hpd");
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ drm_puts(p, " modes");
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ drm_puts(p, " hdmi");
+ drm_puts(p, "\n");
+}
+
+static int allbridges_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ mutex_lock(&bridge_lock);
+
+ list_for_each_entry(bridge, &bridge_list, list)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ mutex_unlock(&bridge_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(allbridges);
+
+static int encoder_bridges_show(struct seq_file *m, void *data)
+{
+ struct drm_encoder *encoder = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ drm_for_each_bridge_in_chain(encoder, bridge)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
+
+void drm_bridge_debugfs_params(struct dentry *root)
+{
+ debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
+}
+
+void drm_bridge_debugfs_encoder_params(struct dentry *root,
+ struct drm_encoder *encoder)
+{
+ /* bridges list */
+ debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
+}
+
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c
new file mode 100644
index 000000000000..420f29cf3e54
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_helper.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/export.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_modeset_lock.h>
+
+/**
+ * drm_bridge_helper_reset_crtc - Reset the pipeline feeding a bridge
+ * @bridge: DRM bridge to reset
+ * @ctx: lock acquisition context
+ *
+ * Reset the @bridge pipeline. This power-cycles all active components
+ * between the CRTC and the connector that the bridge is connected to.
+ *
+ * As it relies on drm_atomic_helper_reset_crtc(), the same limitations
+ * apply.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. If the error
+ * returned is EDEADLK, the whole atomic sequence must be restarted.
+ */
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector *connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ connector = drm_atomic_get_connector_for_encoder(encoder, ctx);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ goto out;
+ }
+
+ if (!connector->state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ crtc = connector->state->crtc;
+ ret = drm_atomic_helper_reset_crtc(crtc, ctx);
+
+out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bridge_helper_reset_crtc);
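A hypothetical call site, e.g. from a bridge's link-training error handler;
the DRM_MODESET_LOCK_ALL_BEGIN/END pair takes care of the -EDEADLK restart
the helper requires:

.. code-block:: c

   struct drm_modeset_acquire_ctx ctx;
   int ret;

   DRM_MODESET_LOCK_ALL_BEGIN(bridge->dev, ctx, 0, ret);

   ret = drm_bridge_helper_reset_crtc(bridge, &ctx);

   DRM_MODESET_LOCK_ALL_END(bridge->dev, ctx, ret);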
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 241c855f891f..a94061f373de 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -5,6 +5,7 @@
#include <kunit/test-bug.h>
+#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>
@@ -405,6 +406,49 @@ drm_get_buddy(struct drm_buddy_block *block)
EXPORT_SYMBOL(drm_get_buddy);
/**
+ * drm_buddy_reset_clear - reset blocks clear state
+ *
+ * @mm: DRM buddy manager
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state of each block in the freelist, based on the
+ * @is_clear value.
+ */
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+{
+ u64 root_size, size, start;
+ unsigned int order;
+ int i;
+
+ size = mm->size;
+ for (i = 0; i < mm->n_roots; ++i) {
+ order = ilog2(size) - ilog2(mm->chunk_size);
+ start = drm_buddy_block_offset(mm->roots[i]);
+ __force_merge(mm, start, start + size, order);
+
+ root_size = mm->chunk_size << order;
+ size -= root_size;
+ }
+
+ for (i = 0; i <= mm->max_order; ++i) {
+ struct drm_buddy_block *block;
+
+ list_for_each_entry_reverse(block, &mm->free_list[i], link) {
+ if (is_clear != drm_buddy_block_is_clear(block)) {
+ if (is_clear) {
+ mark_cleared(block);
+ mm->clear_avail += drm_buddy_block_size(mm, block);
+ } else {
+ clear_reset(block);
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+ }
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(drm_buddy_reset_clear);
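A plausible use, assuming a manager mgr that embeds a &struct drm_buddy as
mgr->mm: after an event that invalidates the cleared-pages tracking, such as
a device reset, every free block can be marked dirty again:

.. code-block:: c

   /* All free blocks must be considered dirty (not cleared) again. */
   drm_buddy_reset_clear(&mgr->mm, false);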
+
+/**
* drm_buddy_free_block - free a block
*
* @mm: DRM buddy manager
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 7051c9c909c2..ea1d2d5d2c66 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -93,8 +93,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
return;
}
- if (wbinvd_on_all_cpus())
- pr_err("Timed out waiting for cache flush\n");
+ wbinvd_on_all_cpus();
#elif defined(__powerpc__)
unsigned long i;
@@ -139,8 +138,7 @@ drm_clflush_sg(struct sg_table *st)
return;
}
- if (wbinvd_on_all_cpus())
- pr_err("Timed out waiting for cache flush\n");
+ wbinvd_on_all_cpus();
#else
WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
@@ -172,8 +170,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
return;
}
- if (wbinvd_on_all_cpus())
- pr_err("Timed out waiting for cache flush\n");
+ wbinvd_on_all_cpus();
#else
WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 549b28a5918c..3fa38d4ac70b 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -174,7 +175,7 @@ EXPORT_SYMBOL(drm_client_release);
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
if (buffer->gem) {
- drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
+ drm_gem_vunmap(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
@@ -252,7 +253,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
drm_gem_lock(gem);
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap_unlocked;
*map_copy = *map;
@@ -278,7 +279,7 @@ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
@@ -303,34 +304,17 @@ EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int
-drm_client_buffer_vmap(struct drm_client_buffer *buffer,
- struct iosys_map *map_copy)
+int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
+ struct iosys_map *map_copy)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
int ret;
- drm_gem_lock(gem);
-
- ret = drm_gem_pin_locked(gem);
- if (ret)
- goto err_drm_gem_pin_locked;
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap(buffer->gem, &buffer->map);
if (ret)
- goto err_drm_gem_vmap;
-
- drm_gem_unlock(gem);
-
- *map_copy = *map;
+ return ret;
+ *map_copy = buffer->map;
return 0;
-
-err_drm_gem_vmap:
- drm_gem_unpin_locked(buffer->gem);
-err_drm_gem_pin_locked:
- drm_gem_unlock(gem);
- return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
@@ -344,13 +328,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
-
- drm_gem_lock(gem);
- drm_gem_vunmap(gem, map);
- drm_gem_unpin_locked(gem);
- drm_gem_unlock(gem);
+ drm_gem_vunmap(buffer->gem, &buffer->map);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
index bd93cd93d519..c83196ad8b59 100644
--- a/drivers/gpu/drm/drm_client_event.c
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index aca442c25209..9c2c3b0c8c47 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -8,6 +8,8 @@
*/
#include "drm/drm_modeset_lock.h"
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -39,7 +41,7 @@ int drm_client_modeset_create(struct drm_client_dev *client)
unsigned int max_connector_count = 1;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
- unsigned int i = 0;
+ int i = 0;
/* Add terminating zero entry to enable index less iteration */
client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL);
@@ -73,9 +75,10 @@ err_free:
static void drm_client_modeset_release(struct drm_client_dev *client)
{
struct drm_mode_set *modeset;
- unsigned int i;
drm_client_for_each_modeset(modeset, client) {
+ int i;
+
drm_mode_destroy(client->dev, modeset->mode);
modeset->mode = NULL;
modeset->fb = NULL;
@@ -117,10 +120,10 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_get_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -130,10 +133,10 @@ drm_connector_get_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -144,10 +147,10 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_preferred_mode(struct drm_connector *connector, int width, int height)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay > width ||
@@ -159,16 +162,18 @@ drm_connector_preferred_mode(struct drm_connector *connector, int width, int hei
return NULL;
}
-static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_first_mode(struct drm_connector *connector)
{
return list_first_entry_or_null(&connector->modes,
struct drm_display_mode, head);
}
-static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
- struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode;
+ const struct drm_cmdline_mode *cmdline_mode;
+ const struct drm_display_mode *mode;
bool prefer_non_interlace;
/*
@@ -237,9 +242,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
return enable;
}
-static void drm_client_connectors_enabled(struct drm_connector **connectors,
+static void drm_client_connectors_enabled(struct drm_connector *connectors[],
unsigned int connector_count,
- bool *enabled)
+ bool enabled[])
{
bool any_enabled = false;
struct drm_connector *connector;
@@ -263,16 +268,35 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
enabled[i] = drm_connector_enabled(connectors[i], false);
}
+static void mode_replace(struct drm_device *dev,
+ const struct drm_display_mode **dst,
+ const struct drm_display_mode *src)
+{
+ drm_mode_destroy(dev, (struct drm_display_mode *)*dst);
+
+ *dst = src ? drm_mode_duplicate(dev, src) : NULL;
+}
+
+static void modes_destroy(struct drm_device *dev,
+ const struct drm_display_mode *modes[],
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mode_replace(dev, &modes[i], NULL);
+}
+
static bool drm_client_target_cloned(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
- int count, i, j;
+ int count, i;
bool can_clone = false;
- struct drm_display_mode *dmt_mode, *mode;
+ struct drm_display_mode *dmt_mode;
/* only contemplate cloning in the single crtc case */
if (dev->mode_config.num_crtc > 1)
@@ -291,9 +315,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
/* check the command line or if nothing common pick 1024x768 */
can_clone = true;
for (i = 0; i < connector_count; i++) {
+ int j;
+
if (!enabled[i])
continue;
- modes[i] = drm_connector_pick_cmdline_mode(connectors[i]);
+
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connectors[i]));
if (!modes[i]) {
can_clone = false;
break;
@@ -323,6 +351,8 @@ static bool drm_client_target_cloned(struct drm_device *dev,
goto fail;
for (i = 0; i < connector_count; i++) {
+ const struct drm_display_mode *mode;
+
if (!enabled[i])
continue;
@@ -332,7 +362,7 @@ static bool drm_client_target_cloned(struct drm_device *dev,
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
- modes[i] = mode;
+ mode_replace(dev, &modes[i], mode);
}
if (!modes[i])
can_clone = false;
@@ -349,19 +379,19 @@ fail:
}
static int drm_client_get_tile_offsets(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
int idx,
int h_idx, int v_idx)
{
- struct drm_connector *connector;
int i;
int hoffset = 0, voffset = 0;
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+
if (!connector->has_tile)
continue;
@@ -384,14 +414,13 @@ static int drm_client_get_tile_offsets(struct drm_device *dev,
}
static bool drm_client_target_preferred(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const u64 mask = BIT_ULL(connector_count) - 1;
- struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
int num_tiled_conns = 0;
@@ -405,7 +434,9 @@ static bool drm_client_target_preferred(struct drm_device *dev,
retry:
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+ const char *mode_type;
+
if (conn_configured & BIT_ULL(i))
continue;
@@ -438,20 +469,23 @@ retry:
modes, offsets, i,
connector->tile_h_loc, connector->tile_v_loc);
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
- /* got for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
+
if (!modes[i]) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n",
- connector->base.id, connector->name,
- connector->tile_group ? connector->tile_group->id : 0);
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred modes, pick one off the list */
- if (!modes[i])
- modes[i] = drm_connector_first_mode(connector);
+
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
+ }
+
/*
* In case of tiled mode if all tiles not present fallback to
* first available non tiled mode.
@@ -466,18 +500,24 @@ retry:
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
} else {
- modes[i] = drm_connector_get_tiled_mode(connector);
+ mode_type = "tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_get_tiled_mode(connector));
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n",
- connector->base.id, connector->name,
- modes[i] ? modes[i]->name : "none");
+ if (modes[i])
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] found %s mode: %s\n",
+ connector->base.id, connector->name,
+ mode_type, modes[i]->name);
+ else
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] no mode found\n",
+ connector->base.id, connector->name);
+
conn_configured |= BIT_ULL(i);
}
@@ -502,18 +542,17 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
}
static int drm_client_pick_crtcs(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
+ struct drm_crtc *best_crtcs[],
+ const struct drm_display_mode *modes[],
int n, int width, int height)
{
struct drm_device *dev = client->dev;
struct drm_connector *connector;
int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
+ struct drm_crtc **crtcs;
struct drm_mode_set *modeset;
- int o;
if (n == connector_count)
return 0;
@@ -543,7 +582,8 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
* remaining connectors
*/
drm_client_for_each_modeset(modeset, client) {
- crtc = modeset->crtc;
+ struct drm_crtc *crtc = modeset->crtc;
+ int o;
if (!connector_has_possible_crtc(connector, crtc))
continue;
@@ -577,17 +617,17 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
/* Try to read the BIOS display configuration and use it for the initial config */
static bool drm_client_firmware_config(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ struct drm_crtc *crtcs[],
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq, mask;
struct drm_device *dev = client->dev;
- int i, j;
+ int i;
bool *save_enabled;
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
@@ -621,11 +661,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
- struct drm_connector *connector;
+ struct drm_connector *connector = connectors[i];
struct drm_encoder *encoder;
- struct drm_crtc *new_crtc;
-
- connector = connectors[i];
+ struct drm_crtc *crtc;
+ const char *mode_type;
+ int j;
if (conn_configured & BIT(i))
continue;
@@ -664,7 +704,7 @@ retry:
num_connectors_enabled++;
- new_crtc = connector->state->crtc;
+ crtc = connector->state->crtc;
/*
* Make sure we're not trying to drive multiple connectors
@@ -672,69 +712,52 @@ retry:
* match the BIOS.
*/
for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- drm_dbg_kms(dev, "fallback: cloned configuration\n");
+ if (crtcs[j] == crtc) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] fallback: cloned configuration\n",
+ connector->base.id, connector->name);
goto bail;
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
-
- /* go for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
- /* try for preferred next */
if (!modes[i]) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n",
- connector->base.id, connector->name,
- str_yes_no(connector->has_tile));
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_first_mode(connector);
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
}
/* last resort: use current mode */
if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly.
- */
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n",
- connector->base.id, connector->name);
- modes[i] = &connector->state->crtc->mode;
+ mode_type = "current";
+ mode_replace(dev, &modes[i],
+ &crtc->state->mode);
}
+
/*
* In case of tiled modes, if all tiles are not present
* then fallback to a non tiled mode.
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
}
- crtcs[i] = new_crtc;
+ crtcs[i] = crtc;
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n",
+ drm_dbg_kms(dev, "[CONNECTOR::%d:%s] on [CRTC:%d:%s] using %s mode: %s\n",
connector->base.id, connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+ crtc->base.id, crtc->name,
+ mode_type, modes[i]->name);
fallback = false;
conn_configured |= BIT(i);
@@ -799,8 +822,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
- /* points to modes protected by mode_config.mutex */
- struct drm_display_mode **modes;
+ const struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
bool *enabled;
@@ -851,7 +873,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
modes, offsets, enabled, width, height)) {
- memset(modes, 0, connector_count * sizeof(*modes));
+ modes_destroy(dev, modes, connector_count);
memset(crtcs, 0, connector_count * sizeof(*crtcs));
memset(offsets, 0, connector_count * sizeof(*offsets));
@@ -868,10 +890,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
crtcs, modes, 0, width, height);
}
+ mutex_unlock(&dev->mode_config.mutex);
+
drm_client_modeset_release(client);
for (i = 0; i < connector_count; i++) {
- struct drm_display_mode *mode = modes[i];
+ const struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
struct drm_client_offset *offset = &offsets[i];
@@ -902,11 +926,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
- mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
kfree(crtcs);
+ modes_destroy(dev, modes, connector_count);
kfree(modes);
kfree(offsets);
kfree(enabled);
@@ -938,7 +962,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
struct drm_plane *plane = modeset->crtc->primary;
struct drm_cmdline_mode *cmdline;
u64 valid_mask = 0;
- unsigned int i;
+ int i;
if (!modeset->num_connectors)
return false;
@@ -1219,11 +1243,12 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
struct drm_connector *connector;
struct drm_mode_set *modeset;
struct drm_modeset_acquire_ctx ctx;
- int j;
int ret;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
drm_client_for_each_modeset(modeset, client) {
+ int j;
+
if (!modeset->crtc->enabled)
continue;
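
[editor's note] For reference, mode_replace() and modes_destroy() are introduced earlier in this patch, outside the hunks quoted here. A minimal sketch of their assumed semantics, in which the client keeps private duplicates instead of borrowing pointers protected by mode_config.mutex:

	static void mode_replace(struct drm_device *dev,
				 const struct drm_display_mode **dst,
				 const struct drm_display_mode *src)
	{
		/* Drop the previously stored duplicate, if any (NULL is ok). */
		drm_mode_destroy(dev, (struct drm_display_mode *)*dst);
		/* Keep a private copy; stays NULL if src is NULL or OOM. */
		*dst = src ? drm_mode_duplicate(dev, src) : NULL;
	}

	static void modes_destroy(struct drm_device *dev,
				  const struct drm_display_mode *modes[],
				  unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++)
			mode_replace(dev, &modes[i], NULL);
	}

This is presumably why drm_client_modeset_probe() below can now drop mode_config.mutex before consuming modes[].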
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3969dc548cff..37a3270bc3c2 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
@@ -28,6 +29,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
+#include <kunit/visibility.h>
#include "drm_crtc_internal.h"
@@ -494,6 +496,7 @@ const char *drm_get_color_encoding_name(enum drm_color_encoding encoding)
return color_encoding_name[encoding];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_encoding_name);
/**
* drm_get_color_range_name - return a string for color range
@@ -509,6 +512,7 @@ const char *drm_get_color_range_name(enum drm_color_range range)
return color_range_name[range];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_range_name);
/**
* drm_plane_create_color_properties - color encoding related plane properties
@@ -630,3 +634,209 @@ int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests)
return 0;
}
EXPORT_SYMBOL(drm_color_lut_check);
+
+/*
+ * Gamma-LUT programming
+ */
+
+/**
+ * drm_crtc_load_gamma_888 - Programs gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component.
+ */
+void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_gamma(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_888);
+
+/**
+ * drm_crtc_load_gamma_565_from_888 - Programs gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper subsamples
+ * the individual color components down to the format's 5/6/5 bits.
+ */
+void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 4 + i / 16].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i) {
+ g = lut[i * 4 + i / 16].green;
+ set_gamma(crtc, i, 0, g, 0);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_565_from_888);
+
+/**
+ * drm_crtc_load_gamma_555_from_888 - Programs gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper subsamples
+ * the individual color components down to the format's 5/5/5 bits.
+ */
+void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 8 + i / 4].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_555_from_888);
+
+static void fill_gamma_888(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 8) | r;
+ g = (g << 8) | g;
+ b = (b << 8) | b;
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_888 - Programs a default gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_gamma_888(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_888);
+
+static void fill_gamma_565(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 10) | (g << 4) | (g >> 2);
+ b = (b << 11) | (b << 6) | (b << 1) | (b >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_565 - Programs a default gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_565(crtc, i, i, i, i, set_gamma);
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i)
+ fill_gamma_565(crtc, i, 0, i, 0, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_565);
+
+static void fill_gamma_555(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 11) | (g << 6) | (g << 1) | (g >> 4);
+ b = (b << 11) | (b << 6) | (b << 1) | (r >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_555 - Programs a default gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_555(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_555);
+
+/*
+ * Color-LUT programming
+ */
+
+/**
+ * drm_crtc_load_palette_8 - Programs palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The palette to program
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs the palette specified in @lut to hardware. The input palette
+ * must have 256 entries per color component.
+ */
+void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_palette(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_palette_8);
+
+static void fill_palette_8(struct drm_crtc *crtc, unsigned int i,
+ drm_crtc_set_lut_func set_palette)
+{
+ u16 Y = (i << 8) | i; // relative luminance
+
+ set_palette(crtc, i, Y, Y, Y);
+}
+
+/**
+ * drm_crtc_fill_palette_8 - Programs a default palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs a default palette to hardware.
+ */
+void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_palette_8(crtc, i, set_palette);
+}
+EXPORT_SYMBOL(drm_crtc_fill_palette_8);
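
[editor's note] Usage sketch for the new gamma helpers (hypothetical driver; the foo_* names and the register writer are placeholders, not part of this patch):

	static void foo_set_gamma(struct drm_crtc *crtc, unsigned int index,
				  u16 r, u16 g, u16 b)
	{
		foo_write_lut_entry(crtc, index, r, g, b); /* hw access, made up */
	}

	static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_atomic_state *state)
	{
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(state, crtc);

		if (crtc_state->gamma_lut)
			drm_crtc_load_gamma_888(crtc, crtc_state->gamma_lut->data,
						foo_set_gamma);
		else
			drm_crtc_fill_gamma_888(crtc, foo_set_gamma);
	}

The same callback type serves the 565/555 variants, which subsample the 256-entry input down to the hardware LUT width.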
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 48b08c9611a7..272d6254ea47 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,6 +33,7 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/uaccess.h>
@@ -279,6 +280,7 @@ static int drm_connector_init_only(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
+ mutex_init(&connector->cec.mutex);
mutex_init(&connector->eld_mutex);
mutex_init(&connector->edid_override_mutex);
mutex_init(&connector->hdmi.infoframes.lock);
@@ -702,6 +704,46 @@ static void drm_mode_remove(struct drm_connector *connector,
}
/**
+ * drm_connector_cec_phys_addr_invalidate - invalidate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Invalidates the CEC physical address set for this DRM connector.
+ */
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector)
+{
+ mutex_lock(&connector->cec.mutex);
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_invalidate)
+ connector->cec.funcs->phys_addr_invalidate(connector);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_invalidate);
+
+/**
+ * drm_connector_cec_phys_addr_set - propagate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Propagates the CEC physical address from display_info to this DRM connector.
+ */
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector)
+{
+ u16 addr;
+
+ mutex_lock(&connector->cec.mutex);
+
+ addr = connector->display_info.source_physical_address;
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_set)
+ connector->cec.funcs->phys_addr_set(connector, addr);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_set);
+
+/**
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
@@ -1645,7 +1687,7 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* structure from userspace. This is received as blob and stored in
* &drm_connector_state.hdr_output_metadata. It parses EDID and saves the
* sink metadata in &struct hdr_sink_metadata, as
- * &drm_connector.hdr_sink_metadata. Driver uses
+ * &drm_connector.display_info.hdr_sink_metadata. Driver uses
* drm_hdmi_infoframe_set_hdr_metadata() helper to set the HDR metadata,
* hdmi_drm_infoframe_pack() to pack the infoframe as per spec, in case of
* HDMI encoder.
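
[editor's note] A sketch of how a driver would hook into the new CEC helpers (assuming the cec.funcs ops table carries phys_addr_set()/phys_addr_invalidate() as dereferenced above; the foo_* callbacks are placeholders):

	static const struct drm_connector_cec_funcs foo_cec_funcs = {
		.phys_addr_set = foo_cec_phys_addr_set,
		.phys_addr_invalidate = foo_cec_phys_addr_invalidate,
	};

	/* at connector init time */
	connector->cec.funcs = &foo_cec_funcs;

drm_connector_cec_phys_addr_set() is then expected to be called once EDID parsing has filled display_info.source_physical_address, and the invalidate variant on disconnect.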
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 0955f1c385dd..39497493f74c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -334,7 +334,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- encoder_funcs = encoder->helper_private;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 44a5a36806e3..6a49e7a0ab84 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -30,6 +30,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6b2178864c7e..365cf337529f 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -44,6 +44,9 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
+static struct dentry *accel_debugfs_root;
+static struct dentry *drm_debugfs_root;
+
/***************************************************
* Initialization, etc.
**************************************************/
@@ -77,14 +80,15 @@ static int drm_clients_info(struct seq_file *m, void *data)
kuid_t uid;
seq_printf(m,
- "%20s %5s %3s master a %5s %10s %*s\n",
+ "%20s %5s %3s master a %5s %10s %*s %20s\n",
"command",
"tgid",
"dev",
"uid",
"magic",
DRM_CLIENT_NAME_MAX_LEN,
- "name");
+ "name",
+ "id");
/* dev->filelist is sorted youngest first, but we want to present
* oldest first (i.e. kernel, servers, clients), so walk backwards.
@@ -100,7 +104,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
pid = rcu_dereference(priv->pid);
task = pid_task(pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
- seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s\n",
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s %20llu\n",
task ? task->comm : "<unknown>",
pid_vnr(pid),
priv->minor->index,
@@ -109,7 +113,8 @@ static int drm_clients_info(struct seq_file *m, void *data)
from_kuid_munged(seq_user_ns(m), uid),
priv->magic,
DRM_CLIENT_NAME_MAX_LEN,
- priv->client_name ? priv->client_name : "<unset>");
+ priv->client_name ? priv->client_name : "<unset>",
+ priv->client_id);
rcu_read_unlock();
mutex_unlock(&priv->client_name_lock);
}
@@ -285,16 +290,120 @@ int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
}
EXPORT_SYMBOL(drm_debugfs_remove_files);
+void drm_debugfs_bridge_params(void)
+{
+ drm_bridge_debugfs_params(drm_debugfs_root);
+}
+
+void drm_debugfs_init_root(void)
+{
+ drm_debugfs_root = debugfs_create_dir("dri", NULL);
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ accel_debugfs_root = debugfs_create_dir("accel", NULL);
+#endif
+}
+
+void drm_debugfs_remove_root(void)
+{
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ debugfs_remove(accel_debugfs_root);
+#endif
+ debugfs_remove(drm_debugfs_root);
+}
+
+static int drm_debugfs_proc_info_show(struct seq_file *m, void *unused)
+{
+ struct pid *pid;
+ struct task_struct *task;
+ struct drm_file *file = m->private;
+
+ if (!file)
+ return -EINVAL;
+
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+
+ seq_printf(m, "pid: %d\n", task ? task->pid : 0);
+ seq_printf(m, "comm: %s\n", task ? task->comm : "Unset");
+ rcu_read_unlock();
+ return 0;
+}
+
+static int drm_debugfs_proc_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, drm_debugfs_proc_info_show, inode->i_private);
+}
+
+static const struct file_operations drm_debugfs_proc_info_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_debugfs_proc_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * drm_debugfs_clients_add - Add a per client debugfs directory
+ * @file: drm_file for a client
+ *
+ * Creates the debugfs directory for each client. This will be used to populate
+ * driver-specific data for each client.
+ *
+ * Also adds the process information debugfs file for each client to tag
+ * which client belongs to which process.
+ */
+void drm_debugfs_clients_add(struct drm_file *file)
+{
+ char *client;
+
+ client = kasprintf(GFP_KERNEL, "client-%llu", file->client_id);
+ if (!client)
+ return;
+
+ /* Create a debugfs directory for the client in root on drm debugfs */
+ file->debugfs_client = debugfs_create_dir(client, drm_debugfs_root);
+ kfree(client);
+
+ debugfs_create_file("proc_info", 0444, file->debugfs_client, file,
+ &drm_debugfs_proc_info_fops);
+
+ client = kasprintf(GFP_KERNEL, "../%s", file->minor->dev->unique);
+ if (!client)
+ return;
+
+ /* Create a link from client_id to the drm device this client id belongs to */
+ debugfs_create_symlink("device", file->debugfs_client, client);
+ kfree(client);
+}
+
+/**
+ * drm_debugfs_clients_remove - removes all debugfs directories and files
+ * @file: drm_file for a client
+ *
+ * Removes the debugfs directories recursively from the client directory.
+ *
+ * Note that debugfs files may still be open while the drm_file is being
+ * released.
+ */
+void drm_debugfs_clients_remove(struct drm_file *file)
+{
+ debugfs_remove_recursive(file->debugfs_client);
+ file->debugfs_client = NULL;
+}
+
/**
* drm_debugfs_dev_init - create debugfs directory for the device
* @dev: the device which we want to create the directory for
- * @root: the parent directory depending on the device type
*
* Creates the debugfs directory for the device under the given root directory.
*/
-void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+void drm_debugfs_dev_init(struct drm_device *dev)
{
- dev->debugfs_root = debugfs_create_dir(dev->unique, root);
+ if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ dev->debugfs_root = debugfs_create_dir(dev->unique, accel_debugfs_root);
+ else
+ dev->debugfs_root = debugfs_create_dir(dev->unique, drm_debugfs_root);
}
/**
@@ -321,14 +430,13 @@ void drm_debugfs_dev_register(struct drm_device *dev)
drm_atomic_debugfs_init(dev);
}
-int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root)
+int drm_debugfs_register(struct drm_minor *minor, int minor_id)
{
struct drm_device *dev = minor->dev;
char name[64];
sprintf(name, "%d", minor_id);
- minor->debugfs_symlink = debugfs_create_symlink(name, root,
+ minor->debugfs_symlink = debugfs_create_symlink(name, drm_debugfs_root,
dev->unique);
/* TODO: Only for compatibility with drivers */
@@ -740,40 +848,6 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
crtc->debugfs_entry = NULL;
}
-static int bridges_show(struct seq_file *m, void *data)
-{
- struct drm_encoder *encoder = m->private;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_bridge *bridge;
- unsigned int idx = 0;
-
- drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs);
- drm_printf(&p, "\ttype: [%d] %s\n",
- bridge->type,
- drm_get_connector_type_name(bridge->type));
-
- if (bridge->of_node)
- drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
-
- drm_printf(&p, "\tops: [0x%x]", bridge->ops);
- if (bridge->ops & DRM_BRIDGE_OP_DETECT)
- drm_puts(&p, " detect");
- if (bridge->ops & DRM_BRIDGE_OP_EDID)
- drm_puts(&p, " edid");
- if (bridge->ops & DRM_BRIDGE_OP_HPD)
- drm_puts(&p, " hpd");
- if (bridge->ops & DRM_BRIDGE_OP_MODES)
- drm_puts(&p, " modes");
- if (bridge->ops & DRM_BRIDGE_OP_HDMI)
- drm_puts(&p, " hdmi");
- drm_puts(&p, "\n");
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(bridges);
-
void drm_debugfs_encoder_add(struct drm_encoder *encoder)
{
struct drm_minor *minor = encoder->dev->primary;
@@ -789,9 +863,7 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder)
encoder->debugfs_entry = root;
- /* bridges list */
- debugfs_create_file("bridges", 0444, root, encoder,
- &bridges_fops);
+ drm_bridge_debugfs_encoder_params(root, encoder);
if (encoder->funcs && encoder->funcs->debugfs_init)
encoder->funcs->debugfs_init(encoder, root);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index bbc3bc4ba844..6b43b1cf2327 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -29,6 +29,7 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h
index aee1b86a73c1..957dd0619f5c 100644
--- a/drivers/gpu/drm/drm_displayid_internal.h
+++ b/drivers/gpu/drm/drm_displayid_internal.h
@@ -66,6 +66,7 @@ struct drm_edid;
#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
#define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_TYPE_10_FORMULA_TIMING 0x2a
#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
@@ -114,20 +115,32 @@ struct displayid_tiled_block {
struct displayid_detailed_timings_1 {
u8 pixel_clock[3];
u8 flags;
- u8 hactive[2];
- u8 hblank[2];
- u8 hsync[2];
- u8 hsw[2];
- u8 vactive[2];
- u8 vblank[2];
- u8 vsync[2];
- u8 vsw[2];
+ __le16 hactive;
+ __le16 hblank;
+ __le16 hsync;
+ __le16 hsw;
+ __le16 vactive;
+ __le16 vblank;
+ __le16 vsync;
+ __le16 vsw;
} __packed;
struct displayid_detailed_timing_block {
struct displayid_block base;
struct displayid_detailed_timings_1 timings[];
-};
+} __packed;
+
+struct displayid_formula_timings_9 {
+ u8 flags;
+ __le16 hactive;
+ __le16 vactive;
+ u8 vrefresh;
+} __packed;
+
+struct displayid_formula_timing_block {
+ struct displayid_block base;
+ struct displayid_formula_timings_9 timings[];
+} __packed;
#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
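
[editor's note] Turning the u8[2] pairs into __le16 lets the drm_edid.c parser further below use le16_to_cpu() instead of assembling bytes by hand; both forms read the same little-endian field, e.g.:

	/* before */ hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
	/* after  */ hactive = le16_to_cpu(timings->hactive) + 1;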
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index 385eb5e10047..9dc0408fbbea 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -13,85 +13,7 @@
#include <drm/drm_fourcc.h>
#include "drm_draw_internal.h"
-
-/*
- * Conversions from xrgb8888
- */
-
-static u16 convert_xrgb8888_to_rgb565(u32 pix)
-{
- return ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
-{
- return ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
-}
-
-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
-{
- return ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_argb1555(u32 pix)
-{
- return BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u32 convert_xrgb8888_to_argb8888(u32 pix)
-{
- return pix | GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
-}
-
-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
-{
- pix = ((pix & 0x00FF0000) >> 14) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x000000FF) << 22);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
+#include "drm_format_internal.h"
/**
* drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
@@ -106,28 +28,28 @@ u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
{
switch (format) {
case DRM_FORMAT_RGB565:
- return convert_xrgb8888_to_rgb565(color);
+ return drm_pixel_xrgb8888_to_rgb565(color);
case DRM_FORMAT_RGBA5551:
- return convert_xrgb8888_to_rgba5551(color);
+ return drm_pixel_xrgb8888_to_rgba5551(color);
case DRM_FORMAT_XRGB1555:
- return convert_xrgb8888_to_xrgb1555(color);
+ return drm_pixel_xrgb8888_to_xrgb1555(color);
case DRM_FORMAT_ARGB1555:
- return convert_xrgb8888_to_argb1555(color);
+ return drm_pixel_xrgb8888_to_argb1555(color);
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
return color;
case DRM_FORMAT_ARGB8888:
- return convert_xrgb8888_to_argb8888(color);
+ return drm_pixel_xrgb8888_to_argb8888(color);
case DRM_FORMAT_XBGR8888:
- return convert_xrgb8888_to_xbgr8888(color);
+ return drm_pixel_xrgb8888_to_xbgr8888(color);
case DRM_FORMAT_ABGR8888:
- return convert_xrgb8888_to_abgr8888(color);
+ return drm_pixel_xrgb8888_to_abgr8888(color);
case DRM_FORMAT_XRGB2101010:
- return convert_xrgb8888_to_xrgb2101010(color);
+ return drm_pixel_xrgb8888_to_xrgb2101010(color);
case DRM_FORMAT_ARGB2101010:
- return convert_xrgb8888_to_argb2101010(color);
+ return drm_pixel_xrgb8888_to_argb2101010(color);
case DRM_FORMAT_ABGR2101010:
- return convert_xrgb8888_to_abgr2101010(color);
+ return drm_pixel_xrgb8888_to_abgr2101010(color);
default:
WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
return 0;
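
[editor's note] A worked example of the conversion path above, for opaque red in XRGB8888:

	u32 c565 = drm_draw_color_from_xrgb8888(0x00ff0000, DRM_FORMAT_RGB565);   /* 0xf800 */
	u32 argb = drm_draw_color_from_xrgb8888(0x00ff0000, DRM_FORMAT_ARGB8888); /* 0xffff0000 */

RGB565 keeps the top 5 red bits shifted into place, and ARGB8888 just fills the alpha bits.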
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 17fc5dc708f4..cdd591b11488 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -29,17 +29,20 @@
#include <linux/bitops.h>
#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sprintf.h>
#include <linux/srcu.h>
#include <linux/xarray.h>
#include <drm/drm_accel.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_cache.h>
#include <drm/drm_client_event.h>
#include <drm/drm_color_mgmt.h>
@@ -69,8 +72,6 @@ DEFINE_XARRAY_ALLOC(drm_minors_xa);
*/
static bool drm_core_init_complete;
-static struct dentry *drm_debugfs_root;
-
DEFINE_STATIC_SRCU(drm_unplug_srcu);
/*
@@ -183,8 +184,7 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
return 0;
if (minor->type != DRM_MINOR_ACCEL) {
- ret = drm_debugfs_register(minor, minor->index,
- drm_debugfs_root);
+ ret = drm_debugfs_register(minor, minor->index);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
goto err_debugfs;
@@ -500,6 +500,25 @@ void drm_dev_unplug(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unplug);
+/**
+ * drm_dev_set_dma_dev - set the DMA device for a DRM device
+ * @dev: DRM device
+ * @dma_dev: DMA device or NULL
+ *
+ * Sets the DMA device of the given DRM device. Only required if
+ * the DMA device is different from the DRM device's parent. After
+ * calling this function, the DRM device holds a reference on
+ * @dma_dev. Pass NULL to clear the DMA device.
+ */
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev)
+{
+ dma_dev = get_device(dma_dev);
+
+ put_device(dev->dma_dev);
+ dev->dma_dev = dma_dev;
+}
+EXPORT_SYMBOL(drm_dev_set_dma_dev);
+
/*
* Available recovery methods for wedged device. To be sent along with device
* wedged uevent.
@@ -518,10 +537,15 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
}
}
+#define WEDGE_STR_LEN 32
+#define PID_STR_LEN 15
+#define COMM_STR_LEN (TASK_COMM_LEN + 5)
+
/**
* drm_dev_wedged_event - generate a device wedged uevent
* @dev: DRM device
* @method: method(s) to be used for recovery
+ * @info: optional information about the guilty task
*
* This generates a device wedged uevent for the DRM device specified by @dev.
* Recovery @method\(s) of choice will be sent in the uevent environment as
@@ -534,13 +558,13 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
*
* Returns: 0 on success, negative error code otherwise.
*/
-int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
+ struct drm_wedge_task_info *info)
{
+ char event_string[WEDGE_STR_LEN], pid_string[PID_STR_LEN], comm_string[COMM_STR_LEN];
+ char *envp[] = { event_string, NULL, NULL, NULL };
const char *recovery = NULL;
unsigned int len, opt;
- /* Event string length up to 28+ characters with available methods */
- char event_string[32];
- char *envp[] = { event_string, NULL };
len = scnprintf(event_string, sizeof(event_string), "%s", "WEDGED=");
@@ -549,7 +573,7 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
break;
- len += scnprintf(event_string + len, sizeof(event_string), "%s,", recovery);
+ len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
}
if (recovery)
@@ -562,6 +586,13 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
drm_info(dev, "device wedged, %s\n", method == DRM_WEDGE_RECOVERY_NONE ?
"but recovered through reset" : "needs recovery");
+ if (info && (info->comm[0] != '\0') && (info->pid >= 0)) {
+ snprintf(pid_string, sizeof(pid_string), "PID=%u", info->pid);
+ snprintf(comm_string, sizeof(comm_string), "TASK=%s", info->comm);
+ envp[1] = pid_string;
+ envp[2] = comm_string;
+ }
+
return kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_dev_wedged_event);
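
[editor's note] With task info supplied, the uevent environment gains the guilty task next to the recovery method, e.g. (values illustrative):

	WEDGED=rebind
	PID=1337
	TASK=glxgears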
@@ -654,6 +685,8 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
{
drm_fs_inode_free(dev->anon_inode);
+ put_device(dev->dma_dev);
+ dev->dma_dev = NULL;
put_device(dev->dev);
/* Prevent use-after-free in drm_managed_release when debugging is
* enabled. Slightly awkward, but can't really be helped. */
@@ -751,10 +784,7 @@ static int drm_dev_init(struct drm_device *dev,
goto err;
}
- if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
- accel_debugfs_init(dev);
- else
- drm_debugfs_dev_init(dev, drm_debugfs_root);
+ drm_debugfs_dev_init(dev);
return 0;
@@ -808,36 +838,62 @@ void *__devm_drm_dev_alloc(struct device *parent,
EXPORT_SYMBOL(__devm_drm_dev_alloc);
/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
+ * __drm_dev_alloc - Allocation of a &drm_device instance
* @parent: Parent device object
+ * @driver: DRM driver
+ * @size: the size of the struct which contains struct drm_device
+ * @offset: the offset of the &drm_device within the container.
*
- * This is the deprecated version of devm_drm_dev_alloc(), which does not support
- * subclassing through embedding the struct &drm_device in a driver private
- * structure, and which does not support automatic cleanup through devres.
+ * This should *NOT* be used by any drivers, but is a dedicated interface for the
+ * corresponding Rust abstraction.
*
- * RETURNS:
- * Pointer to new DRM device, or ERR_PTR on failure.
+ * This is the same as devm_drm_dev_alloc(), but without the corresponding
+ * resource management through the parent device. It is not the same as
+ * drm_dev_alloc() either, since the latter is the deprecated version, which
+ * does not support subclassing.
+ *
+ * Returns: A pointer to new DRM device, or an ERR_PTR on failure.
*/
-struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
- struct device *parent)
+void *__drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset)
{
- struct drm_device *dev;
+ void *container;
+ struct drm_device *drm;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
return ERR_PTR(-ENOMEM);
- ret = drm_dev_init(dev, driver, parent);
+ drm = container + offset;
+ ret = drm_dev_init(drm, driver, parent);
if (ret) {
- kfree(dev);
+ kfree(container);
return ERR_PTR(ret);
}
+ drmm_add_final_kfree(drm, container);
- drmm_add_final_kfree(dev, dev);
+ return container;
+}
+EXPORT_SYMBOL(__drm_dev_alloc);
- return dev;
+/**
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * This is the deprecated version of devm_drm_dev_alloc(), which does not support
+ * subclassing through embedding the struct &drm_device in a driver private
+ * structure, and which does not support automatic cleanup through devres.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or ERR_PTR on failure.
+ */
+struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
+ struct device *parent)
+{
+ return __drm_dev_alloc(parent, driver, sizeof(struct drm_device), 0);
}
EXPORT_SYMBOL(drm_dev_alloc);
@@ -1168,7 +1224,7 @@ static void drm_core_exit(void)
drm_panic_exit();
accel_core_exit();
unregister_chrdev(DRM_MAJOR, "drm");
- debugfs_remove(drm_debugfs_root);
+ drm_debugfs_remove_root();
drm_sysfs_destroy();
WARN_ON(!xa_empty(&drm_minors_xa));
drm_connector_ida_destroy();
@@ -1187,7 +1243,8 @@ static int __init drm_core_init(void)
goto error;
}
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ drm_debugfs_init_root();
+ drm_debugfs_bridge_params();
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 13bc4c290b17..e2e85345aa9a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -31,6 +31,7 @@
#include <linux/bitfield.h>
#include <linux/byteorder/generic.h>
#include <linux/cec.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
@@ -66,34 +67,36 @@ static int oui(u8 first, u8 second, u8 third)
* on as many displays as possible).
*/
-/* First detailed mode wrong, use largest 60Hz mode */
-#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
-/* Reported 135MHz pixel clock is too high, needs adjustment */
-#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
-/* Prefer the largest mode at 75 Hz */
-#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
-/* Detail timing is in cm not mm */
-#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
-/* Detailed timing descriptors have bogus size values, so just take the
- * maximum size and use that.
- */
-#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* use +hsync +vsync for detailed mode */
-#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* Force reduced-blanking timings for detailed modes */
-#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
-/* Force 8bpc */
-#define EDID_QUIRK_FORCE_8BPC (1 << 8)
-/* Force 12bpc */
-#define EDID_QUIRK_FORCE_12BPC (1 << 9)
-/* Force 6bpc */
-#define EDID_QUIRK_FORCE_6BPC (1 << 10)
-/* Force 10bpc */
-#define EDID_QUIRK_FORCE_10BPC (1 << 11)
-/* Non desktop display (i.e. HMD) */
-#define EDID_QUIRK_NON_DESKTOP (1 << 12)
-/* Cap the DSC target bitrate to 15bpp */
-#define EDID_QUIRK_CAP_DSC_15BPP (1 << 13)
+enum drm_edid_internal_quirk {
+ /* First detailed mode wrong, use largest 60Hz mode */
+ EDID_QUIRK_PREFER_LARGE_60 = DRM_EDID_QUIRK_NUM,
+ /* Reported 135MHz pixel clock is too high, needs adjustment */
+ EDID_QUIRK_135_CLOCK_TOO_HIGH,
+ /* Prefer the largest mode at 75 Hz */
+ EDID_QUIRK_PREFER_LARGE_75,
+ /* Detail timing is in cm not mm */
+ EDID_QUIRK_DETAILED_IN_CM,
+ /* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+ EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE,
+ /* use +hsync +vsync for detailed mode */
+ EDID_QUIRK_DETAILED_SYNC_PP,
+ /* Force reduced-blanking timings for detailed modes */
+ EDID_QUIRK_FORCE_REDUCED_BLANKING,
+ /* Force 8bpc */
+ EDID_QUIRK_FORCE_8BPC,
+ /* Force 12bpc */
+ EDID_QUIRK_FORCE_12BPC,
+ /* Force 6bpc */
+ EDID_QUIRK_FORCE_6BPC,
+ /* Force 10bpc */
+ EDID_QUIRK_FORCE_10BPC,
+ /* Non desktop display (i.e. HMD) */
+ EDID_QUIRK_NON_DESKTOP,
+ /* Cap the DSC target bitrate to 15bpp */
+ EDID_QUIRK_CAP_DSC_15BPP,
+};
#define MICROSOFT_IEEE_OUI 0xca125c
@@ -128,124 +131,132 @@ static const struct edid_quirk {
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
- EDID_QUIRK('A', 'C', 'R', 44358, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'C', 'R', 44358, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Acer F51 */
- EDID_QUIRK('A', 'P', 'I', 0x7602, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'P', 'I', 0x7602, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('A', 'E', 'O', 0, BIT(EDID_QUIRK_FORCE_6BPC)),
/* BenQ GW2765 */
- EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('B', 'N', 'Q', 0x78d6, BIT(EDID_QUIRK_FORCE_8BPC)),
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x78b, BIT(EDID_QUIRK_FORCE_6BPC)),
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('C', 'P', 'T', 0x17df, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('C', 'P', 'T', 0x17df, BIT(EDID_QUIRK_FORCE_6BPC)),
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('S', 'D', 'C', 0x3652, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 0x3652, BIT(EDID_QUIRK_FORCE_6BPC)),
/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('B', 'O', 'E', 0x0771, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x0771, BIT(EDID_QUIRK_FORCE_6BPC)),
/* Belinea 10 15 55 */
- EDID_QUIRK('M', 'A', 'X', 1516, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK('M', 'A', 'X', 0x77e, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('M', 'A', 'X', 1516, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+ EDID_QUIRK('M', 'A', 'X', 0x77e, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Envision Peripherals, Inc. EN-7100e */
- EDID_QUIRK('E', 'P', 'I', 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
+ EDID_QUIRK('E', 'P', 'I', 59264, BIT(EDID_QUIRK_135_CLOCK_TOO_HIGH)),
/* Envision EN2028 */
- EDID_QUIRK('E', 'P', 'I', 8232, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('E', 'P', 'I', 8232, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Funai Electronics PM36B */
- EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM),
+ EDID_QUIRK('F', 'C', 'M', 13600, BIT(EDID_QUIRK_PREFER_LARGE_75) |
+ BIT(EDID_QUIRK_DETAILED_IN_CM)),
/* LG 27GP950 */
- EDID_QUIRK('G', 'S', 'M', 0x5bbf, EDID_QUIRK_CAP_DSC_15BPP),
+ EDID_QUIRK('G', 'S', 'M', 0x5bbf, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
/* LG 27GN950 */
- EDID_QUIRK('G', 'S', 'M', 0x5b9a, EDID_QUIRK_CAP_DSC_15BPP),
+ EDID_QUIRK('G', 'S', 'M', 0x5b9a, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
- EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
+ EDID_QUIRK('L', 'G', 'D', 764, BIT(EDID_QUIRK_FORCE_10BPC)),
/* LG Philips LCD LP154W01-A5 */
- EDID_QUIRK('L', 'P', 'L', 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
- EDID_QUIRK('L', 'P', 'L', 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+ EDID_QUIRK('L', 'P', 'L', 0, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
+ EDID_QUIRK('L', 'P', 'L', 0x2a00, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
/* Samsung SyncMaster 205BW. Note: irony */
- EDID_QUIRK('S', 'A', 'M', 541, EDID_QUIRK_DETAILED_SYNC_PP),
+ EDID_QUIRK('S', 'A', 'M', 541, BIT(EDID_QUIRK_DETAILED_SYNC_PP)),
/* Samsung SyncMaster 22[5-6]BW */
- EDID_QUIRK('S', 'A', 'M', 596, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK('S', 'A', 'M', 638, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('S', 'A', 'M', 596, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+ EDID_QUIRK('S', 'A', 'M', 638, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
- EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC),
+ EDID_QUIRK('S', 'N', 'Y', 0x2541, BIT(EDID_QUIRK_FORCE_12BPC)),
/* ViewSonic VA2026w */
- EDID_QUIRK('V', 'S', 'C', 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
+ EDID_QUIRK('V', 'S', 'C', 5020, BIT(EDID_QUIRK_FORCE_REDUCED_BLANKING)),
/* Medion MD 30217 PG */
- EDID_QUIRK('M', 'E', 'D', 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
+ EDID_QUIRK('M', 'E', 'D', 0x7b8, BIT(EDID_QUIRK_PREFER_LARGE_75)),
/* Lenovo G50 */
- EDID_QUIRK('S', 'D', 'C', 18514, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 18514, BIT(EDID_QUIRK_FORCE_6BPC)),
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
- EDID_QUIRK('S', 'E', 'C', 0xd033, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('S', 'E', 'C', 0xd033, BIT(EDID_QUIRK_FORCE_8BPC)),
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
- EDID_QUIRK('E', 'T', 'R', 13896, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('E', 'T', 'R', 13896, BIT(EDID_QUIRK_FORCE_8BPC)),
/* Valve Index Headset */
- EDID_QUIRK('V', 'L', 'V', 0x91a8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b0, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b1, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b2, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b3, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b4, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b5, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b6, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b7, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b9, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91ba, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bb, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bc, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bd, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91be, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bf, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91a8, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b0, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b1, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b2, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b3, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b4, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b5, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b6, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b7, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b8, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b9, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91ba, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bb, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bc, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bd, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91be, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bf, BIT(EDID_QUIRK_NON_DESKTOP)),
/* HTC Vive and Vive Pro VR Headsets */
- EDID_QUIRK('H', 'V', 'R', 0xaa01, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('H', 'V', 'R', 0xaa02, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'V', 'R', 0xaa01, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('H', 'V', 'R', 0xaa02, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
- EDID_QUIRK('O', 'V', 'R', 0x0001, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0003, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0004, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0012, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0001, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0003, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0004, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0012, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Windows Mixed Reality Headsets */
- EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('A', 'U', 'S', 0xc102, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('A', 'C', 'R', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('L', 'E', 'N', 0x0408, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('F', 'U', 'J', 0x1970, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('D', 'E', 'L', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('S', 'E', 'C', 0x144a, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('A', 'U', 'S', 0xc102, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Sony PlayStation VR Headset */
- EDID_QUIRK('S', 'N', 'Y', 0x0704, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'N', 'Y', 0x0704, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Sensics VR Headsets */
- EDID_QUIRK('S', 'E', 'N', 0x1019, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'E', 'N', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
/* OSVR HDK and HDK2 VR Headsets */
- EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'V', 'R', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('A', 'U', 'O', 0x1111, BIT(EDID_QUIRK_NON_DESKTOP)),
+
+ /*
+ * The @drm_edid_internal_quirk entries end here; the @drm_edid_quirk
+ * entries follow below.
+ */
+
+ /* HP ZR24w DP AUX DPCD access requires probing to prevent corruption. */
+ EDID_QUIRK('H', 'W', 'P', 0x2869, BIT(DRM_EDID_QUIRK_DP_DPCD_PROBE)),
};
/*
@@ -2951,6 +2962,18 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
return 0;
}
+static bool drm_edid_has_internal_quirk(struct drm_connector *connector,
+ enum drm_edid_internal_quirk quirk)
+{
+ return connector->display_info.quirks & BIT(quirk);
+}
+
+bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk)
+{
+ return connector->display_info.quirks & BIT(quirk);
+}
+EXPORT_SYMBOL(drm_edid_has_quirk);
+
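
[editor's note] drm_edid_has_quirk() is the public counterpart for quirks that drivers act on themselves, e.g. gating DP AUX behaviour on the new DPCD-probe quirk (consumer side hypothetical):

	if (drm_edid_has_quirk(connector, DRM_EDID_QUIRK_DP_DPCD_PROBE))
		foo_dp_force_dpcd_probe(aux);   /* driver hook, made up */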
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
@@ -2960,7 +2983,6 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
*/
static void edid_fixup_preferred(struct drm_connector *connector)
{
- const struct drm_display_info *info = &connector->display_info;
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
int cur_vrefresh, preferred_vrefresh;
@@ -2968,9 +2990,9 @@ static void edid_fixup_preferred(struct drm_connector *connector)
if (list_empty(&connector->probed_modes))
return;
- if (info->quirks & EDID_QUIRK_PREFER_LARGE_60)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60))
target_refresh = 60;
- if (info->quirks & EDID_QUIRK_PREFER_LARGE_75)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
target_refresh = 75;
preferred_mode = list_first_entry(&connector->probed_modes,
@@ -3474,7 +3496,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
- const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
@@ -3508,7 +3529,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
return NULL;
}
- if (info->quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_REDUCED_BLANKING)) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
@@ -3520,7 +3541,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (!mode)
return NULL;
- if (info->quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_135_CLOCK_TOO_HIGH))
mode->clock = 1088 * 10;
else
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
@@ -3551,7 +3572,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
drm_mode_do_interlace_quirk(mode, pt);
- if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_SYNC_PP)) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
@@ -3564,12 +3585,12 @@ set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- if (info->quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_IN_CM)) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
- if (info->quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)) {
mode->width_mm = drm_edid->edid->width_cm * 10;
mode->height_mm = drm_edid->edid->height_cm * 10;
}
@@ -5373,7 +5394,8 @@ static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
static void drm_calculate_luminance_range(struct drm_connector *connector)
{
- struct hdr_static_metadata *hdr_metadata = &connector->hdr_sink_metadata.hdmi_type1;
+ const struct hdr_static_metadata *hdr_metadata =
+ &connector->display_info.hdr_sink_metadata.hdmi_type1;
struct drm_luminance_range_info *luminance_range =
&connector->display_info.luminance_range;
static const u8 pre_computed_values[] = {
@@ -5434,21 +5456,21 @@ static uint8_t hdr_metadata_type(const u8 *edid_ext)
static void
drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
{
+ struct hdr_static_metadata *hdr_metadata =
+ &connector->display_info.hdr_sink_metadata.hdmi_type1;
u16 len;
len = cea_db_payload_len(db);
- connector->hdr_sink_metadata.hdmi_type1.eotf =
- eotf_supported(db);
- connector->hdr_sink_metadata.hdmi_type1.metadata_type =
- hdr_metadata_type(db);
+ hdr_metadata->eotf = eotf_supported(db);
+ hdr_metadata->metadata_type = hdr_metadata_type(db);
if (len >= 4)
- connector->hdr_sink_metadata.hdmi_type1.max_cll = db[4];
+ hdr_metadata->max_cll = db[4];
if (len >= 5)
- connector->hdr_sink_metadata.hdmi_type1.max_fall = db[5];
+ hdr_metadata->max_fall = db[5];
if (len >= 6) {
- connector->hdr_sink_metadata.hdmi_type1.min_cll = db[6];
+ hdr_metadata->min_cll = db[6];
/* Calculate only when all values are available */
drm_calculate_luminance_range(connector);
@@ -6596,6 +6618,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
+ memset(&info->hdr_sink_metadata, 0, sizeof(info->hdr_sink_metadata));
info->edid_hdmi_rgb444_dc_modes = 0;
info->edid_hdmi_ycbcr444_dc_modes = 0;
@@ -6733,26 +6756,26 @@ static void update_display_info(struct drm_connector *connector,
drm_update_mso(connector, drm_edid);
out:
- if (info->quirks & EDID_QUIRK_NON_DESKTOP) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_NON_DESKTOP)) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
connector->base.id, connector->name,
info->non_desktop ? " (redundant quirk)" : "");
info->non_desktop = true;
}
- if (info->quirks & EDID_QUIRK_CAP_DSC_15BPP)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_CAP_DSC_15BPP))
info->max_dsc_bpp = 15;
- if (info->quirks & EDID_QUIRK_FORCE_6BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_6BPC))
info->bpc = 6;
- if (info->quirks & EDID_QUIRK_FORCE_8BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_8BPC))
info->bpc = 8;
- if (info->quirks & EDID_QUIRK_FORCE_10BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_10BPC))
info->bpc = 10;
- if (info->quirks & EDID_QUIRK_FORCE_12BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_12BPC))
info->bpc = 12;
/* Depends on info->cea_rev set by drm_parse_cea_ext() above */
@@ -6760,23 +6783,23 @@ out:
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
- struct displayid_detailed_timings_1 *timings,
+ const struct displayid_detailed_timings_1 *timings,
bool type_7)
{
struct drm_display_mode *mode;
- unsigned pixel_clock = (timings->pixel_clock[0] |
- (timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16)) + 1;
- unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
- unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
- unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
- unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
- unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
- unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1;
- unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1;
- unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
- bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
- bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
+ unsigned int pixel_clock = (timings->pixel_clock[0] |
+ (timings->pixel_clock[1] << 8) |
+ (timings->pixel_clock[2] << 16)) + 1;
+ unsigned int hactive = le16_to_cpu(timings->hactive) + 1;
+ unsigned int hblank = le16_to_cpu(timings->hblank) + 1;
+ unsigned int hsync = (le16_to_cpu(timings->hsync) & 0x7fff) + 1;
+ unsigned int hsync_width = le16_to_cpu(timings->hsw) + 1;
+ unsigned int vactive = le16_to_cpu(timings->vactive) + 1;
+ unsigned int vblank = le16_to_cpu(timings->vblank) + 1;
+ unsigned int vsync = (le16_to_cpu(timings->vsync) & 0x7fff) + 1;
+ unsigned int vsync_width = le16_to_cpu(timings->vsw) + 1;
+ bool hsync_positive = le16_to_cpu(timings->hsync) & (1 << 15);
+ bool vsync_positive = le16_to_cpu(timings->vsync) & (1 << 15);
mode = drm_mode_create(dev);
if (!mode)
@@ -6833,6 +6856,66 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
return num_modes;
}
+static struct drm_display_mode *drm_mode_displayid_formula(struct drm_device *dev,
+ const struct displayid_formula_timings_9 *timings,
+ bool type_10)
+{
+ struct drm_display_mode *mode;
+ u16 hactive = le16_to_cpu(timings->hactive) + 1;
+ u16 vactive = le16_to_cpu(timings->vactive) + 1;
+ u8 timing_formula = timings->flags & 0x7;
+
+ /* TODO: support RB-v2 & RB-v3 */
+ if (timing_formula > 1)
+ return NULL;
+
+ /* TODO: support video-optimized refresh rate */
+ if (timings->flags & (1 << 4))
+ drm_dbg_kms(dev, "Fractional vrefresh is not implemented, proceeding with non-video-optimized refresh rate\n");
+
+ mode = drm_cvt_mode(dev, hactive, vactive, timings->vrefresh + 1, timing_formula == 1, false, false);
+ if (!mode)
+ return NULL;
+
+ /* TODO: interpret S3D flags */
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
+static int add_displayid_formula_modes(struct drm_connector *connector,
+ const struct displayid_block *block)
+{
+ const struct displayid_formula_timing_block *formula_block = (const struct displayid_formula_timing_block *)block;
+ int num_timings;
+ struct drm_display_mode *newmode;
+ int num_modes = 0;
+ bool type_10 = block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING;
+ int timing_size = 6 + ((formula_block->base.rev & 0x70) >> 4);
+
+ /* extended blocks are not supported yet */
+ if (timing_size != 6)
+ return 0;
+
+ if (block->num_bytes % timing_size)
+ return 0;
+
+ num_timings = block->num_bytes / timing_size;
+ for (int i = 0; i < num_timings; i++) {
+ const struct displayid_formula_timings_9 *timings = &formula_block->timings[i];
+
+ newmode = drm_mode_displayid_formula(connector->dev, timings, type_10);
+ if (!newmode)
+ continue;
+
+ drm_mode_probed_add(connector, newmode);
+ num_modes++;
+ }
+ return num_modes;
+}
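For reference, the descriptor-size arithmetic above can be read in isolation. A minimal sketch, assuming the patch's reading of the type 9/10 block revision field (the helper name is hypothetical, not part of the patch):

static int displayid_formula_timing_size(u8 rev)
{
	/* bits 6:4 of the block revision select larger descriptor sizes */
	return 6 + ((rev & 0x70) >> 4);
}

For rev = 0 this yields the 6-byte descriptors handled here; any larger value is treated as an unsupported extended block and skipped.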
+
static int add_displayid_detailed_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -6845,6 +6928,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING ||
block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING)
num_modes += add_displayid_detailed_1_modes(connector, block);
+ else if (block->tag == DATA_BLOCK_2_TYPE_9_FORMULA_TIMING ||
+ block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING)
+ num_modes += add_displayid_formula_modes(connector, block);
}
displayid_iter_end(&iter);
@@ -6854,7 +6940,6 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
static int _drm_edid_connector_add_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- const struct drm_display_info *info = &connector->display_info;
int num_modes = 0;
if (!drm_edid)
@@ -6884,7 +6969,8 @@ static int _drm_edid_connector_add_modes(struct drm_connector *connector,
if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
num_modes += add_inferred_modes(connector, drm_edid);
- if (info->quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60) ||
+ drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector);
return num_modes;
@@ -7099,18 +7185,12 @@ EXPORT_SYMBOL(drm_add_edid_modes);
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay)
+ unsigned int hdisplay, unsigned int vdisplay)
{
- int i, count, num_modes = 0;
+ int i, count = ARRAY_SIZE(drm_dmt_modes), num_modes = 0;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
- count = ARRAY_SIZE(drm_dmt_modes);
- if (hdisplay < 0)
- hdisplay = 0;
- if (vdisplay < 0)
- vdisplay = 0;
-
for (i = 0; i < count; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 18e366cc4993..8d0601400182 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -2,7 +2,9 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
+
#include <linux/dma-resv.h>
+#include <linux/export.h>
/**
* DOC: Overview
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index 2c4dc7ebc0c3..fd71969d2fb1 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -17,7 +17,9 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
+
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/module.h>
/**
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 937c3939e502..11a5b60cb9ce 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/sysrq.h>
#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 02a516e77192..8bd626ef16c7 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c
index f824369baacd..1e827bf8b815 100644
--- a/drivers/gpu/drm/drm_fbdev_shmem.c
+++ b/drivers/gpu/drm/drm_fbdev_shmem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c
index 73d35d59590c..85feb55bba11 100644
--- a/drivers/gpu/drm/drm_fbdev_ttm.c
+++ b/drivers/gpu/drm/drm_fbdev_ttm.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index cf2463090d3a..eebd1a05ee97 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -33,6 +33,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -45,6 +46,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -167,6 +169,9 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
drm_prime_init_file_private(&file->prime);
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_add(file);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, file);
if (ret < 0)
@@ -181,6 +186,10 @@ out_prime_destroy:
drm_syncobj_release(file);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
+
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_remove(file);
+
put_pid(rcu_access_pointer(file->pid));
kfree(file);
@@ -235,6 +244,9 @@ void drm_file_free(struct drm_file *file)
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_remove(file);
+
drm_events_release(file);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -993,6 +1005,42 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
EXPORT_SYMBOL(drm_show_fdinfo);
/**
+ * drm_file_err - log process name, pid and client_name associated with a drm_file
+ * @file_priv: context of interest for process name and pid
+ * @fmt: printf() like format string
+ *
+ * Helper function for clients that need to log process details such
+ * as name and pid along with user logs.
+ */
+void drm_file_err(struct drm_file *file_priv, const char *fmt, ...)
+{
+ va_list args;
+ struct va_format vaf;
+ struct pid *pid;
+ struct task_struct *task;
+ struct drm_device *dev = file_priv->minor->dev;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ mutex_lock(&file_priv->client_name_lock);
+ rcu_read_lock();
+ pid = rcu_dereference(file_priv->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+
+ drm_err(dev, "comm: %s pid: %d client-id:%llu client: %s ... %pV",
+ task ? task->comm : "Unset",
+ task ? task->pid : 0, file_priv->client_id,
+ file_priv->client_name ?: "Unset", &vaf);
+
+ va_end(args);
+ rcu_read_unlock();
+ mutex_unlock(&file_priv->client_name_lock);
+}
+EXPORT_SYMBOL(drm_file_err);
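A hedged usage sketch for the new helper: a driver can attribute an error to the offending client without open-coding the pid lookup. The surrounding function and the seqno argument are hypothetical:

static void example_report_timeout(struct drm_file *file_priv, u64 seqno)
{
	/* logs comm, pid, client-id and client name alongside the message */
	drm_file_err(file_priv, "fence wait timed out, seqno=%llu\n", seqno);
}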
+
+/**
* mock_drm_getfile - Create a new struct file for the drm device
* @minor: drm minor to wrap (e.g. #drm_device.primary)
* @flags: file creation mode (O_RDWR etc)
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index 8c6090a90d56..f5889dd8e7aa 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -21,6 +21,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_flip_work.h>
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 01d3ab307ac3..8f3daf38ca63 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -8,6 +8,7 @@
* (at your option) any later version.
*/
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -20,6 +21,8 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
+#include "drm_format_internal.h"
+
/**
* drm_format_conv_state_init - Initialize format-conversion state
* @state: The state to initialize
@@ -244,6 +247,152 @@ static int drm_fb_xfrm(struct iosys_map *dst,
xfrm_line);
}
+#define ALIGN_DOWN_PIXELS(end, n, a) \
+ ((end) - ((n) & ((a) - 1)))
+
+static __always_inline void drm_fb_xfrm_line_32to8(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 8) |
+ (xfrm_pixel(pix[2]) << 16) |
+ (xfrm_pixel(pix[3]) << 24);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixels */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf8++ = xfrm_pixel(le32_to_cpup(sbuf32++));
+}
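Two details make the fast path above work: ALIGN_DOWN_PIXELS(end, n, 4) simply backs the end pointer off by n mod 4 elements, and on the little-endian store the shifted OR lands the four output bytes in pixel order. A minimal sketch of that packing, assuming a little-endian destination (the helper name is hypothetical):

static u32 pack4_pixels_le(u8 p0, u8 p1, u8 p2, u8 p3)
{
	/* cpu_to_le32() of this value stores p0 at the lowest address */
	return p0 | (p1 << 8) | (p2 << 16) | ((u32)p3 << 24);
}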
+
+static __always_inline void drm_fb_xfrm_line_32to16(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le64 *dbuf64 = dbuf;
+ __le32 *dbuf32;
+ __le16 *dbuf16;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+#if defined(CONFIG_64BIT)
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u64 val64 = ((u64)xfrm_pixel(pix[0])) |
+ ((u64)xfrm_pixel(pix[1]) << 16) |
+ ((u64)xfrm_pixel(pix[2]) << 32) |
+ ((u64)xfrm_pixel(pix[3]) << 48);
+ *dbuf64++ = cpu_to_le64(val64);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+#endif
+
+ /* write 2 pixels at once */
+ dbuf32 = (__le32 __force *)dbuf64;
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 2)) {
+ u32 pix[2] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 16);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixel */
+ dbuf16 = (__le16 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf16++ = cpu_to_le16(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to24(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write pixels in chunks of 4 */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 val24[4] = {
+ xfrm_pixel(le32_to_cpup(sbuf32)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 1)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 2)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 3)),
+ };
+ u32 out32[3] = {
+ /* write output bytes in reverse order for little endianness */
+ ((val24[0] & 0x000000ff)) |
+ ((val24[0] & 0x0000ff00)) |
+ ((val24[0] & 0x00ff0000)) |
+ ((val24[1] & 0x000000ff) << 24),
+ ((val24[1] & 0x0000ff00) >> 8) |
+ ((val24[1] & 0x00ff0000) >> 8) |
+ ((val24[2] & 0x000000ff) << 16) |
+ ((val24[2] & 0x0000ff00) << 16),
+ ((val24[2] & 0x00ff0000) >> 16) |
+ ((val24[3] & 0x000000ff) << 8) |
+ ((val24[3] & 0x0000ff00) << 8) |
+ ((val24[3] & 0x00ff0000) << 8),
+ };
+
+ *dbuf32++ = cpu_to_le32(out32[0]);
+ *dbuf32++ = cpu_to_le32(out32[1]);
+ *dbuf32++ = cpu_to_le32(out32[2]);
+ sbuf32 += ARRAY_SIZE(val24);
+ }
+
+ /* write trailing pixel */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32) {
+ u32 val24 = xfrm_pixel(le32_to_cpup(sbuf32++));
+ /* write output in reverse order for little endianness */
+ *dbuf8++ = (val24 & 0x000000ff);
+ *dbuf8++ = (val24 & 0x0000ff00) >> 8;
+ *dbuf8++ = (val24 & 0x00ff0000) >> 16;
+ }
+}
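The three-word chunk above is easiest to check as a byte map: four 24-bit pixels occupy exactly 96 bits, so, writing pixel i as bytes Bi/Gi/Ri (lowest byte first in the little-endian words), the packing works out to:

/*
 * out32[0]: B0 G0 R0 B1
 * out32[1]: G1 R1 B2 G2
 * out32[2]: R2 B3 G3 R3
 */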
+
+static __always_inline void drm_fb_xfrm_line_32to32(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ while (sbuf32 < send32)
+ *dbuf32++ = cpu_to_le32(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
/**
* drm_fb_memcpy - Copy clip buffer
* @dst: Array of destination buffers
@@ -368,17 +517,7 @@ EXPORT_SYMBOL(drm_fb_swab);
static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- dbuf8[x] = ((pix & 0x00e00000) >> 16) |
- ((pix & 0x0000e000) >> 11) |
- ((pix & 0x000000c0) >> 6);
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb332);
}
/**
@@ -417,38 +556,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
-}
-
-/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
-static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
- unsigned int pixels)
-{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(swab16(val16));
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565);
}
/**
@@ -460,7 +568,6 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @state: Transform and conversion state
- * @swab: Swap bytes
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -475,39 +582,60 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
*/
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state,
- bool swab)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels);
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_rgb565_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
- if (swab)
- xfrm_line = drm_fb_xrgb8888_to_rgb565_swab_line;
- else
- xfrm_line = drm_fb_xrgb8888_to_rgb565_line;
+static void drm_fb_xrgb8888_to_rgb565be_line(void *dbuf, const void *sbuf,
+ unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565be);
+}
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, xfrm_line);
+/**
+ * drm_fb_xrgb8888_to_rgb565be - Convert XRGB8888 to RGB565|DRM_FORMAT_BIG_ENDIAN clip buffer
+ * @dst: Array of RGB565BE destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGB565BE devices that don't support XRGB8888 natively.
+ */
+void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_rgb565be_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565be);
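With the swab parameter gone, callers select the byte order by function name instead. A hedged sketch of a typical damage-flush call site under the new signatures (the wrapper function is hypothetical):

static void example_flush_rgb565(struct iosys_map *dst, const struct iosys_map *src,
				 const struct drm_framebuffer *fb,
				 const struct drm_rect *damage,
				 struct drm_format_conv_state *state)
{
	/* NULL dst_pitch means tightly packed scanlines; use the
	 * _rgb565be variant for DRM_FORMAT_BIG_ENDIAN devices */
	drm_fb_xrgb8888_to_rgb565(dst, NULL, src, fb, damage, state);
}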
static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb1555);
}
/**
@@ -547,20 +675,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb1555);
}
/**
@@ -600,20 +715,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgba5551);
}
/**
@@ -653,18 +755,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write blue-green-red to output in little endianness */
- *dbuf8++ = (pix & 0x000000FF) >> 0;
- *dbuf8++ = (pix & 0x0000FF00) >> 8;
- *dbuf8++ = (pix & 0x00FF0000) >> 16;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb888);
}
/**
@@ -704,18 +795,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write red-green-blue to output in little endianness */
- *dbuf8++ = (pix & 0x00ff0000) >> 16;
- *dbuf8++ = (pix & 0x0000ff00) >> 8;
- *dbuf8++ = (pix & 0x000000ff) >> 0;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgr888);
}
/**
@@ -755,16 +835,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix |= GENMASK(31, 24); /* fill alpha bits */
- dbuf32[x] = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb8888);
}
/**
@@ -804,26 +875,36 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
}
-static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_abgr8888 - Convert XRGB8888 to ABGR8888 clip buffer
+ * @dst: Array of ABGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ABGR8888 devices that don't support XRGB8888
+ * natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -832,29 +913,40 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_abgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_abgr8888);
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
}
-static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_xbgr8888 - Convert XRGB8888 to XBGR8888 clip buffer
+ * @dst: Array of XBGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XBGR8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -863,23 +955,53 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xbgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xbgr8888);
+
+static void drm_fb_xrgb8888_to_bgrx8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgrx8888);
+}
+
+/**
+ * drm_fb_xrgb8888_to_bgrx8888 - Convert XRGB8888 to BGRX8888 clip buffer
+ * @dst: Array of BGRX8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for BGRX8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_bgrx8888_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgrx8888);
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- pix = val32 | ((val32 >> 8) & 0x00300C03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb2101010);
}
/**
@@ -920,21 +1042,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000ff) << 2) |
- ((pix & 0x0000ff00) << 4) |
- ((pix & 0x00ff0000) << 6);
- pix = GENMASK(31, 30) | /* set alpha bits */
- val32 | ((val32 >> 8) & 0x00300c03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb2101010);
}
/**
@@ -975,19 +1083,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
-
- for (x = 0; x < pixels; x++) {
- u32 pix = le32_to_cpu(sbuf32[x]);
- u8 r = (pix & 0x00ff0000) >> 16;
- u8 g = (pix & 0x0000ff00) >> 8;
- u8 b = pix & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dbuf8++ = (3 * r + 6 * g + b) / 10;
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_r8_bt601);
}
/**
@@ -1031,36 +1127,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
static void drm_fb_argb8888_to_argb4444_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- unsigned int pixels2 = pixels & ~GENMASK_ULL(0, 0);
- __le32 *dbuf32 = dbuf;
- __le16 *dbuf16 = dbuf + pixels2 * sizeof(*dbuf16);
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u16 val16;
- u32 pix[2];
-
- for (x = 0; x < pixels2; x += 2, ++dbuf32) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- pix[1] = le32_to_cpu(sbuf32[x + 1]);
- val32 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4) |
- ((pix[1] & 0xf0000000) >> 0) |
- ((pix[1] & 0x00f00000) << 4) |
- ((pix[1] & 0x0000f000) << 8) |
- ((pix[1] & 0x000000f0) << 12);
- *dbuf32 = cpu_to_le32(val32);
- }
- for (; x < pixels; x++) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- val16 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_argb8888_to_argb4444);
}
/**
@@ -1141,7 +1208,7 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
return 0;
} else if (fb_format == DRM_FORMAT_XRGB8888) {
if (dst_format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state, false);
+ drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_XRGB1555) {
drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state);
@@ -1292,141 +1359,3 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
-
-static uint32_t drm_fb_nonalpha_fourcc(uint32_t fourcc)
-{
- /* only handle formats with depth != 0 and alpha channel */
- switch (fourcc) {
- case DRM_FORMAT_ARGB1555:
- return DRM_FORMAT_XRGB1555;
- case DRM_FORMAT_ABGR1555:
- return DRM_FORMAT_XBGR1555;
- case DRM_FORMAT_RGBA5551:
- return DRM_FORMAT_RGBX5551;
- case DRM_FORMAT_BGRA5551:
- return DRM_FORMAT_BGRX5551;
- case DRM_FORMAT_ARGB8888:
- return DRM_FORMAT_XRGB8888;
- case DRM_FORMAT_ABGR8888:
- return DRM_FORMAT_XBGR8888;
- case DRM_FORMAT_RGBA8888:
- return DRM_FORMAT_RGBX8888;
- case DRM_FORMAT_BGRA8888:
- return DRM_FORMAT_BGRX8888;
- case DRM_FORMAT_ARGB2101010:
- return DRM_FORMAT_XRGB2101010;
- case DRM_FORMAT_ABGR2101010:
- return DRM_FORMAT_XBGR2101010;
- case DRM_FORMAT_RGBA1010102:
- return DRM_FORMAT_RGBX1010102;
- case DRM_FORMAT_BGRA1010102:
- return DRM_FORMAT_BGRX1010102;
- }
-
- return fourcc;
-}
-
-static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
-{
- const uint32_t *fourccs_end = fourccs + nfourccs;
-
- while (fourccs < fourccs_end) {
- if (*fourccs == fourcc)
- return true;
- ++fourccs;
- }
- return false;
-}
-
-/**
- * drm_fb_build_fourcc_list - Filters a list of supported color formats against
- * the device's native formats
- * @dev: DRM device
- * @native_fourccs: 4CC codes of natively supported color formats
- * @native_nfourccs: The number of entries in @native_fourccs
- * @fourccs_out: Returns 4CC codes of supported color formats
- * @nfourccs_out: The number of available entries in @fourccs_out
- *
- * This function create a list of supported color format from natively
- * supported formats and additional emulated formats.
- * At a minimum, most userspace programs expect at least support for
- * XRGB8888 on the primary plane. Devices that have to emulate the
- * format, and possibly others, can use drm_fb_build_fourcc_list() to
- * create a list of supported color formats. The returned list can
- * be handed over to drm_universal_plane_init() et al. Native formats
- * will go before emulated formats. Native formats with alpha channel
- * will be replaced by such without, as primary planes usually don't
- * support alpha. Other heuristics might be applied
- * to optimize the order. Formats near the beginning of the list are
- * usually preferred over formats near the end of the list.
- *
- * Returns:
- * The number of color-formats 4CC codes returned in @fourccs_out.
- */
-size_t drm_fb_build_fourcc_list(struct drm_device *dev,
- const u32 *native_fourccs, size_t native_nfourccs,
- u32 *fourccs_out, size_t nfourccs_out)
-{
- /*
- * XRGB8888 is the default fallback format for most of userspace
- * and it's currently the only format that should be emulated for
- * the primary plane. Only if there's ever another default fallback,
- * it should be added here.
- */
- static const uint32_t extra_fourccs[] = {
- DRM_FORMAT_XRGB8888,
- };
- static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
-
- u32 *fourccs = fourccs_out;
- const u32 *fourccs_end = fourccs_out + nfourccs_out;
- size_t i;
-
- /*
- * The device's native formats go first.
- */
-
- for (i = 0; i < native_nfourccs; ++i) {
- /*
- * Several DTs, boot loaders and firmware report native
- * alpha formats that are non-alpha formats instead. So
- * replace alpha formats by non-alpha formats.
- */
- u32 fourcc = drm_fb_nonalpha_fourcc(native_fourccs[i]);
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
-
- *fourccs = fourcc;
- ++fourccs;
- }
-
- /*
- * The extra formats, emulated by the driver, go second.
- */
-
- for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
- u32 fourcc = extra_fourccs[i];
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate and native entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
-
- *fourccs = fourcc;
- ++fourccs;
- }
-
- return fourccs - fourccs_out;
-}
-EXPORT_SYMBOL(drm_fb_build_fourcc_list);
diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
new file mode 100644
index 000000000000..ce29dd05bcc5
--- /dev/null
+++ b/drivers/gpu/drm/drm_format_internal.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+#ifndef DRM_FORMAT_INTERNAL_H
+#define DRM_FORMAT_INTERNAL_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+#include <linux/swab.h>
+
+/*
+ * Each pixel-format conversion helper takes a raw pixel in a
+ * specific input format and returns a raw pixel in a specific
+ * output format. All pixels are in little-endian byte order.
+ *
+ * Function names are
+ *
+ * drm_pixel_<input>_to_<output>_<algorithm>()
+ *
+ * where <input> and <output> refer to pixel formats. The
+ * <algorithm> is optional and hints to the method used for the
+ * conversion. Helpers with no algorithm given apply pixel-bit
+ * shifting.
+ *
+ * The argument type is u32. We expect this to be wide enough to
+ * hold all conversion input from 32-bit RGB to any output format.
+ * The Linux kernel should avoid format conversion for anything
+ * but XRGB8888 input data. Converting from other format can still
+ * be acceptable in some cases.
+ *
+ * The return type is u32. It is wide enough to hold all conversion
+ * output from XRGB8888. For output formats wider than 32 bits, a
+ * return type of u64 would be acceptable.
+ */
+
+/*
+ * Conversions from XRGB8888
+ */
+
+static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix)
+{
+ u32 r = (pix & 0x00ff0000) >> 16;
+ u32 g = (pix & 0x0000ff00) >> 8;
+ u32 b = pix & 0x000000ff;
+
+ /* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ return (77 * r + 150 * g + 29 * b) / 256;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix)
+{
+ return ((pix & 0x00e00000) >> 16) |
+ ((pix & 0x0000e000) >> 11) |
+ ((pix & 0x000000c0) >> 6);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000fc00) >> 5) |
+ ((pix & 0x000000f8) >> 3);
+}
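A worked value for checking the shifts: an XRGB8888 pixel 0x00ff8040 (R=0xff, G=0x80, B=0x40) converts to RGB565 0xfc08.

/*
 * pix = 0x00ff8040
 *   (pix & 0x00f80000) >> 8 = 0xf800   R -> bits 15:11
 *   (pix & 0x0000fc00) >> 5 = 0x0400   G -> bits 10:5
 *   (pix & 0x000000f8) >> 3 = 0x0008   B -> bits 4:0
 * result: 0xf800 | 0x0400 | 0x0008 = 0xfc08
 */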
+
+static inline u32 drm_pixel_xrgb8888_to_rgb565be(u32 pix)
+{
+ return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgba5551(u32 pix)
+{
+ return drm_pixel_xrgb8888_to_rgbx5551(pix) |
+ BIT(0); /* set alpha bit */
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ drm_pixel_xrgb8888_to_xrgb1555(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb888(u32 pix)
+{
+ return pix & GENMASK(23, 0);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_bgr888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ pix;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0xff000000)) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_bgrx8888(u32 pix)
+{
+ return ((pix & 0xff000000) >> 24) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 8) |
+ ((pix & 0x0000ff00) << 8) |
+ ((pix & 0x000000ff) << 24);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ drm_pixel_xrgb8888_to_xbgr8888(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000ff) << 2) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x00ff0000) << 6);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
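The trailing OR replicates each channel's top two bits into the two new low bits, so full-scale 8-bit input expands to full-scale 10-bit output. Worked for the blue channel:

/*
 * pix = 0x000000ff (B = 0xff)
 *   0xff << 2                 = 0x3fc
 *   (0x3fc >> 8) & 0x00300c03 = 0x003
 *   0x3fc | 0x003             = 0x3ff  (full-scale 10-bit blue)
 */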
+
+static inline u32 drm_pixel_xrgb8888_to_argb2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xrgb2101010(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00ff0000) >> 14) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x000000ff) << 22);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xbgr2101010(pix);
+}
+
+/*
+ * Conversion from ARGB8888
+ */
+
+static inline u32 drm_pixel_argb8888_to_argb4444(u32 pix)
+{
+ return ((pix & 0xf0000000) >> 16) |
+ ((pix & 0x00f00000) >> 12) |
+ ((pix & 0x0000f000) >> 8) |
+ ((pix & 0x000000f0) >> 4);
+}
+
+#endif
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 3a94ca211f9c..e0d533611040 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -238,6 +238,14 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGB161616, .depth = 0,
+ .num_planes = 1, .char_per_block = { 6, 0, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = false },
+ { .format = DRM_FORMAT_BGR161616, .depth = 0,
+ .num_planes = 1, .char_per_block = { 6, 0, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = false },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
@@ -346,6 +354,33 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2,
.char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S010, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S210, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S410, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S012, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S212, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S412, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S016, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S216, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S416, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
};
unsigned int i;
@@ -382,7 +417,8 @@ EXPORT_SYMBOL(drm_format_info);
/**
* drm_get_format_info - query information for a given framebuffer configuration
* @dev: DRM device
- * @mode_cmd: metadata from the userspace fb creation request
+ * @pixel_format: pixel format (DRM_FORMAT_*)
+ * @modifier: format modifier (DRM_FORMAT_MOD_*)
*
* Returns:
* The instance of struct drm_format_info that describes the pixel format, or
@@ -390,15 +426,16 @@ EXPORT_SYMBOL(drm_format_info);
*/
const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+ u32 pixel_format, u64 modifier)
{
const struct drm_format_info *info = NULL;
if (dev->mode_config.funcs->get_format_info)
- info = dev->mode_config.funcs->get_format_info(mode_cmd);
+ info = dev->mode_config.funcs->get_format_info(pixel_format,
+ modifier);
if (!info)
- info = drm_format_info(mode_cmd->pixel_format);
+ info = drm_format_info(pixel_format);
return info;
}
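Call sites now pass the format and modifier directly rather than the whole fb request. A hedged sketch of the updated pattern, assuming a struct drm_mode_fb_cmd2 *mode_cmd in scope:

/* before: info = drm_get_format_info(dev, mode_cmd); */
const struct drm_format_info *info =
	drm_get_format_info(dev, mode_cmd->pixel_format, mode_cmd->modifier[0]);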
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index b781601946db..adbb73f00d68 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -153,18 +153,11 @@ int drm_mode_addfb_ioctl(struct drm_device *dev,
}
static int framebuffer_check(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *r)
{
- const struct drm_format_info *info;
int i;
- /* check if the format is supported at all */
- if (!__drm_format_info(r->pixel_format)) {
- drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
- &r->pixel_format);
- return -EINVAL;
- }
-
if (r->width == 0) {
drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width);
return -EINVAL;
@@ -175,9 +168,6 @@ static int framebuffer_check(struct drm_device *dev,
return -EINVAL;
}
- /* now let the driver pick its own format info */
- info = drm_get_format_info(dev, r);
-
for (i = 0; i < info->num_planes; i++) {
unsigned int width = drm_format_info_plane_width(info, r->width, i);
unsigned int height = drm_format_info_plane_height(info, r->height, i);
@@ -272,6 +262,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_mode_config *config = &dev->mode_config;
+ const struct drm_format_info *info;
struct drm_framebuffer *fb;
int ret;
@@ -297,11 +288,21 @@ drm_internal_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- ret = framebuffer_check(dev, r);
+ /* check if the format is supported at all */
+ if (!__drm_format_info(r->pixel_format)) {
+ drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
+ &r->pixel_format);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* now let the driver pick its own format info */
+ info = drm_get_format_info(dev, r->pixel_format, r->modifier[0]);
+
+ ret = framebuffer_check(dev, info, r);
if (ret)
return ERR_PTR(ret);
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, info, r);
if (IS_ERR(fb)) {
drm_dbg_kms(dev, "could not create framebuffer\n");
return fb;
@@ -862,11 +863,23 @@ EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_free);
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs)
{
+ unsigned int i;
int ret;
+ bool exists;
if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
return -EINVAL;
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)))
+ fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+ if (fb->obj[i]) {
+ exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]);
+ if (exists)
+ fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+ }
+ }
+
INIT_LIST_HEAD(&fb->filp_head);
fb->funcs = funcs;
@@ -875,7 +888,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
false, drm_framebuffer_free);
if (ret)
- goto out;
+ goto err;
mutex_lock(&dev->mode_config.fb_lock);
dev->mode_config.num_fb++;
@@ -883,7 +896,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
mutex_unlock(&dev->mode_config.fb_lock);
drm_mode_object_register(dev, &fb->base);
-out:
+
+ return 0;
+
+err:
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) {
+ drm_gem_object_handle_put_unlocked(fb->obj[i]);
+ fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+ }
+ }
return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);
@@ -960,6 +982,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
+ unsigned int i;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
+ drm_gem_object_handle_put_unlocked(fb->obj[i]);
+ }
mutex_lock(&dev->mode_config.fb_lock);
list_del(&fb->head);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c6240bab3fa5..6a44351e58b7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -26,6 +26,7 @@
*/
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
@@ -212,6 +213,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
+static void drm_gem_object_handle_get(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
+
+ if (obj->handle_count++ == 0)
+ drm_gem_object_get(obj);
+}
+
+/**
+ * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
+ * @obj: GEM object
+ *
+ * Acquires a reference on the GEM buffer object's handle. Required to keep
+ * the GEM object alive. Call drm_gem_object_handle_put_unlocked()
+ * to release the reference. Does nothing if the buffer object has no handle.
+ *
+ * Returns:
+ * True if a handle exists, or false otherwise
+ */
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ guard(mutex)(&dev->object_name_lock);
+
+ /*
+ * First ref taken during GEM object creation, if any. Some
+ * drivers set up internal framebuffers with GEM objects that
+ * do not have a GEM handle. Hence, this counter can be zero.
+ */
+ if (!obj->handle_count)
+ return false;
+
+ drm_gem_object_handle_get(obj);
+
+ return true;
+}
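The intended pairing, as the drm_framebuffer.c hunks above use per plane: take the optional handle reference while the object is in use and drop it with the put helper. A condensed sketch (error handling elided):

bool has_ref = drm_gem_object_handle_get_if_exists_unlocked(obj);
/* ... obj stays alive while the handle reference is held ... */
if (has_ref)
	drm_gem_object_handle_put_unlocked(obj);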
+
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -242,20 +283,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
}
}
-static void
-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
+/**
+ * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
+ * @obj: GEM object
+ *
+ * Releases a reference on the GEM buffer object's handle. Possibly releases
+ * the GEM buffer object and associated dma-buf objects.
+ */
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
bool final = false;
- if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
+ if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
return;
/*
- * Must bump handle count first as this may be the last
- * ref, in which case the object would disappear before we
- * checked for a name
- */
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before
+ * we checked for a name.
+ */
mutex_lock(&dev->object_name_lock);
if (--obj->handle_count == 0) {
@@ -279,6 +326,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
+ if (drm_WARN_ON(obj->dev, !data))
+ return 0;
+
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
@@ -389,8 +439,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
int ret;
WARN_ON(!mutex_is_locked(&dev->object_name_lock));
- if (obj->handle_count++ == 0)
- drm_gem_object_get(obj);
+
+ drm_gem_object_handle_get(obj);
/*
* Get the user-visible handle using idr. Preload and perform
@@ -399,7 +449,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
idr_preload(GFP_KERNEL);
spin_lock(&file_priv->table_lock);
- ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+ ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT);
spin_unlock(&file_priv->table_lock);
idr_preload_end();
@@ -420,6 +470,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
goto err_revoke;
}
+ /* mirrors drm_gem_handle_delete to avoid races */
+ spin_lock(&file_priv->table_lock);
+ obj = idr_replace(&file_priv->object_idr, obj, handle);
+ WARN_ON(obj != NULL);
+ spin_unlock(&file_priv->table_lock);
*handlep = handle;
return 0;
@@ -1184,39 +1239,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
obj->funcs->print_info(p, indent, obj);
}
-int drm_gem_pin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->pin)
- return obj->funcs->pin(obj);
-
- return 0;
-}
-
-void drm_gem_unpin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->unpin)
- obj->funcs->unpin(obj);
-}
-
-int drm_gem_pin(struct drm_gem_object *obj)
-{
- int ret;
-
- dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_pin_locked(obj);
- dma_resv_unlock(obj->resv);
-
- return ret;
-}
-
-void drm_gem_unpin(struct drm_gem_object *obj)
-{
- dma_resv_lock(obj->resv, NULL);
- drm_gem_unpin_locked(obj);
- dma_resv_unlock(obj->resv);
-}
-
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1233,9 +1256,9 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vmap);
+EXPORT_SYMBOL(drm_gem_vmap_locked);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_assert_held(obj->resv);
@@ -1248,7 +1271,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
/* Always set the mapping to NULL. Callers may rely on this. */
iosys_map_clear(map);
}
-EXPORT_SYMBOL(drm_gem_vunmap);
+EXPORT_SYMBOL(drm_gem_vunmap_locked);
void drm_gem_lock(struct drm_gem_object *obj)
{
@@ -1262,25 +1285,25 @@ void drm_gem_unlock(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_unlock);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_vmap(obj, map);
+ ret = drm_gem_vmap_locked(obj, map);
dma_resv_unlock(obj->resv);
return ret;
}
-EXPORT_SYMBOL(drm_gem_vmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vmap);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_lock(obj->resv, NULL);
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
dma_resv_unlock(obj->resv);
}
-EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vunmap);
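
After the rename, the convention matches the rest of the GEM API: plain
drm_gem_vmap()/drm_gem_vunmap() take obj->resv internally, while the _locked
variants expect the caller to hold it. A minimal sketch of both call styles
(hypothetical caller):

        struct iosys_map map;
        int ret;

        /* unlocked: the helper acquires and drops obj->resv itself */
        ret = drm_gem_vmap(obj, &map);
        if (ret)
                return ret;
        /* ... use map.vaddr ... */
        drm_gem_vunmap(obj, &map);

        /* locked: the caller already holds the reservation lock */
        dma_resv_lock(obj->resv, NULL);
        ret = drm_gem_vmap_locked(obj, &map);
        if (!ret) {
                /* ... */
                drm_gem_vunmap_locked(obj, &map);
        }
        dma_resv_unlock(obj->resv);
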
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
@@ -1460,12 +1483,14 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
* @nr_to_scan: The number of pages to try to reclaim
* @remaining: The number of pages left to reclaim, should be initialized by caller
* @shrink: Callback to try to shrink/reclaim the object.
+ * @ticket: Optional ww_acquire_ctx context to use for locking
*/
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
unsigned int nr_to_scan,
unsigned long *remaining,
- bool (*shrink)(struct drm_gem_object *obj))
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
+ struct ww_acquire_ctx *ticket)
{
struct drm_gem_lru still_in_lru;
struct drm_gem_object *obj;
@@ -1498,17 +1523,20 @@ drm_gem_lru_scan(struct drm_gem_lru *lru,
*/
mutex_unlock(lru->lock);
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
+
/*
* Note that this still needs to be a trylock, since we can
* hit the shrinker in response to trying to get backing pages
* for this obj (i.e. while its lock is already held)
*/
- if (!dma_resv_trylock(obj->resv)) {
+ if (!ww_mutex_trylock(&obj->resv->lock, ticket)) {
*remaining += obj->size >> PAGE_SHIFT;
goto tail;
}
- if (shrink(obj)) {
+ if (shrink(obj, ticket)) {
freed += obj->size >> PAGE_SHIFT;
/*
@@ -1522,6 +1550,9 @@ drm_gem_lru_scan(struct drm_gem_lru *lru,
dma_resv_unlock(obj->resv);
+ if (ticket)
+ ww_acquire_fini(ticket);
+
tail:
drm_gem_object_put(obj);
mutex_lock(lru->lock);
@@ -1543,10 +1574,10 @@ tail:
EXPORT_SYMBOL(drm_gem_lru_scan);
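
Note that the ww_acquire context is initialized and finished inside
drm_gem_lru_scan() around each shrink attempt, so callers only provide the
storage and the callback. A hypothetical shrinker built on the new signature
(my_lru, evict_obj and my_scan are illustrative, not part of this patch):

        static bool evict_obj(struct drm_gem_object *obj,
                              struct ww_acquire_ctx *ticket)
        {
                /* obj->resv is already held; @ticket may be used to
                 * lock further objects (e.g. backing store) */
                return drm_gem_evict_locked(obj) == 0;
        }

        static unsigned long my_scan(struct shrinker *s,
                                     struct shrink_control *sc)
        {
                struct ww_acquire_ctx ticket;
                unsigned long remaining = 0;
                unsigned long freed;

                freed = drm_gem_lru_scan(&my_lru, sc->nr_to_scan, &remaining,
                                         evict_obj, &ticket);

                return freed ?: SHRINK_STOP;
        }
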
/**
- * drm_gem_evict - helper to evict backing pages for a GEM object
+ * drm_gem_evict_locked - helper to evict backing pages for a GEM object
* @obj: obj in question
*/
-int drm_gem_evict(struct drm_gem_object *obj)
+int drm_gem_evict_locked(struct drm_gem_object *obj)
{
dma_resv_assert_held(obj->resv);
@@ -1558,4 +1589,4 @@ int drm_gem_evict(struct drm_gem_object *obj)
return 0;
}
-EXPORT_SYMBOL(drm_gem_evict);
+EXPORT_SYMBOL(drm_gem_evict_locked);
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 93337543aac3..ebf305fb24f0 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -2,6 +2,7 @@
#include <linux/dma-resv.h>
#include <linux/dma-fence-chain.h>
+#include <linux/export.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index b7f033d4352a..4f0320df858f 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
if (drm_gem_is_imported(gem_obj)) {
if (dma_obj->vaddr)
- dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map);
+ dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
} else if (dma_obj->vaddr) {
if (dma_obj->map_noncoherent)
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 0fbeb686e561..4bc89d33df59 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -5,6 +5,7 @@
* Copyright (C) 2017 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -67,6 +68,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
static int
drm_gem_fb_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes,
const struct drm_framebuffer_funcs *funcs)
@@ -74,7 +76,7 @@ drm_gem_fb_init(struct drm_device *dev,
unsigned int i;
int ret;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
for (i = 0; i < num_planes; i++)
fb->obj[i] = obj[i];
@@ -135,6 +137,7 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
* @dev: DRM device
* @fb: framebuffer object
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
@@ -151,20 +154,14 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
int drm_gem_fb_init_with_funcs(struct drm_device *dev,
struct drm_framebuffer *fb,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
- const struct drm_format_info *info;
struct drm_gem_object *objs[DRM_FORMAT_MAX_PLANES];
unsigned int i;
int ret;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info) {
- drm_dbg_kms(dev, "Failed to get FB format info\n");
- return -EINVAL;
- }
-
if (drm_drv_uses_atomic_modeset(dev) &&
!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -199,7 +196,7 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
}
}
- ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
+ ret = drm_gem_fb_init(dev, fb, info, mode_cmd, objs, i, funcs);
if (ret)
goto err_gem_object_put;
@@ -220,6 +217,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
* callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
@@ -232,6 +230,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
*/
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
@@ -242,7 +241,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
+ ret = drm_gem_fb_init_with_funcs(dev, fb, file, info, mode_cmd, funcs);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
@@ -262,6 +261,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
@@ -281,9 +281,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
*/
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd,
&drm_gem_fb_funcs);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create);
@@ -299,6 +300,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
@@ -319,9 +321,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
*/
struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd,
&drm_gem_fb_funcs_dirtyfb);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
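
With the format info now resolved by the core and passed in, drivers no longer
look it up themselves, and most can plug the helper straight into their
mode-config vtable. A hypothetical hookup:

        static const struct drm_mode_config_funcs my_mode_config_funcs = {
                .fb_create = drm_gem_fb_create,
                .atomic_check = drm_atomic_helper_check,
                .atomic_commit = drm_atomic_helper_commit,
        };
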
@@ -362,7 +365,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
- ret = drm_gem_vmap_unlocked(obj, &map[i]);
+ ret = drm_gem_vmap(obj, &map[i]);
if (ret)
goto err_drm_gem_vunmap;
}
@@ -384,7 +387,7 @@ err_drm_gem_vunmap:
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
return ret;
}
@@ -411,7 +414,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
continue;
if (iosys_map_is_null(&map[i]))
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
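
drm_gem_fb_vmap() now goes through the self-locking drm_gem_vmap(), so the
calling convention is unchanged. A minimal matched map/unmap pair
(hypothetical caller):

        struct iosys_map map[DRM_FORMAT_MAX_PLANES];
        struct iosys_map data[DRM_FORMAT_MAX_PLANES];
        int ret;

        ret = drm_gem_fb_vmap(fb, map, data);
        if (ret)
                return ret;

        /* ... access the planes via data[i].vaddr ... */

        drm_gem_fb_vunmap(fb, map);
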
@@ -419,6 +422,7 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap);
static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
unsigned int num_planes)
{
+ struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
int ret;
@@ -427,9 +431,10 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
obj = drm_gem_fb_get_obj(fb, num_planes);
if (!obj)
continue;
+ import_attach = obj->import_attach;
if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_end_cpu_access(obj->dma_buf, dir);
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
if (ret)
drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n",
num_planes, dir, ret);
@@ -452,6 +457,7 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
*/
int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
{
+ struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
unsigned int i;
int ret;
@@ -462,9 +468,10 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct
ret = -EINVAL;
goto err___drm_gem_fb_end_cpu_access;
}
+ import_attach = obj->import_attach;
if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_begin_cpu_access(obj->dma_buf, dir);
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
if (ret)
goto err___drm_gem_fb_end_cpu_access;
}
@@ -497,12 +504,9 @@ EXPORT_SYMBOL(drm_gem_fb_end_cpu_access);
// TODO Drop this function and replace by drm_format_info_bpp() once all
// DRM_FORMAT_* provide proper block info in drivers/gpu/drm/drm_fourcc.c
static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- info = drm_get_format_info(dev, mode_cmd);
-
switch (info->format) {
case DRM_FORMAT_YUV420_8BIT:
return 12;
@@ -516,6 +520,7 @@ static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
}
static int drm_gem_afbc_min_size(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
@@ -556,7 +561,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
afbc_fb->offset = mode_cmd->offsets[0];
- bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
+ bpp = drm_gem_afbc_get_bpp(dev, info, mode_cmd);
if (!bpp) {
drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
return -EINVAL;
@@ -578,6 +583,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
*
* @dev: DRM device
* @afbc_fb: afbc-specific framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
@@ -591,24 +597,24 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
* Zero on success or a negative error value on failure.
*/
int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
- const struct drm_format_info *info;
struct drm_gem_object **objs;
int ret;
objs = afbc_fb->base.obj;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return -EINVAL;
- ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
+ ret = drm_gem_afbc_min_size(dev, info, mode_cmd, afbc_fb);
if (ret < 0)
return ret;
- if (objs[0]->size < afbc_fb->afbc_size)
+ if (objs[0]->size < afbc_fb->afbc_size) {
+ drm_dbg_kms(dev, "GEM object size (%zu) smaller than minimum afbc size (%u)\n",
+ objs[0]->size, afbc_fb->afbc_size);
return -EINVAL;
+ }
return 0;
}
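
An AFBC-capable driver's fb_create is expected to forward the same @info
pointer to both helpers. A rough sketch (my_fb_funcs is hypothetical, and
unwinding of the already-initialized fb on the error path is elided):

        static struct drm_framebuffer *
        my_fb_create(struct drm_device *dev, struct drm_file *file,
                     const struct drm_format_info *info,
                     const struct drm_mode_fb_cmd2 *mode_cmd)
        {
                struct drm_afbc_framebuffer *afbc_fb;
                int ret;

                afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
                if (!afbc_fb)
                        return ERR_PTR(-ENOMEM);

                ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file,
                                                 info, mode_cmd, &my_fb_funcs);
                if (ret)
                        goto err_free;

                if (drm_is_afbc(mode_cmd->modifier[0])) {
                        ret = drm_gem_fb_afbc_init(dev, info, mode_cmd,
                                                   afbc_fb);
                        if (ret)
                                goto err_free; /* fb cleanup elided */
                }

                return &afbc_fb->base;

        err_free:
                kfree(afbc_fb);
                return ERR_PTR(ret);
        }
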
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index d99dee67353a..5d1349c34afd 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -165,7 +165,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
} else {
dma_resv_lock(shmem->base.resv, NULL);
- drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -174,9 +174,10 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
}
if (shmem->pages)
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
dma_resv_unlock(shmem->base.resv);
}
@@ -186,21 +187,20 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->pages_use_count++ > 0)
+ if (refcount_inc_not_zero(&shmem->pages_use_count))
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
- shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -216,38 +216,36 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
shmem->pages = pages;
+ refcount_set(&shmem->pages_use_count, 1);
+
return 0;
}
/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function decreases the use count and puts the backing pages when use drops to zero.
*/
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- return;
-
- if (--shmem->pages_use_count > 0)
- return;
-
+ if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
- if (shmem->map_wc)
- set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
- drm_gem_put_pages(obj, shmem->pages,
- shmem->pages_mark_dirty_on_put,
- shmem->pages_mark_accessed_on_put);
- shmem->pages = NULL;
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+ }
}
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
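
The conversion relies on the dma_resv lock serializing the 0 -> 1 and 1 -> 0
transitions, which is what makes the refcount_inc_not_zero()/refcount_set()
pairing safe without ever incrementing a refcount_t from zero. The generic
shape of the pattern, as a sketch (the acquire/release helpers are
hypothetical):

        /* under the lock that serializes first-get and last-put: */
        if (refcount_inc_not_zero(&use_count))
                return 0;                       /* already had users */

        err = acquire_backing_resource();       /* hypothetical */
        if (err)
                return err;                     /* count still reads 0 */

        refcount_set(&use_count, 1);            /* publish first reference */

and on the put side:

        if (refcount_dec_and_test(&use_count))
                release_backing_resource();     /* hypothetical */
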
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
@@ -257,7 +255,12 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
- ret = drm_gem_shmem_get_pages(shmem);
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ if (!ret)
+ refcount_set(&shmem->pages_pin_count, 1);
return ret;
}
@@ -267,7 +270,8 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
- drm_gem_shmem_put_pages(shmem);
+ if (refcount_dec_and_test(&shmem->pages_pin_count))
+ drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
@@ -288,6 +292,9 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
return ret;
@@ -296,7 +303,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_pin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
/**
* drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
@@ -311,14 +318,17 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_dec_not_one(&shmem->pages_pin_count))
+ return;
+
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_unpin_locked(shmem);
dma_resv_unlock(shmem->base.resv);
}
-EXPORT_SYMBOL(drm_gem_shmem_unpin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
/*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing
* store.
@@ -327,47 +337,45 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
* exists for the buffer backing the shmem GEM object. It hides the differences
* between dma-buf imported and natively allocated objects.
*
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
+ dma_resv_assert_held(obj->resv);
+
if (drm_gem_is_imported(obj)) {
- ret = dma_buf_vmap(obj->dma_buf, map);
- if (!ret) {
- if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->dma_buf, map);
- return -EIO;
- }
- }
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
} else {
pgprot_t prot = PAGE_KERNEL;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->vmap_use_count++ > 0) {
+ if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_pin_locked(shmem);
if (ret)
- goto err_zero_use;
+ return ret;
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
- if (!shmem->vaddr)
+ if (!shmem->vaddr) {
ret = -ENOMEM;
- else
+ } else {
iosys_map_set_vaddr(map, shmem->vaddr);
+ refcount_set(&shmem->vmap_use_count, 1);
+ }
}
if (ret) {
@@ -379,49 +387,45 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
err_put_pages:
if (!drm_gem_is_imported(obj))
- drm_gem_shmem_put_pages(shmem);
-err_zero_use:
- shmem->vmap_use_count = 0;
+ drm_gem_shmem_unpin_locked(shmem);
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
* This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
+ * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
+ * drops to zero.
*
* This function hides the differences between dma-buf imported and natively
* allocated objects.
*/
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
+ dma_resv_assert_held(obj->resv);
+
if (drm_gem_is_imported(obj)) {
- dma_buf_vunmap(obj->dma_buf, map);
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
+ if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+ vunmap(shmem->vaddr);
+ shmem->vaddr = NULL;
- vunmap(shmem->vaddr);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_unpin_locked(shmem);
+ }
}
-
- shmem->vaddr = NULL;
}
-EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
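
As with the core GEM helpers above, the _locked suffix now marks the variants
that expect the caller to hold the reservation lock. A hypothetical caller:

        struct iosys_map map;
        int ret;

        dma_resv_lock(shmem->base.resv, NULL);
        ret = drm_gem_shmem_vmap_locked(shmem, &map);
        dma_resv_unlock(shmem->base.resv);
        if (ret)
                return ret;

        /* ... use map.vaddr ... */

        dma_resv_lock(shmem->base.resv, NULL);
        drm_gem_shmem_vunmap_locked(shmem, &map);
        dma_resv_unlock(shmem->base.resv);
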
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
@@ -449,7 +453,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
/* Update madvise status, returns true if not purged, else
* false or -errno.
*/
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
dma_resv_assert_held(shmem->base.resv);
@@ -460,9 +464,9 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
return (madv >= 0);
}
-EXPORT_SYMBOL(drm_gem_shmem_madvise);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
@@ -476,7 +480,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
shmem->sgt = NULL;
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
shmem->madv = -1;
@@ -492,7 +496,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
-EXPORT_SYMBOL(drm_gem_shmem_purge);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
@@ -575,8 +579,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- shmem->pages_use_count++;
+ drm_WARN_ON_ONCE(obj->dev,
+ !refcount_inc_not_zero(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
@@ -589,7 +593,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
dma_resv_lock(shmem->base.resv, NULL);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
drm_gem_vm_close(vma);
@@ -639,7 +643,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return -EINVAL;
dma_resv_lock(shmem->base.resv, NULL);
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
if (ret)
@@ -666,11 +670,12 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
if (drm_gem_is_imported(&shmem->base))
return;
- drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
- drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
-EXPORT_SYMBOL(drm_gem_shmem_print_info);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
/**
* drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
@@ -707,7 +712,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
return ERR_PTR(ret);
@@ -729,7 +734,7 @@ err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
return ERR_PTR(ret);
}
@@ -799,6 +804,63 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
+/**
+ * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
+ * @dev: Device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * Drivers that use the shmem helpers but also want to import a dmabuf without
+ * mapping its sg_table can use this as their &drm_driver.gem_prime_import
+ * implementation.
+ */
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ size_t size;
+ int ret;
+
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ obj = dma_buf->priv;
+ drm_gem_object_get(obj);
+ return obj;
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ size = PAGE_ALIGN(attach->dmabuf->size);
+
+ shmem = __drm_gem_shmem_create(dev, size, true, NULL);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
+ goto fail_detach;
+ }
+
+ drm_dbg_prime(dev, "size = %zu\n", size);
+
+ shmem->base.import_attach = attach;
+ shmem->base.resv = dma_buf->resv;
+
+ return &shmem->base;
+
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
+
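
Wiring the new import helper into a driver is a single assignment in the
driver structure; a hypothetical example:

        static const struct drm_driver my_driver = {
                .driver_features  = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
                .gem_prime_import = drm_gem_shmem_prime_import_no_map,
                /* ... remaining ops ... */
        };
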
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 3734aa2d1c5b..257cca4cb97a 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 22b1fe9c03b8..b04cde4a60e7 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -88,11 +89,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
* drmm_vram_helper_init() is a managed interface that installs a
* clean-up handler to run during the DRM device's release.
*
- * For drawing or scanout operations, rsp. buffer objects have to be pinned
- * in video RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
* A buffer object that is pinned in video RAM has a fixed address within that
* memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
* it's used to program the hardware's scanout engine for framebuffers, set
@@ -299,30 +295,7 @@ out:
return 0;
}
-/**
- * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
- * @gbo: the GEM VRAM object
- * @pl_flag: a bitmask of possible memory regions
- *
- * Pinning a buffer object ensures that it is not evicted from
- * a memory region. A pinned buffer object has to be unpinned before
- * it can be pinned to another region. If the pl_flag argument is 0,
- * the buffer is pinned at its current location (video RAM or system
- * memory).
- *
- * Small buffer objects, such as cursor images, can lead to memory
- * fragmentation if they are pinned in the middle of video RAM. This
- * is especially a problem on devices with only a small amount of
- * video RAM. Fragmentation can prevent the primary framebuffer from
- * fitting in, even though there's enough memory overall. The modifier
- * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
- * at the high end of the memory region to avoid fragmentation.
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+static int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
int ret;
@@ -334,7 +307,6 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
return ret;
}
-EXPORT_SYMBOL(drm_gem_vram_pin);
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
@@ -343,15 +315,7 @@ static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
ttm_bo_unpin(&gbo->bo);
}
-/**
- * drm_gem_vram_unpin() - Unpins a GEM VRAM object
- * @gbo: the GEM VRAM object
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+static int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
int ret;
@@ -364,7 +328,6 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vram_unpin);
/**
* drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
@@ -690,41 +653,6 @@ EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
*/
/**
- * drm_gem_vram_object_pin() - Implements &struct drm_gem_object_funcs.pin
- * @gem: The GEM object to pin
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- /*
- * Fbdev console emulation is the use case of these PRIME
- * helpers. This may involve updating a hardware buffer from
- * a shadow FB. We pin the buffer to it's current location
- * (either video RAM or system memory) to prevent it from
- * being relocated during the update operation. If you require
- * the buffer to be pinned to VRAM, implement a callback that
- * sets the flags accordingly.
- */
- return drm_gem_vram_pin_locked(gbo, 0);
-}
-
-/**
- * drm_gem_vram_object_unpin() - Implements &struct drm_gem_object_funcs.unpin
- * @gem: The GEM object to unpin
- */
-static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- drm_gem_vram_unpin_locked(gbo);
-}
-
-/**
* drm_gem_vram_object_vmap() -
* Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
@@ -762,8 +690,6 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.free = drm_gem_vram_object_free,
- .pin = drm_gem_vram_object_pin,
- .unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
.vunmap = drm_gem_vram_object_vunmap,
.mmap = drm_gem_ttm_mmap,
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index de424e670995..5bb4c77db2c3 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -7,11 +7,11 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/hmm.h>
+#include <linux/hugetlb_inline.h>
#include <linux/memremap.h>
-#include <linux/migrate.h>
#include <linux/mm_types.h>
-#include <linux/pagemap.h>
#include <linux/slab.h>
#include <drm/drm_device.h>
@@ -108,21 +108,6 @@
*/
/**
- * DOC: Migration
- *
- * The migration support is quite simple, allowing migration between RAM and
- * device memory at the range granularity. For example, GPU SVM currently does
- * not support mixing RAM and device memory pages within a range. This means
- * that upon GPU fault, the entire range can be migrated to device memory, and
- * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device
- * memory storage within a range could be added in the future if required.
- *
- * The reasoning for only supporting range granularity is as follows: it
- * simplifies the implementation, and range sizes are driver-defined and should
- * be relatively small.
- */
-
-/**
* DOC: Partial Unmapping of Ranges
*
* Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting
@@ -191,12 +176,9 @@
* }
*
* if (driver_migration_policy(range)) {
- * mmap_read_lock(mm);
- * devmem = driver_alloc_devmem();
- * err = drm_gpusvm_migrate_to_devmem(gpusvm, range,
- * devmem_allocation,
- * &ctx);
- * mmap_read_unlock(mm);
+ * err = drm_pagemap_populate_mm(driver_choose_drm_pagemap(),
+ * gpuva_start, gpuva_end, gpusvm->mm,
+ * ctx->timeslice_ms);
* if (err) // CPU mappings may have changed
* goto retry;
* }
@@ -289,97 +271,6 @@ npages_in_range(unsigned long start, unsigned long end)
}
/**
- * struct drm_gpusvm_zdd - GPU SVM zone device data
- *
- * @refcount: Reference count for the zdd
- * @devmem_allocation: device memory allocation
- * @device_private_page_owner: Device private pages owner
- *
- * This structure serves as a generic wrapper installed in
- * page->zone_device_data. It provides infrastructure for looking up a device
- * memory allocation upon CPU page fault and asynchronously releasing device
- * memory once the CPU has no page references. Asynchronous release is useful
- * because CPU page references can be dropped in IRQ contexts, while releasing
- * device memory likely requires sleeping locks.
- */
-struct drm_gpusvm_zdd {
- struct kref refcount;
- struct drm_gpusvm_devmem *devmem_allocation;
- void *device_private_page_owner;
-};
-
-/**
- * drm_gpusvm_zdd_alloc() - Allocate a zdd structure.
- * @device_private_page_owner: Device private pages owner
- *
- * This function allocates and initializes a new zdd structure. It sets up the
- * reference count and initializes the destroy work.
- *
- * Return: Pointer to the allocated zdd on success, ERR_PTR() on failure.
- */
-static struct drm_gpusvm_zdd *
-drm_gpusvm_zdd_alloc(void *device_private_page_owner)
-{
- struct drm_gpusvm_zdd *zdd;
-
- zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
- if (!zdd)
- return NULL;
-
- kref_init(&zdd->refcount);
- zdd->devmem_allocation = NULL;
- zdd->device_private_page_owner = device_private_page_owner;
-
- return zdd;
-}
-
-/**
- * drm_gpusvm_zdd_get() - Get a reference to a zdd structure.
- * @zdd: Pointer to the zdd structure.
- *
- * This function increments the reference count of the provided zdd structure.
- *
- * Return: Pointer to the zdd structure.
- */
-static struct drm_gpusvm_zdd *drm_gpusvm_zdd_get(struct drm_gpusvm_zdd *zdd)
-{
- kref_get(&zdd->refcount);
- return zdd;
-}
-
-/**
- * drm_gpusvm_zdd_destroy() - Destroy a zdd structure.
- * @ref: Pointer to the reference count structure.
- *
- * This function queues the destroy_work of the zdd for asynchronous destruction.
- */
-static void drm_gpusvm_zdd_destroy(struct kref *ref)
-{
- struct drm_gpusvm_zdd *zdd =
- container_of(ref, struct drm_gpusvm_zdd, refcount);
- struct drm_gpusvm_devmem *devmem = zdd->devmem_allocation;
-
- if (devmem) {
- complete_all(&devmem->detached);
- if (devmem->ops->devmem_release)
- devmem->ops->devmem_release(devmem);
- }
- kfree(zdd);
-}
-
-/**
- * drm_gpusvm_zdd_put() - Put a zdd reference.
- * @zdd: Pointer to the zdd structure.
- *
- * This function decrements the reference count of the provided zdd structure
- * and schedules its destruction if the count drops to zero.
- */
-static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd)
-{
- kref_put(&zdd->refcount, drm_gpusvm_zdd_destroy);
-}
-
-/**
* drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
* @notifier: Pointer to the GPU SVM notifier structure.
* @start: Start address of the range
@@ -945,7 +836,7 @@ retry:
* process-many-malloc' fails. In the failure case, each process
* mallocs 16k but the CPU VMA is ~128k which results in 64k SVM
* ranges. When migrating the SVM ranges, some processes fail in
- * drm_gpusvm_migrate_to_devmem with 'migrate.cpages != npages'
+ * drm_pagemap_migrate_to_devmem with 'migrate.cpages != npages'
* and then upon drm_gpusvm_range_get_pages device pages from
* other processes are collected + faulted in which creates all
* sorts of problems. Unsure exactly how this is happening, also
@@ -981,6 +872,40 @@ static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
#endif
/**
+ * drm_gpusvm_find_vma_start() - Find start address for first VMA in range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @start: The inclusive start user address.
+ * @end: The exclusive end user address.
+ *
+ * Returns: The start address of the first VMA within the provided range,
+ * ULONG_MAX otherwise. Assumes start < end.
+ */
+unsigned long
+drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = gpusvm->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr = ULONG_MAX;
+
+ if (!mmget_not_zero(mm))
+ return addr;
+
+ mmap_read_lock(mm);
+
+ vma = find_vma_intersection(mm, start, end);
+ if (vma)
+ addr = vma->vm_start;
+
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return addr;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_find_vma_start);
+
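
A typical use is probing whether any VMA is mapped in a region before
committing to a fault or prefetch path; a hypothetical caller:

        unsigned long addr;

        addr = drm_gpusvm_find_vma_start(gpusvm, start, end);
        if (addr == ULONG_MAX)
                return -ENOENT; /* no VMA intersects [start, end) */

        /* ... start (or clamp) the operation at addr ... */
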
+/**
* drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @fault_addr: Fault address
@@ -1118,6 +1043,10 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
lockdep_assert_held(&gpusvm->notifier_lock);
if (range->flags.has_dma_mapping) {
+ struct drm_gpusvm_range_flags flags = {
+ .__flags = range->flags.__flags,
+ };
+
for (i = 0, j = 0; i < npages; j++) {
struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
@@ -1131,8 +1060,12 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
dev, *addr);
i += 1 << addr->order;
}
- range->flags.has_devmem_pages = false;
- range->flags.has_dma_mapping = false;
+
+ /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+ flags.has_devmem_pages = false;
+ flags.has_dma_mapping = false;
+ WRITE_ONCE(range->flags.__flags, flags.__flags);
+
range->dpagemap = NULL;
}
}
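
The READ_ONCE() side referenced by the comment would look roughly like this
(a sketch; the result is advisory and must be rechecked under the notifier
lock before acting on it):

        static bool range_has_dma_mapping(struct drm_gpusvm_range *range)
        {
                struct drm_gpusvm_range_flags flags = {
                        .__flags = READ_ONCE(range->flags.__flags),
                };

                return flags.has_dma_mapping;   /* may already be stale */
        }
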
@@ -1321,7 +1254,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
.dev_private_owner = gpusvm->device_private_page_owner,
};
struct mm_struct *mm = gpusvm->mm;
- struct drm_gpusvm_zdd *zdd;
+ void *zdd;
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
unsigned long i, j;
@@ -1330,10 +1263,10 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
unsigned long num_dma_mapped;
unsigned int order = 0;
unsigned long *pfns;
- struct page **pages;
int err = 0;
struct dev_pagemap *pagemap;
struct drm_pagemap *dpagemap;
+ struct drm_gpusvm_range_flags flags;
retry:
hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
@@ -1369,7 +1302,6 @@ retry:
if (err)
goto err_free;
- pages = (struct page **)pfns;
map_pages:
/*
* Perform all dma mappings under the notifier lock to not
@@ -1378,7 +1310,8 @@ map_pages:
*/
drm_gpusvm_notifier_lock(gpusvm);
- if (range->flags.unmapped) {
+ flags.__flags = range->flags.__flags;
+ if (flags.unmapped) {
drm_gpusvm_notifier_unlock(gpusvm);
err = -EFAULT;
goto err_free;
@@ -1404,6 +1337,7 @@ map_pages:
}
zdd = NULL;
+ pagemap = NULL;
num_dma_mapped = 0;
for (i = 0, j = 0; i < npages; ++j) {
struct page *page = hmm_pfn_to_page(pfns[i]);
@@ -1423,7 +1357,7 @@ map_pages:
}
pagemap = page_pgmap(page);
- dpagemap = zdd->devmem_allocation->dpagemap;
+ dpagemap = drm_pagemap_page_to_dpagemap(page);
if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
/*
* Raced. This is not supposed to happen
@@ -1444,16 +1378,19 @@ map_pages:
err = -EFAULT;
goto err_unmap;
}
-
- pages[i] = page;
} else {
dma_addr_t addr;
- if (is_zone_device_page(page) || zdd) {
+ if (is_zone_device_page(page) || pagemap) {
err = -EOPNOTSUPP;
goto err_unmap;
}
+ if (ctx->devmem_only) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+
addr = dma_map_page(gpusvm->drm->dev,
page, 0,
PAGE_SIZE << order,
@@ -1469,14 +1406,17 @@ map_pages:
}
i += 1 << order;
num_dma_mapped = i;
- range->flags.has_dma_mapping = true;
+ flags.has_dma_mapping = true;
}
- if (zdd) {
- range->flags.has_devmem_pages = true;
+ if (pagemap) {
+ flags.has_devmem_pages = true;
range->dpagemap = dpagemap;
}
+ /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+ WRITE_ONCE(range->flags.__flags, flags.__flags);
+
drm_gpusvm_notifier_unlock(gpusvm);
kvfree(pfns);
set_seqno:
@@ -1497,6 +1437,7 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
/**
* drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range structure
* @ctx: GPU SVM context
@@ -1527,553 +1468,11 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
/**
- * drm_gpusvm_migration_unlock_put_page() - Put a migration page
- * @page: Pointer to the page to put
- *
- * This function unlocks and puts a page.
- */
-static void drm_gpusvm_migration_unlock_put_page(struct page *page)
-{
- unlock_page(page);
- put_page(page);
-}
-
-/**
- * drm_gpusvm_migration_unlock_put_pages() - Put migration pages
- * @npages: Number of pages
- * @migrate_pfn: Array of migrate page frame numbers
- *
- * This function unlocks and puts an array of pages.
- */
-static void drm_gpusvm_migration_unlock_put_pages(unsigned long npages,
- unsigned long *migrate_pfn)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i) {
- struct page *page;
-
- if (!migrate_pfn[i])
- continue;
-
- page = migrate_pfn_to_page(migrate_pfn[i]);
- drm_gpusvm_migration_unlock_put_page(page);
- migrate_pfn[i] = 0;
- }
-}
-
-/**
- * drm_gpusvm_get_devmem_page() - Get a reference to a device memory page
- * @page: Pointer to the page
- * @zdd: Pointer to the GPU SVM zone device data
- *
- * This function associates the given page with the specified GPU SVM zone
- * device data and initializes it for zone device usage.
- */
-static void drm_gpusvm_get_devmem_page(struct page *page,
- struct drm_gpusvm_zdd *zdd)
-{
- page->zone_device_data = drm_gpusvm_zdd_get(zdd);
- zone_device_page_init(page);
-}
-
-/**
- * drm_gpusvm_migrate_map_pages() - Map migration pages for GPU SVM migration
- * @dev: The device for which the pages are being mapped
- * @dma_addr: Array to store DMA addresses corresponding to mapped pages
- * @migrate_pfn: Array of migrate page frame numbers to map
- * @npages: Number of pages to map
- * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
- *
- * This function maps pages of memory for migration usage in GPU SVM. It
- * iterates over each page frame number provided in @migrate_pfn, maps the
- * corresponding page, and stores the DMA address in the provided @dma_addr
- * array.
- *
- * Return: 0 on success, -EFAULT if an error occurs during mapping.
- */
-static int drm_gpusvm_migrate_map_pages(struct device *dev,
- dma_addr_t *dma_addr,
- unsigned long *migrate_pfn,
- unsigned long npages,
- enum dma_data_direction dir)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i) {
- struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
-
- if (!page)
- continue;
-
- if (WARN_ON_ONCE(is_zone_device_page(page)))
- return -EFAULT;
-
- dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(dev, dma_addr[i]))
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * drm_gpusvm_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
- * @dev: The device for which the pages were mapped
- * @dma_addr: Array of DMA addresses corresponding to mapped pages
- * @npages: Number of pages to unmap
- * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
- *
- * This function unmaps previously mapped pages of memory for GPU Shared Virtual
- * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
- * if it's valid and not already unmapped, and unmaps the corresponding page.
- */
-static void drm_gpusvm_migrate_unmap_pages(struct device *dev,
- dma_addr_t *dma_addr,
- unsigned long npages,
- enum dma_data_direction dir)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i) {
- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
- continue;
-
- dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
- }
-}
-
-/**
- * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory
+ * drm_gpusvm_range_evict() - Evict GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
- * @devmem_allocation: Pointer to the device memory allocation. The caller
- * should hold a reference to the device memory allocation,
- * which should be dropped via ops->devmem_release or upon
- * the failure of this function.
- * @ctx: GPU SVM context
- *
- * This function migrates the specified GPU SVM range to device memory. It
- * performs the necessary setup and invokes the driver-specific operations for
- * migration to device memory. Upon successful return, @devmem_allocation can
- * safely reference @range until ops->devmem_release is called which only upon
- * successful return. Expected to be called while holding the mmap lock in read
- * mode.
- *
- * Return: 0 on success, negative error code on failure.
- */
-int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- struct drm_gpusvm_devmem *devmem_allocation,
- const struct drm_gpusvm_ctx *ctx)
-{
- const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
- unsigned long start = drm_gpusvm_range_start(range),
- end = drm_gpusvm_range_end(range);
- struct migrate_vma migrate = {
- .start = start,
- .end = end,
- .pgmap_owner = gpusvm->device_private_page_owner,
- .flags = MIGRATE_VMA_SELECT_SYSTEM,
- };
- struct mm_struct *mm = gpusvm->mm;
- unsigned long i, npages = npages_in_range(start, end);
- struct vm_area_struct *vas;
- struct drm_gpusvm_zdd *zdd = NULL;
- struct page **pages;
- dma_addr_t *dma_addr;
- void *buf;
- int err;
-
- mmap_assert_locked(gpusvm->mm);
-
- if (!range->flags.migrate_devmem)
- return -EINVAL;
-
- if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
- !ops->copy_to_ram)
- return -EOPNOTSUPP;
-
- vas = vma_lookup(mm, start);
- if (!vas) {
- err = -ENOENT;
- goto err_out;
- }
-
- if (end > vas->vm_end || start < vas->vm_start) {
- err = -EINVAL;
- goto err_out;
- }
-
- if (!vma_is_anonymous(vas)) {
- err = -EBUSY;
- goto err_out;
- }
-
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
- sizeof(*pages), GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
-
- zdd = drm_gpusvm_zdd_alloc(gpusvm->device_private_page_owner);
- if (!zdd) {
- err = -ENOMEM;
- goto err_free;
- }
-
- migrate.vma = vas;
- migrate.src = buf;
- migrate.dst = migrate.src + npages;
-
- err = migrate_vma_setup(&migrate);
- if (err)
- goto err_free;
-
- if (!migrate.cpages) {
- err = -EFAULT;
- goto err_free;
- }
-
- if (migrate.cpages != npages) {
- err = -EBUSY;
- goto err_finalize;
- }
-
- err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
- if (err)
- goto err_finalize;
-
- err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
- migrate.src, npages, DMA_TO_DEVICE);
- if (err)
- goto err_finalize;
-
- for (i = 0; i < npages; ++i) {
- struct page *page = pfn_to_page(migrate.dst[i]);
-
- pages[i] = page;
- migrate.dst[i] = migrate_pfn(migrate.dst[i]);
- drm_gpusvm_get_devmem_page(page, zdd);
- }
-
- err = ops->copy_to_devmem(pages, dma_addr, npages);
- if (err)
- goto err_finalize;
-
- /* Upon success bind devmem allocation to range and zdd */
- zdd->devmem_allocation = devmem_allocation; /* Owns ref */
-
-err_finalize:
- if (err)
- drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
- migrate_vma_pages(&migrate);
- migrate_vma_finalize(&migrate);
- drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
- DMA_TO_DEVICE);
-err_free:
- if (zdd)
- drm_gpusvm_zdd_put(zdd);
- kvfree(buf);
-err_out:
- return err;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_migrate_to_devmem);
-
-/**
- * drm_gpusvm_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
- * @vas: Pointer to the VM area structure, can be NULL
- * @fault_page: Fault page
- * @npages: Number of pages to populate
- * @mpages: Number of pages to migrate
- * @src_mpfn: Source array of migrate PFNs
- * @mpfn: Array of migrate PFNs to populate
- * @addr: Start address for PFN allocation
- *
- * This function populates the RAM migrate page frame numbers (PFNs) for the
- * specified VM area structure. It allocates and locks pages in the VM area for
- * RAM usage. If vas is non-NULL use alloc_page_vma for allocation, if NULL use
- * alloc_page for allocation.
- *
- * Return: 0 on success, negative error code on failure.
- */
-static int drm_gpusvm_migrate_populate_ram_pfn(struct vm_area_struct *vas,
- struct page *fault_page,
- unsigned long npages,
- unsigned long *mpages,
- unsigned long *src_mpfn,
- unsigned long *mpfn,
- unsigned long addr)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
- struct page *page, *src_page;
-
- if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- src_page = migrate_pfn_to_page(src_mpfn[i]);
- if (!src_page)
- continue;
-
- if (fault_page) {
- if (src_page->zone_device_data !=
- fault_page->zone_device_data)
- continue;
- }
-
- if (vas)
- page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
- else
- page = alloc_page(GFP_HIGHUSER);
-
- if (!page)
- goto free_pages;
-
- mpfn[i] = migrate_pfn(page_to_pfn(page));
- }
-
- for (i = 0; i < npages; ++i) {
- struct page *page = migrate_pfn_to_page(mpfn[i]);
-
- if (!page)
- continue;
-
- WARN_ON_ONCE(!trylock_page(page));
- ++*mpages;
- }
-
- return 0;
-
-free_pages:
- for (i = 0; i < npages; ++i) {
- struct page *page = migrate_pfn_to_page(mpfn[i]);
-
- if (!page)
- continue;
-
- put_page(page);
- mpfn[i] = 0;
- }
- return -ENOMEM;
-}
-
-/**
- * drm_gpusvm_evict_to_ram() - Evict GPU SVM range to RAM
- * @devmem_allocation: Pointer to the device memory allocation
- *
- * Similar to __drm_gpusvm_migrate_to_ram but does not require mmap lock and
- * migration done via migrate_device_* functions.
- *
- * Return: 0 on success, negative error code on failure.
- */
-int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation)
-{
- const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
- unsigned long npages, mpages = 0;
- struct page **pages;
- unsigned long *src, *dst;
- dma_addr_t *dma_addr;
- void *buf;
- int i, err = 0;
- unsigned int retry_count = 2;
-
- npages = devmem_allocation->size >> PAGE_SHIFT;
-
-retry:
- if (!mmget_not_zero(devmem_allocation->mm))
- return -EFAULT;
-
- buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
- sizeof(*pages), GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- src = buf;
- dst = buf + (sizeof(*src) * npages);
- dma_addr = buf + (2 * sizeof(*src) * npages);
- pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
-
- err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
- if (err)
- goto err_free;
-
- err = migrate_device_pfns(src, npages);
- if (err)
- goto err_free;
-
- err = drm_gpusvm_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
- src, dst, 0);
- if (err || !mpages)
- goto err_finalize;
-
- err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
- dst, npages, DMA_FROM_DEVICE);
- if (err)
- goto err_finalize;
-
- for (i = 0; i < npages; ++i)
- pages[i] = migrate_pfn_to_page(src[i]);
-
- err = ops->copy_to_ram(pages, dma_addr, npages);
- if (err)
- goto err_finalize;
-
-err_finalize:
- if (err)
- drm_gpusvm_migration_unlock_put_pages(npages, dst);
- migrate_device_pages(src, dst, npages);
- migrate_device_finalize(src, dst, npages);
- drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
- DMA_FROM_DEVICE);
-err_free:
- kvfree(buf);
-err_out:
- mmput_async(devmem_allocation->mm);
-
- if (completion_done(&devmem_allocation->detached))
- return 0;
-
- if (retry_count--) {
- cond_resched();
- goto retry;
- }
-
- return err ?: -EBUSY;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_evict_to_ram);
-
-/**
- * __drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
- * @vas: Pointer to the VM area structure
- * @device_private_page_owner: Device private pages owner
- * @page: Pointer to the page for fault handling (can be NULL)
- * @fault_addr: Fault address
- * @size: Size of migration
- *
- * This internal function performs the migration of the specified GPU SVM range
- * to RAM. It sets up the migration, populates + dma maps RAM PFNs, and
- * invokes the driver-specific operations for migration to RAM.
- *
- * Return: 0 on success, negative error code on failure.
- */
-static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
- void *device_private_page_owner,
- struct page *page,
- unsigned long fault_addr,
- unsigned long size)
-{
- struct migrate_vma migrate = {
- .vma = vas,
- .pgmap_owner = device_private_page_owner,
- .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
- MIGRATE_VMA_SELECT_DEVICE_COHERENT,
- .fault_page = page,
- };
- struct drm_gpusvm_zdd *zdd;
- const struct drm_gpusvm_devmem_ops *ops;
- struct device *dev = NULL;
- unsigned long npages, mpages = 0;
- struct page **pages;
- dma_addr_t *dma_addr;
- unsigned long start, end;
- void *buf;
- int i, err = 0;
-
- start = ALIGN_DOWN(fault_addr, size);
- end = ALIGN(fault_addr + 1, size);
-
- /* Corner where VMA area struct has been partially unmapped */
- if (start < vas->vm_start)
- start = vas->vm_start;
- if (end > vas->vm_end)
- end = vas->vm_end;
-
- migrate.start = start;
- migrate.end = end;
- npages = npages_in_range(start, end);
-
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
- sizeof(*pages), GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
-
- migrate.vma = vas;
- migrate.src = buf;
- migrate.dst = migrate.src + npages;
-
- err = migrate_vma_setup(&migrate);
- if (err)
- goto err_free;
-
- /* Raced with another CPU fault, nothing to do */
- if (!migrate.cpages)
- goto err_free;
-
- if (!page) {
- for (i = 0; i < npages; ++i) {
- if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- page = migrate_pfn_to_page(migrate.src[i]);
- break;
- }
-
- if (!page)
- goto err_finalize;
- }
- zdd = page->zone_device_data;
- ops = zdd->devmem_allocation->ops;
- dev = zdd->devmem_allocation->dev;
-
- err = drm_gpusvm_migrate_populate_ram_pfn(vas, page, npages, &mpages,
- migrate.src, migrate.dst,
- start);
- if (err)
- goto err_finalize;
-
- err = drm_gpusvm_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
- DMA_FROM_DEVICE);
- if (err)
- goto err_finalize;
-
- for (i = 0; i < npages; ++i)
- pages[i] = migrate_pfn_to_page(migrate.src[i]);
-
- err = ops->copy_to_ram(pages, dma_addr, npages);
- if (err)
- goto err_finalize;
-
-err_finalize:
- if (err)
- drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
- migrate_vma_pages(&migrate);
- migrate_vma_finalize(&migrate);
- if (dev)
- drm_gpusvm_migrate_unmap_pages(dev, dma_addr, npages,
- DMA_FROM_DEVICE);
-err_free:
- kvfree(buf);
-err_out:
-
- return err;
-}
-
-/**
- * drm_gpusvm_range_evict - Evict GPU SVM range
* @range: Pointer to the GPU SVM range to be removed
*
- * This function evicts the specified GPU SVM range. This function will not
- * evict coherent pages.
+ * This function evicts the specified GPU SVM range.
*
* Return: 0 on success, a negative error code on failure.
*/
@@ -2126,60 +1525,6 @@ int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
EXPORT_SYMBOL_GPL(drm_gpusvm_range_evict);
/**
- * drm_gpusvm_page_free() - Put GPU SVM zone device data associated with a page
- * @page: Pointer to the page
- *
- * This function is a callback used to put the GPU SVM zone device data
- * associated with a page when it is being released.
- */
-static void drm_gpusvm_page_free(struct page *page)
-{
- drm_gpusvm_zdd_put(page->zone_device_data);
-}
-
-/**
- * drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (page fault handler)
- * @vmf: Pointer to the fault information structure
- *
- * This function is a page fault handler used to migrate a GPU SVM range to RAM.
- * It retrieves the GPU SVM range information from the faulting page and invokes
- * the internal migration function to migrate the range back to RAM.
- *
- * Return: VM_FAULT_SIGBUS on failure, 0 on success.
- */
-static vm_fault_t drm_gpusvm_migrate_to_ram(struct vm_fault *vmf)
-{
- struct drm_gpusvm_zdd *zdd = vmf->page->zone_device_data;
- int err;
-
- err = __drm_gpusvm_migrate_to_ram(vmf->vma,
- zdd->device_private_page_owner,
- vmf->page, vmf->address,
- zdd->devmem_allocation->size);
-
- return err ? VM_FAULT_SIGBUS : 0;
-}
-
-/*
- * drm_gpusvm_pagemap_ops - Device page map operations for GPU SVM
- */
-static const struct dev_pagemap_ops drm_gpusvm_pagemap_ops = {
- .page_free = drm_gpusvm_page_free,
- .migrate_to_ram = drm_gpusvm_migrate_to_ram,
-};
-
-/**
- * drm_gpusvm_pagemap_ops_get() - Retrieve GPU SVM device page map operations
- *
- * Return: Pointer to the GPU SVM device page map operations structure.
- */
-const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void)
-{
- return &drm_gpusvm_pagemap_ops;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_pagemap_ops_get);
-
-/**
* drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
* @gpusvm: Pointer to the GPU SVM structure.
* @start: Start address
@@ -2223,28 +1568,5 @@ void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
-/**
- * drm_gpusvm_devmem_init() - Initialize a GPU SVM device memory allocation
- *
- * @dev: Pointer to the device structure which device memory allocation belongs to
- * @mm: Pointer to the mm_struct for the address space
- * @ops: Pointer to the operations structure for GPU SVM device memory
- * @dpagemap: The struct drm_pagemap we're allocating from.
- * @size: Size of device memory allocation
- */
-void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
- struct device *dev, struct mm_struct *mm,
- const struct drm_gpusvm_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size)
-{
- init_completion(&devmem_allocation->detached);
- devmem_allocation->dev = dev;
- devmem_allocation->mm = mm;
- devmem_allocation->ops = ops;
- devmem_allocation->dpagemap = dpagemap;
- devmem_allocation->size = size;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_devmem_init);
-
MODULE_DESCRIPTION("DRM GPUSVM");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index f9eb56f24bef..bbc7fecb6f4a 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -27,6 +27,7 @@
#include <drm/drm_gpuvm.h>
+#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/mm.h>
@@ -2299,13 +2300,13 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
}
/**
- * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps
+ * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @priv: pointer to a driver private data structure
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
- * @priv: pointer to a driver private data structure
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2349,7 +2350,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
/**
- * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
* @req_addr: the start address of the range to unmap
@@ -2390,6 +2391,132 @@ drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
+static int
+drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
+{
+ struct drm_exec *exec = priv;
+
+ switch (op->op) {
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.unmap->va->gem.obj)
+ return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
+ return 0;
+ case DRM_GPUVA_OP_UNMAP:
+ if (op->unmap.va->gem.obj)
+ return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static const struct drm_gpuvm_ops lock_ops = {
+ .sm_step_map = drm_gpuva_sm_step_lock,
+ .sm_step_remap = drm_gpuva_sm_step_lock,
+ .sm_step_unmap = drm_gpuva_sm_step_lock,
+};
+
+/**
+ * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @num_fences: for newly mapped objects, the # of fences to reserve
+ * @req_addr: the start address of the new mapping
+ * @req_range: the range of the new mapping
+ * @req_obj: the &drm_gem_object to map
+ * @req_offset: the offset within the &drm_gem_object
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped, and locks+prepares (drm_exec_prepare_obj()) objects that
+ * will be newly mapped.
+ *
+ * The expected usage is:
+ *
+ * vm_bind {
+ * struct drm_exec exec;
+ *
+ * // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended:
+ * drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0);
+ *
+ * drm_exec_until_all_locked (&exec) {
+ * for_each_vm_bind_operation {
+ * switch (op->op) {
+ * case DRIVER_OP_UNMAP:
+ * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
+ * break;
+ * case DRIVER_OP_MAP:
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
+ * op->addr, op->range,
+ * obj, op->obj_offset);
+ * break;
+ * }
+ *
+ * drm_exec_retry_on_contention(&exec);
+ * if (ret)
+ * return ret;
+ * }
+ * }
+ * }
+ *
+ * This enables all locking to be performed before the driver begins modifying
+ * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
+ * where an earlier op can alter the sequence of steps generated for a later
+ * op, because the later altered step will involve the same GEM object(s)
+ * already seen in the earlier locking step. For example:
+ *
+ * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
+ * DRM_GPUVA_OP_REMAP/UNMAP step. This is safe because we've already
+ * locked the GEM object in the earlier DRIVER_OP_UNMAP op.
+ *
+ * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
+ * op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
+ * required without the earlier DRIVER_OP_MAP. This is safe because we've
+ * already locked the GEM object in the earlier DRIVER_OP_MAP step.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec, unsigned int num_fences,
+ u64 req_addr, u64 req_range,
+ struct drm_gem_object *req_obj, u64 req_offset)
+{
+ if (req_obj) {
+ int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
+ req_addr, req_range,
+ req_obj, req_offset);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
+
+/**
+ * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped by drm_gpuvm_sm_unmap().
+ *
+ * See drm_gpuvm_sm_map_exec_lock() for expected usage.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 req_addr, u64 req_range)
+{
+ return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
+ req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
+
static struct drm_gpuva_op *
gpuva_op_alloc(struct drm_gpuvm *gpuvm)
{
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b2b6a8e49dda..e79c3c623c9a 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -161,6 +161,8 @@ void drm_sysfs_lease_event(struct drm_device *dev);
/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -175,19 +177,14 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
-int drm_gem_pin_locked(struct drm_gem_object *obj);
-void drm_gem_unpin_locked(struct drm_gem_object *obj);
-int drm_gem_pin(struct drm_gem_object *obj);
-void drm_gem_unpin(struct drm_gem_object *obj);
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
void drm_debugfs_dev_fini(struct drm_device *dev);
void drm_debugfs_dev_register(struct drm_device *dev);
-int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root);
+int drm_debugfs_register(struct drm_minor *minor, int minor_id);
void drm_debugfs_unregister(struct drm_minor *minor);
void drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
@@ -205,8 +202,7 @@ static inline void drm_debugfs_dev_register(struct drm_device *dev)
{
}
-static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root)
+static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id)
{
return 0;
}
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index cc4c463daae7..247f468731de 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -7,6 +7,7 @@
#include <drm/drm_managed.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index a4cd476f9b30..e33c78fc8fbd 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -8,6 +8,7 @@
#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -229,7 +230,13 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
case DRM_FORMAT_XRGB8888:
switch (dbidev->pixel_format) {
case DRM_FORMAT_RGB565:
- drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, fmtcnv_state, swap);
+ if (swap) {
+ drm_fb_xrgb8888_to_rgb565be(&dst_map, NULL, src, fb, clip,
+ fmtcnv_state);
+ } else {
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip,
+ fmtcnv_state);
+ }
break;
case DRM_FORMAT_RGB888:
drm_fb_xrgb8888_to_rgb888(&dst_map, NULL, src, fb, clip, fmtcnv_state);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index dfa595556320..3a9b3278a6e3 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -26,6 +26,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -36,6 +37,8 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
+#include <linux/media-bus-format.h>
+
#include <video/mipi_display.h>
/**
@@ -89,12 +92,13 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
.restore = pm_generic_restore,
};
-static const struct bus_type mipi_dsi_bus_type = {
+const struct bus_type mipi_dsi_bus_type = {
.name = "mipi-dsi",
.match = mipi_dsi_device_match,
.uevent = mipi_dsi_uevent,
.pm = &mipi_dsi_device_pm_ops,
};
+EXPORT_SYMBOL_GPL(mipi_dsi_bus_type);
/**
* of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
@@ -871,6 +875,41 @@ ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
EXPORT_SYMBOL(mipi_dsi_generic_read);
/**
+ * drm_mipi_dsi_get_input_bus_fmt() - Get the required MEDIA_BUS_FMT_* based
+ * input pixel format for a given DSI output
+ * pixel format
+ * @dsi_format: pixel format that a DSI host needs to output
+ *
+ * Various DSI hosts can use this function during their
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation to ascertain
+ * the MEDIA_BUS_FMT_* pixel format required as input.
+ *
+ * RETURNS:
+ * a 32-bit MEDIA_BUS_FMT_* value on success or 0 in case of failure.
+ */
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format)
+{
+ switch (dsi_format) {
+ case MIPI_DSI_FMT_RGB888:
+ return MEDIA_BUS_FMT_RGB888_1X24;
+
+ case MIPI_DSI_FMT_RGB666:
+ return MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return MEDIA_BUS_FMT_RGB666_1X18;
+
+ case MIPI_DSI_FMT_RGB565:
+ return MEDIA_BUS_FMT_RGB565_1X16;
+
+ default:
+ /* Unsupported DSI Format */
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_mipi_dsi_get_input_bus_fmt);
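As a hedged illustration (not part of this patch), a DSI host implemented as a
bridge might use the helper in its &drm_bridge_funcs.atomic_get_input_bus_fmts
hook roughly as follows; my_dsi_host and bridge_to_my_dsi_host() are
hypothetical:

	static u32 *my_dsi_get_input_bus_fmts(struct drm_bridge *bridge,
					      struct drm_bridge_state *bridge_state,
					      struct drm_crtc_state *crtc_state,
					      struct drm_connector_state *conn_state,
					      u32 output_fmt,
					      unsigned int *num_input_fmts)
	{
		struct my_dsi_host *host = bridge_to_my_dsi_host(bridge);
		u32 *input_fmts, fmt;

		*num_input_fmts = 0;

		/* Map the DSI output format to the required input bus format. */
		fmt = drm_mipi_dsi_get_input_bus_fmt(host->format);
		if (!fmt)
			return NULL;

		input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
		if (!input_fmts)
			return NULL;

		input_fmts[0] = fmt;
		*num_input_fmts = 1;
		return input_fmts;
	}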
+
+/**
* mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
* @dsi: DSI peripheral device
* @data: buffer containing data to be transmitted
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 8642a2fb25a9..25f376869b3a 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_drv.h>
@@ -383,6 +384,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
prop = drm_property_create(dev,
DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
+ "IN_FORMATS_ASYNC", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.async_modifiers_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
"SIZE_HINTS", 0);
if (!prop)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 5565464c1734..988735560570 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -20,6 +20,8 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_fourcc.h>
@@ -72,6 +74,7 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
* drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
* @dev: DRM device
* @fb: drm_framebuffer object to fill out
+ * @info: pixel format information
* @mode_cmd: metadata from the userspace fb creation request
*
* This helper can be used in a drivers fb_create callback to pre-fill the fb's
@@ -79,12 +82,13 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
*/
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
fb->dev = dev;
- fb->format = drm_get_format_info(dev, mode_cmd);
+ fb->format = info;
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 7694b85e75e3..beb91a13a312 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -21,6 +21,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
new file mode 100644
index 000000000000..1da55322af12
--- /dev/null
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright © 2024-2025 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/migrate.h>
+#include <linux/pagemap.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_pagemap.h>
+
+/**
+ * DOC: Overview
+ *
+ * The DRM pagemap layer is intended to augment the dev_pagemap functionality by
+ * providing a way to populate a struct mm_struct virtual range with device
+ * private pages and to provide helpers to abstract device memory allocations,
+ * to migrate memory back and forth between device memory and system RAM and
+ * to handle access (and in the future migration) between devices implementing
+ * a fast interconnect that is not necessarily visible to the rest of the
+ * system.
+ *
+ * Typically the DRM pagemap receives requests from one or more DRM GPU SVM
+ * instances to populate struct mm_struct virtual ranges with memory. The
+ * migration is best effort only and may thus fail. The implementation should
+ * also handle device unbinding by blocking new population requests (returning
+ * an -ENODEV error) and, after that, migrating all device pages to system ram.
+ */
+
+/**
+ * DOC: Migration
+ *
+ * Migration granularity typically follows the GPU SVM range requests, but
+ * clashes can occur, due to races or because multiple GPU SVM instances have
+ * different views of the ranges used, so that parts of a requested range are
+ * already present in the requested device memory. In that case the
+ * implementation has a variety of options: it can fail, it can choose to
+ * populate only the part of the range that isn't already in device memory,
+ * or it can evict the range to system memory before trying to migrate.
+ * Ideally an implementation would just try to migrate the missing part of
+ * the range and allocate just enough memory to do so.
+ *
+ * When migrating to system memory as a response to a cpu fault or a device
+ * memory eviction request, currently a full device memory allocation is
+ * migrated back to system. Moving forward this might need improvement for
+ * situations where a single page needs bouncing between system memory and
+ * device memory due to, for example, atomic operations.
+ *
+ * Key DRM pagemap components:
+ *
+ * - Device Memory Allocations:
+ * Embedded structure containing enough information for the drm_pagemap to
+ * migrate to / from device memory.
+ *
+ * - Device Memory Operations:
+ * Define the interface for driver-specific device memory operations:
+ * releasing memory, populating pfns, and copying to / from device memory
+ * (see the sketch below).
+ */
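A minimal sketch of the driver side of this interface, assuming hypothetical
my_devmem_* callbacks; only the struct drm_pagemap_devmem_ops callback names
(devmem_release, populate_devmem_pfn, copy_to_devmem, copy_to_ram) are taken
from this file:

	/* Hedged sketch; all my_devmem_* functions are hypothetical. */
	static const struct drm_pagemap_devmem_ops my_devmem_ops = {
		.devmem_release = my_devmem_release,		/* free backing store */
		.populate_devmem_pfn = my_populate_devmem_pfn,	/* hand out device PFNs */
		.copy_to_devmem = my_copy_to_devmem,		/* RAM -> device copy */
		.copy_to_ram = my_copy_to_ram,			/* device -> RAM copy */
	};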
+
+/**
+ * struct drm_pagemap_zdd - GPU SVM zone device data
+ *
+ * @refcount: Reference count for the zdd
+ * @devmem_allocation: device memory allocation
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This structure serves as a generic wrapper installed in
+ * page->zone_device_data. It provides infrastructure for looking up a device
+ * memory allocation upon CPU page fault and asynchronously releasing device
+ * memory once the CPU has no page references. Asynchronous release is useful
+ * because CPU page references can be dropped in IRQ contexts, while releasing
+ * device memory likely requires sleeping locks.
+ */
+struct drm_pagemap_zdd {
+ struct kref refcount;
+ struct drm_pagemap_devmem *devmem_allocation;
+ void *device_private_page_owner;
+};
+
+/**
+ * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This function allocates and initializes a new zdd structure. It sets up the
+ * reference count and stores the device private page owner.
+ *
+ * Return: Pointer to the allocated zdd on success, ERR_PTR() on failure.
+ */
+static struct drm_pagemap_zdd *
+drm_pagemap_zdd_alloc(void *device_private_page_owner)
+{
+ struct drm_pagemap_zdd *zdd;
+
+ zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
+ if (!zdd)
+ return NULL;
+
+ kref_init(&zdd->refcount);
+ zdd->devmem_allocation = NULL;
+ zdd->device_private_page_owner = device_private_page_owner;
+
+ return zdd;
+}
+
+/**
+ * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function increments the reference count of the provided zdd structure.
+ *
+ * Return: Pointer to the zdd structure.
+ */
+static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
+{
+ kref_get(&zdd->refcount);
+ return zdd;
+}
+
+/**
+ * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
+ * @ref: Pointer to the reference count structure.
+ *
+ * This function signals detach completion on the associated device memory
+ * allocation, invokes its devmem_release operation if set, and frees the zdd.
+ */
+static void drm_pagemap_zdd_destroy(struct kref *ref)
+{
+ struct drm_pagemap_zdd *zdd =
+ container_of(ref, struct drm_pagemap_zdd, refcount);
+ struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;
+
+ if (devmem) {
+ complete_all(&devmem->detached);
+ if (devmem->ops->devmem_release)
+ devmem->ops->devmem_release(devmem);
+ }
+ kfree(zdd);
+}
+
+/**
+ * drm_pagemap_zdd_put() - Put a zdd reference.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function decrements the reference count of the provided zdd structure
+ * and destroys it if the count drops to zero.
+ */
+static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
+{
+ kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
+}
+
+/**
+ * drm_pagemap_migration_unlock_put_page() - Put a migration page
+ * @page: Pointer to the page to put
+ *
+ * This function unlocks and puts a page.
+ */
+static void drm_pagemap_migration_unlock_put_page(struct page *page)
+{
+ unlock_page(page);
+ put_page(page);
+}
+
+/**
+ * drm_pagemap_migration_unlock_put_pages() - Put migration pages
+ * @npages: Number of pages
+ * @migrate_pfn: Array of migrate page frame numbers
+ *
+ * This function unlocks and puts an array of pages.
+ */
+static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
+ unsigned long *migrate_pfn)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!migrate_pfn[i])
+ continue;
+
+ page = migrate_pfn_to_page(migrate_pfn[i]);
+ drm_pagemap_migration_unlock_put_page(page);
+ migrate_pfn[i] = 0;
+ }
+}
+
+/**
+ * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
+ * @page: Pointer to the page
+ * @zdd: Pointer to the GPU SVM zone device data
+ *
+ * This function associates the given page with the specified GPU SVM zone
+ * device data and initializes it for zone device usage.
+ */
+static void drm_pagemap_get_devmem_page(struct page *page,
+ struct drm_pagemap_zdd *zdd)
+{
+ page->zone_device_data = drm_pagemap_zdd_get(zdd);
+ zone_device_page_init(page);
+}
+
+/**
+ * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
+ * @dev: The device for which the pages are being mapped
+ * @dma_addr: Array to store DMA addresses corresponding to mapped pages
+ * @migrate_pfn: Array of migrate page frame numbers to map
+ * @npages: Number of pages to map
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function maps pages of memory for migration usage in GPU SVM. It
+ * iterates over each page frame number provided in @migrate_pfn, maps the
+ * corresponding page, and stores the DMA address in the provided @dma_addr
+ * array.
+ *
+ * Returns: 0 on success, -EFAULT if an error occurs during mapping.
+ */
+static int drm_pagemap_migrate_map_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long *migrate_pfn,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+
+ if (!page)
+ continue;
+
+ if (WARN_ON_ONCE(is_zone_device_page(page)))
+ return -EFAULT;
+
+ dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, dma_addr[i]))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
+ * @dev: The device for which the pages were mapped
+ * @dma_addr: Array of DMA addresses corresponding to mapped pages
+ * @npages: Number of pages to unmap
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function unmaps previously mapped pages of memory for GPU Shared Virtual
+ * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
+ * if it's valid and not already unmapped, and unmaps the corresponding page.
+ */
+static void drm_pagemap_migrate_unmap_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+ continue;
+
+ dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+ }
+}
+
+static unsigned long
+npages_in_range(unsigned long start, unsigned long end)
+{
+ return (end - start) >> PAGE_SHIFT;
+}
+
+/**
+ * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
+ * @devmem_allocation: The device memory allocation to migrate to.
+ * The caller should hold a reference to the device memory allocation,
+ * and the reference is consumed by this function unless it returns with
+ * an error.
+ * @mm: Pointer to the struct mm_struct.
+ * @start: Start of the virtual address range to migrate.
+ * @end: End of the virtual address range to migrate.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ * @pgmap_owner: Not used currently, since only system memory is considered.
+ *
+ * This function migrates the specified virtual address range to device memory.
+ * It performs the necessary setup and invokes the driver-specific operations for
+ * migration to device memory. Expected to be called while holding the mmap lock in
+ * at least read mode.
+ *
+ * Note: The @timeslice_ms parameter can typically be used to force data to
+ * remain in pagemap pages long enough for a GPU to perform a task and to prevent
+ * a migration livelock. One alternative would be for the GPU driver to block
+ * in a mmu_notifier for the specified amount of time, but adding the
+ * functionality to the pagemap is likely nicer to the system as a whole.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned long timeslice_ms,
+ void *pgmap_owner)
+{
+ const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+ struct migrate_vma migrate = {
+ .start = start,
+ .end = end,
+ .pgmap_owner = pgmap_owner,
+ .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ };
+ unsigned long i, npages = npages_in_range(start, end);
+ struct vm_area_struct *vas;
+ struct drm_pagemap_zdd *zdd = NULL;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int err;
+
+ mmap_assert_locked(mm);
+
+ if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
+ !ops->copy_to_ram)
+ return -EOPNOTSUPP;
+
+ vas = vma_lookup(mm, start);
+ if (!vas) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ if (end > vas->vm_end || start < vas->vm_start) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!vma_is_anonymous(vas)) {
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ zdd = drm_pagemap_zdd_alloc(pgmap_owner);
+ if (!zdd) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ if (!migrate.cpages) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (migrate.cpages != npages) {
+ err = -EBUSY;
+ goto err_finalize;
+ }
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
+ if (err)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ migrate.src, npages, DMA_TO_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = pfn_to_page(migrate.dst[i]);
+
+ pages[i] = page;
+ migrate.dst[i] = migrate_pfn(migrate.dst[i]);
+ drm_pagemap_get_devmem_page(page, zdd);
+ }
+
+ err = ops->copy_to_devmem(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+ /* Upon success bind devmem allocation to range and zdd */
+ devmem_allocation->timeslice_expiration = get_jiffies_64() +
+ msecs_to_jiffies(timeslice_ms);
+ zdd->devmem_allocation = devmem_allocation; /* Owns ref */
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_TO_DEVICE);
+err_free:
+ if (zdd)
+ drm_pagemap_zdd_put(zdd);
+ kvfree(buf);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
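A hedged caller-side sketch, assuming a hypothetical struct my_devmem wrapper
that embeds struct drm_pagemap_devmem as @base; note the mmap read lock held
across the call and the devmem reference being consumed on success:

	static int my_migrate_range(struct my_devmem *alloc, struct mm_struct *mm,
				    unsigned long start, unsigned long end)
	{
		int err;

		mmap_read_lock(mm);
		err = drm_pagemap_migrate_to_devmem(&alloc->base, mm, start, end,
						    5 /* timeslice_ms */,
						    my_pgmap_owner /* hypothetical */);
		mmap_read_unlock(mm);

		/* On success the reference to &alloc->base has been consumed. */
		return err;
	}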
+
+/**
+ * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
+ * @vas: Pointer to the VM area structure, can be NULL
+ * @fault_page: Fault page
+ * @npages: Number of pages to populate
+ * @mpages: Number of pages populated for migration (output)
+ * @src_mpfn: Source array of migrate PFNs
+ * @mpfn: Array of migrate PFNs to populate
+ * @addr: Start address for PFN allocation
+ *
+ * This function populates the RAM migrate page frame numbers (PFNs) for the
+ * specified VM area structure. It allocates and locks pages in the VM area for
+ * RAM usage. If @vas is non-NULL, alloc_page_vma() is used for allocation;
+ * otherwise alloc_page() is used.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
+ struct page *fault_page,
+ unsigned long npages,
+ unsigned long *mpages,
+ unsigned long *src_mpfn,
+ unsigned long *mpfn,
+ unsigned long addr)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
+ struct page *page, *src_page;
+
+ if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ src_page = migrate_pfn_to_page(src_mpfn[i]);
+ if (!src_page)
+ continue;
+
+ if (fault_page) {
+ if (src_page->zone_device_data !=
+ fault_page->zone_device_data)
+ continue;
+ }
+
+ if (vas)
+ page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+ else
+ page = alloc_page(GFP_HIGHUSER);
+
+ if (!page)
+ goto free_pages;
+
+ mpfn[i] = migrate_pfn(page_to_pfn(page));
+ }
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ WARN_ON_ONCE(!trylock_page(page));
+ ++*mpages;
+ }
+
+ return 0;
+
+free_pages:
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ put_page(page);
+ mpfn[i] = 0;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM
+ * @devmem_allocation: Pointer to the device memory allocation
+ *
+ * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap
+ * lock; migration is done via the migrate_device_* functions.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
+{
+ const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ unsigned long *src, *dst;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int i, err = 0;
+ unsigned int retry_count = 2;
+
+ npages = devmem_allocation->size >> PAGE_SHIFT;
+
+retry:
+ if (!mmget_not_zero(devmem_allocation->mm))
+ return -EFAULT;
+
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ src = buf;
+ dst = buf + (sizeof(*src) * npages);
+ dma_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
+ if (err)
+ goto err_free;
+
+ err = migrate_device_pfns(src, npages);
+ if (err)
+ goto err_free;
+
+ err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
+ src, dst, 0);
+ if (err || !mpages)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ dst, npages, DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, dst);
+ migrate_device_pages(src, dst, npages);
+ migrate_device_finalize(src, dst, npages);
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+ mmput_async(devmem_allocation->mm);
+
+ if (completion_done(&devmem_allocation->detached))
+ return 0;
+
+ if (retry_count--) {
+ cond_resched();
+ goto retry;
+ }
+
+ return err ?: -EBUSY;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
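Usage is a single call from the driver's eviction path. A hedged sketch, again
assuming the hypothetical struct my_devmem wrapper:

	static int my_evict(struct my_devmem *alloc)
	{
		int err = drm_pagemap_evict_to_ram(&alloc->base);

		if (err)
			return err;	/* e.g. -EBUSY: device pages remain, retry later */

		/*
		 * A zero return means the detached completion fired, i.e. the
		 * devmem_release callback has already run for this allocation.
		 */
		return 0;
	}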
+
+/**
+ * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
+ * @vas: Pointer to the VM area structure
+ * @device_private_page_owner: Device private pages owner
+ * @page: Pointer to the page for fault handling (can be NULL)
+ * @fault_addr: Fault address
+ * @size: Size of migration
+ *
+ * This internal function performs the migration of the specified GPU SVM range
+ * to RAM. It sets up the migration, populates and DMA-maps RAM PFNs, and
+ * invokes the driver-specific operations for migration to RAM.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
+ void *device_private_page_owner,
+ struct page *page,
+ unsigned long fault_addr,
+ unsigned long size)
+{
+ struct migrate_vma migrate = {
+ .vma = vas,
+ .pgmap_owner = device_private_page_owner,
+ .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT,
+ .fault_page = page,
+ };
+ struct drm_pagemap_zdd *zdd;
+ const struct drm_pagemap_devmem_ops *ops;
+ struct device *dev = NULL;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ unsigned long start, end;
+ void *buf;
+ int i, err = 0;
+
+ if (page) {
+ zdd = page->zone_device_data;
+ if (time_before64(get_jiffies_64(),
+ zdd->devmem_allocation->timeslice_expiration))
+ return 0;
+ }
+
+ start = ALIGN_DOWN(fault_addr, size);
+ end = ALIGN(fault_addr + 1, size);
+
+ /* Corner case where the VMA has been partially unmapped */
+ if (start < vas->vm_start)
+ start = vas->vm_start;
+ if (end > vas->vm_end)
+ end = vas->vm_end;
+
+ migrate.start = start;
+ migrate.end = end;
+ npages = npages_in_range(start, end);
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ /* Raced with another CPU fault, nothing to do */
+ if (!migrate.cpages)
+ goto err_free;
+
+ if (!page) {
+ for (i = 0; i < npages; ++i) {
+ if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ page = migrate_pfn_to_page(migrate.src[i]);
+ break;
+ }
+
+ if (!page)
+ goto err_finalize;
+ }
+ zdd = page->zone_device_data;
+ ops = zdd->devmem_allocation->ops;
+ dev = zdd->devmem_allocation->dev;
+
+ err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
+ migrate.src, migrate.dst,
+ start);
+ if (err)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
+ DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(migrate.src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ if (dev)
+ drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+
+ return err;
+}
+
+/**
+ * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page
+ * @page: Pointer to the page
+ *
+ * This function is a callback used to put the GPU SVM zone device data
+ * associated with a page when it is being released.
+ */
+static void drm_pagemap_page_free(struct page *page)
+{
+ drm_pagemap_zdd_put(page->zone_device_data);
+}
+
+/**
+ * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
+ * @vmf: Pointer to the fault information structure
+ *
+ * This function is a page fault handler used to migrate a virtual range
+ * to RAM. The device memory allocation in which the device page is found is
+ * migrated in its entirety.
+ *
+ * Returns:
+ * VM_FAULT_SIGBUS on failure, 0 on success.
+ */
+static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
+{
+ struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
+ int err;
+
+ err = __drm_pagemap_migrate_to_ram(vmf->vma,
+ zdd->device_private_page_owner,
+ vmf->page, vmf->address,
+ zdd->devmem_allocation->size);
+
+ return err ? VM_FAULT_SIGBUS : 0;
+}
+
+static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
+ .page_free = drm_pagemap_page_free,
+ .migrate_to_ram = drm_pagemap_migrate_to_ram,
+};
+
+/**
+ * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations
+ *
+ * Returns:
+ * Pointer to the GPU SVM device page map operations structure.
+ */
+const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
+{
+ return &drm_pagemap_pagemap_ops;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
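A hedged sketch of wiring these ops into a driver's struct dev_pagemap at
probe time; res and my_pgmap_owner are hypothetical:

	static int my_pagemap_init(struct device *dev, struct dev_pagemap *pgmap,
				   struct resource *res)
	{
		void *addr;

		pgmap->type = MEMORY_DEVICE_PRIVATE;
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->nr_range = 1;
		pgmap->ops = drm_pagemap_pagemap_ops_get();
		pgmap->owner = my_pgmap_owner;	/* hypothetical owner cookie */

		addr = devm_memremap_pages(dev, pgmap);
		return PTR_ERR_OR_ZERO(addr);
	}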
+
+/**
+ * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
+ *
+ * @devmem_allocation: The struct drm_pagemap_devmem to initialize.
+ * @dev: Pointer to the device structure which device memory allocation belongs to
+ * @mm: Pointer to the mm_struct for the address space
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap we're allocating from.
+ * @size: Size of device memory allocation
+ */
+void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size)
+{
+ init_completion(&devmem_allocation->detached);
+ devmem_allocation->dev = dev;
+ devmem_allocation->mm = mm;
+ devmem_allocation->ops = ops;
+ devmem_allocation->dpagemap = dpagemap;
+ devmem_allocation->size = size;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
+
+/**
+ * drm_pagemap_page_to_dpagemap() - Return a pointer to the drm_pagemap of a page
+ * @page: The struct page.
+ *
+ * Return: A pointer to the struct drm_pagemap of a device private page that
+ * was populated from the struct drm_pagemap. If the page was *not* populated
+ * from a struct drm_pagemap, the result is undefined and the function call
+ * may result in dereferencing an invalid address.
+ */
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
+{
+ struct drm_pagemap_zdd *zdd = page->zone_device_data;
+
+ return zdd->devmem_allocation->dpagemap;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
+
+/**
+ * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
+ * @dpagemap: Pointer to the drm_pagemap managing the device memory
+ * @start: Start of the virtual range to populate.
+ * @end: End of the virtual range to populate.
+ * @mm: Pointer to the virtual address space.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ *
+ * Attempt to populate a virtual range with device memory pages,
+ * clearing them or migrating data from the existing pages if necessary.
+ * The function is best effort only, and implementations may vary
+ * in how hard they try to satisfy the request.
+ *
+ * Return: %0 on success, negative error code on error. If the hardware
+ * device was removed / unbound the function will return %-ENODEV.
+ */
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms)
+{
+ int err;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+ mmap_read_lock(mm);
+ err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
+ timeslice_ms);
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return err;
+}
+EXPORT_SYMBOL(drm_pagemap_populate_mm);
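A hedged GPU-fault-handler sketch (all names hypothetical) requesting 2 MiB of
device memory backing around a faulting address:

	static int my_handle_gpu_fault(struct drm_pagemap *dpagemap,
				       struct mm_struct *mm,
				       unsigned long fault_addr)
	{
		unsigned long start = ALIGN_DOWN(fault_addr, SZ_2M);
		unsigned long end = ALIGN(fault_addr + 1, SZ_2M);

		/* Best effort; -ENODEV means the device was unbound. */
		return drm_pagemap_populate_mm(dpagemap, start, end, mm,
					       5 /* timeslice_ms */);
	}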
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index c627e42a7ce7..c8bb28dccdc1 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -23,6 +23,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -74,8 +75,9 @@ EXPORT_SYMBOL(drm_panel_init);
* drm_panel_add - add a panel to the global registry
* @panel: panel to add
*
- * Add a panel to the global registry so that it can be looked up by display
- * drivers.
+ * Add a panel to the global registry so that it can be looked
+ * up by display drivers. The panel to be added must have been
+ * allocated by devm_drm_panel_alloc().
*/
void drm_panel_add(struct drm_panel *panel)
{
@@ -105,21 +107,21 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* Calling this function will enable power and deassert any reset signals to
* the panel. After this has completed it is possible to communicate with any
- * integrated circuitry via a command bus.
- *
- * Return: 0 on success or a negative error code on failure.
+ * integrated circuitry via a command bus. This function cannot fail (as it is
+ * called from the pre_enable call chain). There will always be a call to
+ * drm_panel_unprepare() afterwards.
*/
-int drm_panel_prepare(struct drm_panel *panel)
+void drm_panel_prepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->prepared) {
dev_warn(panel->dev, "Skipping prepare of already prepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
@@ -138,11 +140,8 @@ int drm_panel_prepare(struct drm_panel *panel)
follower->funcs->panel_prepared, ret);
}
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_prepare);
@@ -154,16 +153,14 @@ EXPORT_SYMBOL(drm_panel_prepare);
* reset, turn off power supplies, ...). After this function has completed, it
* is usually no longer possible to communicate with the panel until another
* call to drm_panel_prepare().
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_unprepare(struct drm_panel *panel)
+void drm_panel_unprepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -176,7 +173,7 @@ int drm_panel_unprepare(struct drm_panel *panel)
*/
if (!panel->prepared) {
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
@@ -195,11 +192,8 @@ int drm_panel_unprepare(struct drm_panel *panel)
}
panel->prepared = false;
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_unprepare);
@@ -209,26 +203,26 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*
* Calling this function will cause the panel display drivers to be turned on
* and the backlight to be enabled. Content will be visible on screen after
- * this call completes.
- *
- * Return: 0 on success or a negative error code on failure.
+ * this call completes. This function cannot fail (as it is called from the
+ * enable call chain). There will always be a call to drm_panel_disable()
+ * afterwards.
*/
-int drm_panel_enable(struct drm_panel *panel)
+void drm_panel_enable(struct drm_panel *panel)
{
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->enabled) {
dev_warn(panel->dev, "Skipping enable of already enabled panel\n");
- return 0;
+ return;
}
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
- return ret;
+ return;
}
panel->enabled = true;
@@ -236,8 +230,6 @@ int drm_panel_enable(struct drm_panel *panel)
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -248,15 +240,13 @@ EXPORT_SYMBOL(drm_panel_enable);
* This will typically turn off the panel's backlight or disable the display
* drivers. For smart panels it should still be possible to communicate with
* the integrated circuitry via any command bus after this call.
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_disable(struct drm_panel *panel)
+void drm_panel_disable(struct drm_panel *panel)
{
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -269,7 +259,7 @@ int drm_panel_disable(struct drm_panel *panel)
*/
if (!panel->enabled) {
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
- return 0;
+ return;
}
ret = backlight_disable(panel->backlight);
@@ -280,11 +270,9 @@ int drm_panel_disable(struct drm_panel *panel)
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
- return ret;
+ return;
}
panel->enabled = false;
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_disable);
@@ -317,6 +305,93 @@ int drm_panel_get_modes(struct drm_panel *panel,
}
EXPORT_SYMBOL(drm_panel_get_modes);
+static void __drm_panel_free(struct kref *kref)
+{
+ struct drm_panel *panel = container_of(kref, struct drm_panel, refcount);
+
+ kfree(panel->container);
+}
+
+/**
+ * drm_panel_get - Acquire a panel reference
+ * @panel: DRM panel
+ *
+ * This function increments the panel's refcount.
+ * Returns:
+ * Pointer to @panel
+ */
+struct drm_panel *drm_panel_get(struct drm_panel *panel)
+{
+ if (!panel)
+ return panel;
+
+ kref_get(&panel->refcount);
+
+ return panel;
+}
+EXPORT_SYMBOL(drm_panel_get);
+
+/**
+ * drm_panel_put - Release a panel reference
+ * @panel: DRM panel
+ *
+ * This function decrements the panel's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_panel_put(struct drm_panel *panel)
+{
+ if (panel)
+ kref_put(&panel->refcount, __drm_panel_free);
+}
+EXPORT_SYMBOL(drm_panel_put);
+
+/**
+ * drm_panel_put_void - wrapper to drm_panel_put() taking a void pointer
+ *
+ * @data: pointer to a struct drm_panel, cast to a void pointer
+ *
+ * Wrapper of drm_panel_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_panel_put_void(void *data)
+{
+ struct drm_panel *panel = (struct drm_panel *)data;
+
+ drm_panel_put(panel);
+}
+
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type)
+{
+ void *container;
+ struct drm_panel *panel;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ panel = container + offset;
+ panel->container = container;
+ panel->funcs = funcs;
+ kref_init(&panel->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_panel_put_void, panel);
+ if (err)
+ return ERR_PTR(err);
+
+ drm_panel_init(panel, dev, funcs, connector_type);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_panel_alloc);
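A hedged probe-time sketch using the devm_drm_panel_alloc() wrapper macro
(assumed to expand to __devm_drm_panel_alloc() with the container type and
member offset); struct my_panel and my_panel_funcs are hypothetical:

	struct my_panel {
		struct drm_panel panel;
		/* driver-private state */
	};

	static int my_panel_probe(struct mipi_dsi_device *dsi)
	{
		struct my_panel *ctx;

		ctx = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
					   &my_panel_funcs,
					   DRM_MODE_CONNECTOR_DSI);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		/* ... hardware setup ... */

		drm_panel_add(&ctx->panel);
		return 0;
	}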
+
#ifdef CONFIG_OF
/**
* of_drm_find_panel - look up a panel using a device tree node
@@ -399,13 +474,51 @@ int of_drm_get_panel_orientation(const struct device_node *np,
EXPORT_SYMBOL(of_drm_get_panel_orientation);
#endif
+/* Find panel by fwnode. This should be identical to of_drm_find_panel(). */
+static struct drm_panel *find_panel_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ struct drm_panel *panel;
+
+ if (!fwnode_device_is_available(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&panel_lock);
+
+ list_for_each_entry(panel, &panel_list, list) {
+ if (dev_fwnode(panel->dev) == fwnode) {
+ mutex_unlock(&panel_lock);
+ return panel;
+ }
+ }
+
+ mutex_unlock(&panel_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+/* Find panel by follower device */
+static struct drm_panel *find_panel_by_dev(struct device *follower_dev)
+{
+ struct fwnode_handle *fwnode;
+ struct drm_panel *panel;
+
+ fwnode = fwnode_find_reference(dev_fwnode(follower_dev), "panel", 0);
+ if (IS_ERR(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ panel = find_panel_by_fwnode(fwnode);
+ fwnode_handle_put(fwnode);
+
+ return panel;
+}
+
/**
* drm_is_panel_follower() - Check if the device is a panel follower
* @dev: The 'struct device' to check
*
* This checks to see if a device needs to be power sequenced together with
* a panel using the panel follower API.
- * At the moment panels can only be followed on device tree enabled systems.
+ *
* The "panel" property of the follower points to the panel to be followed.
*
* Return: true if we should be power sequenced with a panel; false otherwise.
@@ -417,7 +530,7 @@ bool drm_is_panel_follower(struct device *dev)
* don't bother trying to parse it here. We just need to know if the
* property is there.
*/
- return of_property_present(dev->of_node, "panel");
+ return device_property_present(dev, "panel");
}
EXPORT_SYMBOL(drm_is_panel_follower);
@@ -434,7 +547,6 @@ EXPORT_SYMBOL(drm_is_panel_follower);
* If a follower is added to a panel that's already been turned on, the
* follower's prepare callback is called right away.
*
- * At the moment panels can only be followed on device tree enabled systems.
* The "panel" property of the follower points to the panel to be followed.
*
* Return: 0 or an error code. Note that -ENODEV means that we detected that
@@ -444,16 +556,10 @@ EXPORT_SYMBOL(drm_is_panel_follower);
int drm_panel_add_follower(struct device *follower_dev,
struct drm_panel_follower *follower)
{
- struct device_node *panel_np;
struct drm_panel *panel;
int ret;
- panel_np = of_parse_phandle(follower_dev->of_node, "panel", 0);
- if (!panel_np)
- return -ENODEV;
-
- panel = of_drm_find_panel(panel_np);
- of_node_put(panel_np);
+ panel = find_panel_by_dev(follower_dev);
if (IS_ERR(panel))
return PTR_ERR(panel);
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
index c477d98ade2b..598f812b7cb3 100644
--- a/drivers/gpu/drm/drm_panel_backlight_quirks.c
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -2,6 +2,7 @@
#include <linux/array_size.h>
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index c554ad8f246b..3a218fb592ce 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -9,6 +9,7 @@
*/
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_connector.h>
#include <drm/drm_utils.h>
@@ -517,6 +518,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* ZOTAC Gaming Zone */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ZOTAC"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "G0A1W"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* One Mix 2S (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index b47ea25fdfaa..1d6312fa1429 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -6,7 +6,9 @@
* Tux Ascii art taken from cowsay written by Tony Monroe
*/
+#include <linux/export.h>
#include <linux/font.h>
+#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/iosys-map.h>
#include <linux/kdebug.h>
@@ -154,6 +156,90 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
+static void drm_panic_write_pixel16(void *vaddr, unsigned int offset, u16 color)
+{
+ u16 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
+{
+ u8 *p = vaddr + offset;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p = color & 0xff;
+}
+
+static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
+{
+ u32 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel(void *vaddr, unsigned int offset, u32 color, unsigned int cpp)
+{
+ switch (cpp) {
+ case 2:
+ drm_panic_write_pixel16(vaddr, offset, color);
+ break;
+ case 3:
+ drm_panic_write_pixel24(vaddr, offset, color);
+ break;
+ case 4:
+ drm_panic_write_pixel32(vaddr, offset, color);
+ break;
+ default:
+ pr_debug_once("Can't blit with pixel width %d\n", cpp);
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, const u8 *sbuf8,
+ unsigned int spitch, struct drm_rect *clip,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ unsigned int height = drm_rect_height(clip);
+ unsigned int width = drm_rect_width(clip);
+ void *vaddr = NULL;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = (y + clip->y1) * dpitch + (x + clip->x1) * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (!pages[new_page])
+ continue;
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+ if (vaddr)
+ drm_panic_write_pixel(vaddr, offset, fg32, cpp);
+ }
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
/*
* drm_panic_blit - convert a monochrome image to a linear framebuffer
* @sb: destination scanout buffer
@@ -177,6 +263,10 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color);
+ if (sb->pages)
+ return drm_panic_blit_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ sbuf8, spitch, clip, scale, fg_color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -209,6 +299,35 @@ static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, color);
}
+static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, struct drm_rect *clip,
+ u32 color)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ void *vaddr = NULL;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ for (x = clip->x1; x < clip->x2; x++) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = y * dpitch + x * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+ drm_panic_write_pixel(vaddr, offset, color, cpp);
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
/*
* drm_panic_fill - Fill a rectangle with a color
* @sb: destination scanout buffer
@@ -225,6 +344,10 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_fill_pixel(sb, clip, color);
+ if (sb->pages)
+ return drm_panic_fill_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ clip, color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -709,16 +832,24 @@ static void draw_panic_plane(struct drm_plane *plane, const char *description)
if (!drm_panic_trylock(plane->dev, flags))
return;
+ ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+
+ if (ret || !drm_panic_is_format_supported(sb.format))
+ goto unlock;
+
+ /* At least one of these must be set, or we can't draw pixels */
+ if (!sb.set_pixel && !sb.pages && iosys_map_is_null(&sb.map[0]))
+ goto unlock;
+
drm_panic_set_description(description);
- ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+ draw_panic_dispatch(&sb);
+ if (plane->helper_private->panic_flush)
+ plane->helper_private->panic_flush(plane);
- if (!ret && drm_panic_is_format_supported(sb.format)) {
- draw_panic_dispatch(&sb);
- if (plane->helper_private->panic_flush)
- plane->helper_private->panic_flush(plane);
- }
drm_panic_clear_description();
+
+unlock:
drm_panic_unlock(plane->dev, flags);
}
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index f2a99681b998..09a9b452e8b7 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -5,7 +5,7 @@
//! It is called from a panic handler, so it shouldn't allocate memory and
//! does all the work on the stack or on the provided buffers. For
//! simplification, it only supports low error correction, and applies the
-//! first mask (checkerboard). It will draw the smallest QRcode that can
+//! first mask (checkerboard). It will draw the smallest QR code that can
//! contain the string passed as parameter. To get the most compact
//! QR code, the start of the URL is encoded as binary, and the
//! compressed kmsg is encoded as numeric.
@@ -27,7 +27,7 @@
//! * <https://github.com/erwanvivien/fast_qr>
//! * <https://github.com/bjguillot/qr>
-use kernel::{prelude::*, str::CStr};
+use kernel::prelude::*;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
struct Version(usize);
@@ -315,7 +315,7 @@ impl Segment<'_> {
}
}
- // Returns the size of the length field in bits, depending on QR Version.
+ /// Returns the size of the length field in bits, depending on QR Version.
fn length_bits_count(&self, version: Version) -> usize {
let Version(v) = version;
match self {
@@ -331,7 +331,7 @@ impl Segment<'_> {
}
}
- // Number of characters in the segment.
+ /// Number of characters in the segment.
fn character_count(&self) -> usize {
match self {
Segment::Binary(data) => data.len(),
@@ -366,8 +366,48 @@ impl Segment<'_> {
SegmentIterator {
segment: self,
offset: 0,
- carry: 0,
- carry_len: 0,
+ decfifo: Default::default(),
+ }
+ }
+}
+
+/// Max FIFO size is 17 (max push) + 2 (max remaining digits)
+const MAX_FIFO_SIZE: usize = 19;
+
+/// A simple Decimal digit FIFO
+#[derive(Default)]
+struct DecFifo {
+ decimals: [u8; MAX_FIFO_SIZE],
+ len: usize,
+}
+
+impl DecFifo {
+ fn push(&mut self, data: u64, len: usize) {
+ let mut chunk = data;
+ for i in (0..self.len).rev() {
+ self.decimals[i + len] = self.decimals[i];
+ }
+ for i in 0..len {
+ self.decimals[i] = (chunk % 10) as u8;
+ chunk /= 10;
+ }
+ self.len += len;
+ }
+
+ /// Pop up to 3 decimal digits from the FIFO
+ fn pop3(&mut self) -> Option<(u16, usize)> {
+ if self.len == 0 {
+ None
+ } else {
+ let poplen = 3.min(self.len);
+ self.len -= poplen;
+ let mut out = 0;
+ let mut exp = 1;
+ for i in 0..poplen {
+ out += u16::from(self.decimals[self.len + i]) * exp;
+ exp *= 10;
+ }
+ Some((out, NUM_CHARS_BITS[poplen]))
}
}
}
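
Since the FIFO replaces fairly subtle carry arithmetic, here is a hedged, stand-alone C transliteration of the same data structure; the names and the NUM_CHARS_BITS table mirror the Rust code above, but this is an illustration, not kernel source:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_FIFO_SIZE 19 /* 17 (max push) + 2 (max remaining digits) */

/* Bit widths used to emit 1, 2 or 3 decimal digits in QR numeric mode. */
static const size_t NUM_CHARS_BITS[] = { 0, 4, 7, 10 };

struct dec_fifo {
        uint8_t digits[MAX_FIFO_SIZE];
        size_t len;
};

/* Push the `len` low decimal digits of `data`, least significant at index 0,
 * shifting older digits toward the tail (they will be popped first). */
static void dec_fifo_push(struct dec_fifo *f, uint64_t data, size_t len)
{
        for (size_t i = f->len; i-- > 0;)
                f->digits[i + len] = f->digits[i];
        for (size_t i = 0; i < len; i++) {
                f->digits[i] = data % 10;
                data /= 10;
        }
        f->len += len;
}

/* Pop up to 3 digits; returns 0 when empty, otherwise sets the value and
 * the number of bits it occupies in the QR bitstream. */
static int dec_fifo_pop3(struct dec_fifo *f, uint16_t *out, size_t *bits)
{
        if (!f->len)
                return 0;

        size_t poplen = f->len < 3 ? f->len : 3;
        uint16_t v = 0, exp = 1;

        f->len -= poplen;
        for (size_t i = 0; i < poplen; i++) {
                v += f->digits[f->len + i] * exp;
                exp *= 10;
        }
        *out = v;
        *bits = NUM_CHARS_BITS[poplen];
        return 1;
}

int main(void)
{
        struct dec_fifo f = { 0 };
        uint16_t v;
        size_t bits;

        dec_fifo_push(&f, 12345, 5); /* digits 1 2 3 4 5 */
        while (dec_fifo_pop3(&f, &v, &bits))
                printf("%u (%zu bits)\n", (unsigned)v, bits);
        return 0;
}

Running this prints "123 (10 bits)" then "45 (7 bits)", matching the standard QR numeric grouping of the string "12345".
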
@@ -375,8 +415,7 @@ impl Segment<'_> {
struct SegmentIterator<'a> {
segment: &'a Segment<'a>,
offset: usize,
- carry: u64,
- carry_len: usize,
+ decfifo: DecFifo,
}
impl Iterator for SegmentIterator<'_> {
@@ -386,7 +425,7 @@ impl Iterator for SegmentIterator<'_> {
match self.segment {
Segment::Binary(data) => {
if self.offset < data.len() {
- let byte = data[self.offset] as u16;
+ let byte = u16::from(data[self.offset]);
self.offset += 1;
Some((byte, 8))
} else {
@@ -394,31 +433,17 @@ impl Iterator for SegmentIterator<'_> {
}
}
Segment::Numeric(data) => {
- if self.carry_len < 3 && self.offset < data.len() {
- // If there are less than 3 decimal digits in the carry,
- // take the next 7 bytes of input, and add them to the carry.
+ if self.decfifo.len < 3 && self.offset < data.len() {
+ // If there are fewer than 3 decimal digits in the FIFO,
+ // take the next 7 bytes of input and push them to the FIFO.
let mut buf = [0u8; 8];
let len = 7.min(data.len() - self.offset);
buf[..len].copy_from_slice(&data[self.offset..self.offset + len]);
let chunk = u64::from_le_bytes(buf);
- let pow = u64::pow(10, BYTES_TO_DIGITS[len] as u32);
- self.carry = chunk + self.carry * pow;
+ self.decfifo.push(chunk, BYTES_TO_DIGITS[len]);
self.offset += len;
- self.carry_len += BYTES_TO_DIGITS[len];
- }
- match self.carry_len {
- 0 => None,
- len => {
- // take the next 3 decimal digits of the carry
- // and return 10bits of numeric data.
- let out_len = 3.min(len);
- self.carry_len -= out_len;
- let pow = u64::pow(10, self.carry_len as u32);
- let out = (self.carry / pow) as u16;
- self.carry = self.carry % pow;
- Some((out, NUM_CHARS_BITS[out_len]))
- }
}
+ self.decfifo.pop3()
}
}
}
@@ -569,8 +594,8 @@ struct EncodedMsgIterator<'a> {
impl Iterator for EncodedMsgIterator<'_> {
type Item = u8;
- // Send the bytes in interleaved mode, first byte of first block of group1,
- // then first byte of second block of group1, ...
+ /// Send the bytes in interleaved mode, first byte of first block of group1,
+ /// then first byte of second block of group1, ...
fn next(&mut self) -> Option<Self::Item> {
let em = self.em;
let blocks = em.g1_blocks + em.g2_blocks;
@@ -638,7 +663,7 @@ impl QrImage<'_> {
self.data.fill(0);
}
- // Set pixel to light color.
+ /// Set pixel to light color.
fn set(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
let mut v = self.data[off];
@@ -646,13 +671,13 @@ impl QrImage<'_> {
self.data[off] = v;
}
- // Invert a module color.
+ /// Invert a module color.
fn xor(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
self.data[off] ^= 0x80 >> (x % 8);
}
- // Draw a light square at (x, y) top left corner.
+ /// Draw a light square at (x, y) top left corner.
fn draw_square(&mut self, x: u8, y: u8, size: u8) {
for k in 0..size {
self.set(x + k, y);
@@ -784,7 +809,7 @@ impl QrImage<'_> {
vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6))
}
- // Returns true if the module is reserved (Not usable for data and EC).
+ /// Returns true if the module is reserved (Not usable for data and EC).
fn is_reserved(&self, x: u8, y: u8) -> bool {
self.is_alignment(x, y)
|| self.is_finder(x, y)
@@ -793,13 +818,14 @@ impl QrImage<'_> {
|| self.is_version_info(x, y)
}
- // Last module to draw, at bottom left corner.
+ /// Last module to draw, at bottom left corner.
fn is_last(&self, x: u8, y: u8) -> bool {
x == 0 && y == self.width - 1
}
- // Move to the next module according to QR code order.
- // From bottom right corner, to bottom left corner.
+ /// Move to the next module according to QR code order.
+ ///
+ /// From bottom right corner, to bottom left corner.
fn next(&self, x: u8, y: u8) -> (u8, u8) {
let x_adj = if x <= 6 { x + 1 } else { x };
let column_type = (self.width - x_adj) % 4;
@@ -812,7 +838,7 @@ impl QrImage<'_> {
}
}
- // Find next module that can hold data.
+ /// Find next module that can hold data.
fn next_available(&self, x: u8, y: u8) -> (u8, u8) {
let (mut x, mut y) = self.next(x, y);
while self.is_reserved(x, y) && !self.is_last(x, y) {
@@ -841,7 +867,7 @@ impl QrImage<'_> {
}
}
- // Apply checkerboard mask to all non-reserved modules.
+ /// Apply checkerboard mask to all non-reserved modules.
fn apply_mask(&mut self) {
for x in 0..self.width {
for y in 0..self.width {
@@ -852,7 +878,7 @@ impl QrImage<'_> {
}
}
- // Draw the QR code with the provided data iterator.
+ /// Draw the QR code with the provided data iterator.
fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
// First clear the table, as it may have already some data.
self.clear();
@@ -876,7 +902,7 @@ impl QrImage<'_> {
/// will be encoded as a binary segment; otherwise it will be encoded
/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data that needs to be encoded; must be less
-/// than data_size.
+/// than `data_size`.
/// * `data_size`: Size of the data buffer; it should be at least 4071 bytes to hold
/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index c585f1e8803e..cb0f68d7f8ea 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -23,7 +23,6 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index a28b22fdd7a4..38f82391bfda 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -141,6 +142,14 @@
* various bugs in this area with inconsistencies between the capability
* flag and per-plane properties.
*
+ * IN_FORMATS_ASYNC:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane for asynchronous flips. The blob is a struct
+ * drm_format_modifier_blob. Userspace cannot change this property. This is an
+ * optional property; if it is absent, userspace should expect the atomic
+ * ioctl to fail when the format/modifier pair is not supported by that plane
+ * for asynchronous flips (see the parsing sketch after this hunk).
+ *
* SIZE_HINTS:
* Blob property which contains the set of recommended plane sizes
* which can be used for simple "cursor like" use cases (eg. no scaling).
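
For context on how userspace consumes such a blob (IN_FORMATS and IN_FORMATS_ASYNC share the same layout), here is a hedged sketch built on the uapi structs from drm_mode.h; obtaining the blob itself, e.g. via libdrm's drmModeGetPropertyBlob(), is assumed:

#include <stdint.h>
#include <stdio.h>
#include <drm/drm_mode.h> /* uapi header; install path may differ */

/* Walk a struct drm_format_modifier_blob and print each supported
 * (format, modifier) pair. Each modifier entry carries a 64-bit mask
 * over the format array, starting at format index `offset`. */
static void dump_format_modifiers(const struct drm_format_modifier_blob *blob)
{
        const char *base = (const char *)blob;
        const uint32_t *formats = (const uint32_t *)(base + blob->formats_offset);
        const struct drm_format_modifier *mods =
                (const struct drm_format_modifier *)(base + blob->modifiers_offset);

        for (uint32_t i = 0; i < blob->count_modifiers; i++) {
                for (uint32_t j = 0; j < 64; j++) {
                        if (mods[i].offset + j >= blob->count_formats)
                                break;
                        if (mods[i].formats & (1ULL << j))
                                printf("format 0x%08x modifier 0x%016llx\n",
                                       formats[mods[i].offset + j],
                                       (unsigned long long)mods[i].modifier);
                }
        }
}
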
@@ -185,9 +194,13 @@ modifiers_ptr(struct drm_format_modifier_blob *blob)
return (struct drm_format_modifier *)(((char *)blob) + blob->modifiers_offset);
}
-static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane)
+static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
+ struct drm_plane *plane,
+ bool (*format_mod_supported)
+ (struct drm_plane *plane,
+ u32 format,
+ u64 modifier))
{
- const struct drm_mode_config *config = &dev->mode_config;
struct drm_property_blob *blob;
struct drm_format_modifier *mod;
size_t blob_size, formats_size, modifiers_size;
@@ -213,7 +226,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
blob = drm_property_create_blob(dev, blob_size, NULL);
if (IS_ERR(blob))
- return -1;
+ return NULL;
blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
@@ -229,10 +242,10 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod = modifiers_ptr(blob_data);
for (i = 0; i < plane->modifier_count; i++) {
for (j = 0; j < plane->format_count; j++) {
- if (!plane->funcs->format_mod_supported ||
- plane->funcs->format_mod_supported(plane,
- plane->format_types[j],
- plane->modifiers[i])) {
+ if (!format_mod_supported ||
+ format_mod_supported(plane,
+ plane->format_types[j],
+ plane->modifiers[i])) {
mod->formats |= 1ULL << j;
}
}
@@ -243,10 +256,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod++;
}
- drm_object_attach_property(&plane->base, config->modifiers_property,
- blob->base.id);
-
- return 0;
+ return blob;
}
/**
@@ -358,6 +368,7 @@ static int __drm_universal_plane_init(struct drm_device *dev,
const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
static const uint64_t default_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
};
@@ -469,8 +480,24 @@ static int __drm_universal_plane_init(struct drm_device *dev,
drm_plane_create_hotspot_properties(plane);
}
- if (format_modifier_count)
- create_in_format_blob(dev, plane);
+ if (format_modifier_count) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->modifiers_property,
+ blob->base.id);
+ }
+
+ if (plane->funcs->format_mod_supported_async) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported_async);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->async_modifiers_property,
+ blob->base.id);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7982be4b0306..747d248aaf02 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -23,6 +23,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index bdb51c8f262e..a23fc712a8b7 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -453,7 +453,13 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
}
mutex_lock(&dev->object_name_lock);
- /* re-export the original imported/exported object */
+ /* re-export the original imported object */
+ if (obj->import_attach) {
+ dmabuf = obj->import_attach->dmabuf;
+ get_dma_buf(dmabuf);
+ goto out_have_obj;
+ }
+
if (obj->dma_buf) {
get_dma_buf(obj->dma_buf);
dmabuf = obj->dma_buf;
@@ -599,6 +605,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
/*
* drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
@@ -608,7 +615,16 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
!obj->funcs->get_sg_table)
return -ENOSYS;
- return drm_gem_pin(obj);
+ if (!obj->funcs->pin)
+ return 0;
+
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (ret)
+ return ret;
+ ret = obj->funcs->pin(obj);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
}
EXPORT_SYMBOL(drm_gem_map_attach);
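
A hedged driver-side sketch of the locking contract this change establishes: the pin callback now runs with the object's reservation lock held, so a (hypothetical) driver callback may assert it:

#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

/* Hypothetical driver pin callback; not from this series. */
static int foo_gem_pin(struct drm_gem_object *obj)
{
        dma_resv_assert_held(obj->resv); /* taken by drm_gem_map_attach() */

        /* pin the backing storage here so the sg_table stays valid */
        return 0;
}
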
@@ -625,8 +641,16 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
- drm_gem_unpin(obj);
+ if (!obj->funcs->unpin)
+ return;
+
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (drm_WARN_ON(obj->dev, ret))
+ return;
+ obj->funcs->unpin(obj);
+ dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_map_detach);
@@ -707,7 +731,7 @@ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- return drm_gem_vmap(obj, map);
+ return drm_gem_vmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -723,7 +747,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
@@ -804,7 +828,6 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
- .cache_sgt_mapping = true,
.attach = drm_gem_map_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
@@ -911,6 +934,26 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
}
EXPORT_SYMBOL(drm_gem_prime_export);
+
+/**
+ * drm_gem_is_prime_exported_dma_buf - check if the DMA-BUF was exported
+ * from a GEM object belonging to @dev
+ * @dev: drm_device to check against
+ * @dma_buf: dma-buf object to check
+ *
+ * Return: true if the DMA-BUF was exported from a GEM object belonging
+ * to @dev, false otherwise.
+ */
+bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ return (dma_buf->ops == &drm_gem_prime_dmabuf_ops) && (obj->dev == dev);
+}
+EXPORT_SYMBOL(drm_gem_is_prime_exported_dma_buf);
+
/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
@@ -934,16 +977,14 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct drm_gem_object *obj;
int ret;
- if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
obj = dma_buf->priv;
- if (obj->dev == dev) {
- /*
- * Importing dmabuf exported from our own gem increases
- * refcount on gem itself instead of f_count of dmabuf.
- */
- drm_gem_object_get(obj);
- return obj;
- }
+ drm_gem_object_get(obj);
+ return obj;
}
if (!dev->driver->gem_prime_import_sg_table)
@@ -998,7 +1039,7 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
- return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
+ return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev));
}
EXPORT_SYMBOL(drm_gem_prime_import);
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 79517bd4418f..ded9461df5f2 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -25,6 +25,7 @@
#include <linux/debugfs.h>
#include <linux/dynamic_debug.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index 6cc39e30781f..8959f7084e0b 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -7,6 +7,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 7ba16323e7c2..6b3541159c0f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -958,15 +958,16 @@ static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res)
* cleaned up when the DRM device goes away.
*
* See drm_kms_helper_poll_init() for more information.
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise.
*/
-int drmm_kms_helper_poll_init(struct drm_device *dev)
+void drmm_kms_helper_poll_init(struct drm_device *dev)
{
+ int ret;
+
drm_kms_helper_poll_init(dev);
- return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ ret = drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ if (ret)
+ drm_warn(dev, "Connector status will not be updated, error %d\n", ret);
}
EXPORT_SYMBOL(drmm_kms_helper_poll_init);
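
A hedged caller-side sketch of the new void signature (driver names hypothetical): since cleanup is registered as a managed action, probe paths simply drop the error check:

#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>

static int foo_kms_init(struct drm_device *dev)
{
        /* Teardown is queued as a drmm action; if queuing fails it is only
         * logged, so there is nothing for the caller to unwind. */
        drmm_kms_helper_poll_init(dev);

        return drm_dev_register(dev, 0);
}
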
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
index dd33fec5aabd..c0948586b7fd 100644
--- a/drivers/gpu/drm/drm_self_refresh_helper.c
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -7,6 +7,7 @@
*/
#include <linux/average.h>
#include <linux/bitops.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 250819fbc5ce..fcbcaaa36b5f 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -3,6 +3,7 @@
* Copyright (C) 2016 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_suballoc.c b/drivers/gpu/drm/drm_suballoc.c
index 38cc7a123819..879ea33dbbc4 100644
--- a/drivers/gpu/drm/drm_suballoc.c
+++ b/drivers/gpu/drm/drm_suballoc.c
@@ -42,6 +42,8 @@
#include <drm/drm_suballoc.h>
#include <drm/drm_print.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4f2ab8a7b50f..e1b0fa4000cd 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -195,6 +195,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/eventfd.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
@@ -741,7 +742,7 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
- int fd, int handle)
+ int fd, int handle, u64 point)
{
struct dma_fence *fence = sync_file_get_fence(fd);
struct drm_syncobj *syncobj;
@@ -755,14 +756,24 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, fence);
+ if (point) {
+ struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+ if (!chain)
+ return -ENOMEM;
+
+ drm_syncobj_add_point(syncobj, chain, fence, point);
+ } else {
+ drm_syncobj_replace_fence(syncobj, fence);
+ }
+
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
}
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
- int handle, int *p_fd)
+ int handle, u64 point, int *p_fd)
{
int ret;
struct dma_fence *fence;
@@ -772,7 +783,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, point, 0, &fence);
if (ret)
goto err_put_fd;
@@ -869,6 +880,9 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -876,13 +890,18 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
return drm_syncobj_export_sync_file(file_private, args->handle,
- &args->fd);
+ point, &args->fd);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_handle_to_fd(file_private, args->handle,
&args->fd);
@@ -893,6 +912,9 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -900,14 +922,20 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
return drm_syncobj_import_sync_file_fence(file_private,
args->fd,
- args->handle);
+ args->handle,
+ point);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
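
A hedged userspace sketch of the extended export path; the TIMELINE flag and the struct's point field come from this series' uapi additions, so updated headers are assumed and error handling is trimmed. The reverse direction (FD_TO_HANDLE with IMPORT_SYNC_FILE | TIMELINE) wraps the fence in a dma_fence_chain at the given point, as the kernel side above shows.

#include <stdint.h>
#include <xf86drm.h> /* libdrm; pulls in the drm.h uapi definitions */

/* Export the fence at timeline `point` of a syncobj as a sync_file fd. */
static int export_point_as_sync_file(int fd, uint32_t handle,
                                     uint64_t point, int *out_fd)
{
        struct drm_syncobj_handle args = {
                .handle = handle,
                .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE |
                         DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE,
                .point = point, /* field added by this series */
        };
        int ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);

        if (!ret)
                *out_fd = args.fd;
        return ret;
}
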
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 60c1f26edb6f..a455c56dbbeb 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -319,7 +319,7 @@ static const struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
.size = 0,
- .read_new = edid_show,
+ .read = edid_show,
};
static const struct bin_attribute *const connector_bin_attrs[] = {
@@ -329,7 +329,7 @@ static const struct bin_attribute *const connector_bin_attrs[] = {
static const struct attribute_group connector_dev_group = {
.attrs = connector_dev_attrs,
- .bin_attrs_new = connector_bin_attrs,
+ .bin_attrs = connector_bin_attrs,
};
static const struct attribute_group *connector_dev_groups[] = {
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 78958ddf8485..46f59883183d 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -487,7 +487,8 @@ out:
static void vblank_disable_fn(struct timer_list *t)
{
- struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
+ struct drm_vblank_crtc *vblank = timer_container_of(vblank, t,
+ disable_timer);
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 9cc71120246f..e4e1873f0e1e 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -2,6 +2,8 @@
#include <uapi/linux/sched/types.h>
+#include <linux/export.h>
+
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 83229a031af0..58659c16874c 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -23,6 +23,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index edbeab88ff2b..95b8a2e4bda6 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -10,6 +10,7 @@
*/
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -343,17 +344,18 @@ EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder);
/**
* drm_writeback_connector_cleanup - Cleanup the writeback connector
* @dev: DRM device
- * @wb_connector: Pointer to the writeback connector to clean up
+ * @data: Pointer to the writeback connector to clean up
*
* This will decrement the reference counter of blobs and destroy properties. It
* will also clean the remaining jobs in this writeback connector. Caution: This helper will not
* clean up the attached encoder and the drm_connector.
*/
static void drm_writeback_connector_cleanup(struct drm_device *dev,
- struct drm_writeback_connector *wb_connector)
+ void *data)
{
unsigned long flags;
struct drm_writeback_job *pos, *n;
+ struct drm_writeback_connector *wb_connector = data;
delete_writeback_properties(dev);
drm_property_blob_put(wb_connector->pixel_formats_blob_ptr);
@@ -405,7 +407,7 @@ int drmm_writeback_connector_init(struct drm_device *dev,
if (ret)
return ret;
- ret = drmm_add_action_or_reset(dev, (void *)drm_writeback_connector_cleanup,
+ ret = drmm_add_action_or_reset(dev, drm_writeback_connector_cleanup,
wb_connector);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 42e57d142554..40a50c60dfff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -39,7 +39,7 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
@@ -51,7 +51,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3c0a5c3e0e3d..76c742328edb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -534,7 +534,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&submit->sched_job,
&ctx->sched_entity[args->pipe],
- 1, submit->ctx);
+ 1, submit->ctx, file->client_id);
if (ret)
goto err_submit_put;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 76a3a3e517d8..df4232d7e135 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -40,11 +40,11 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
int change;
/*
- * If the GPU managed to complete this jobs fence, the timout is
- * spurious. Bail out.
+ * If the GPU managed to complete this job's fence, the timeout fired
+ * before the free-job worker ran. The timeout is spurious, so bail out.
*/
if (dma_fence_is_signaled(submit->out_fence))
- goto out_no_timeout;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/*
* If the GPU is still making forward progress on the front-end (which
@@ -70,7 +70,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
gpu->hangcheck_dma_addr = dma_addr;
gpu->hangcheck_primid = primid;
gpu->hangcheck_fence = gpu->completed_fence;
- goto out_no_timeout;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
/* block scheduler */
@@ -86,11 +86,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
drm_sched_resubmit_jobs(&gpu->sched);
drm_sched_start(&gpu->sched, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
-
-out_no_timeout:
- list_add(&sched_job->list, &sched_job->sched->pending_list);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
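
In scheduler terms, the contract after this change reads as follows; a hedged skeleton with hypothetical (stubbed) driver helpers, not code from this series:

#include <drm/gpu_scheduler.h>

static bool foo_job_fence_signaled(struct drm_sched_job *job) { return false; } /* stub */
static void foo_recover_hw(struct drm_sched_job *job) { }                       /* stub */

static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *sched_job)
{
        if (foo_job_fence_signaled(sched_job))
                /* Spurious timeout: the scheduler keeps the job pending. */
                return DRM_GPU_SCHED_STAT_NO_HANG;

        foo_recover_hw(sched_job);
        return DRM_GPU_SCHED_STAT_RESET; /* a real recovery happened */
}
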
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f91daefa9d2b..805aa28c1723 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -636,6 +636,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
if (!ctx->drm_dev)
goto out;
+ /* check if crtc and vblank have been initialized properly */
+ if (!drm_dev_has_vblank(ctx->drm_dev))
+ goto out;
+
if (!ctx->i80_if) {
drm_crtc_handle_vblank(&ctx->crtc->base);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index fc1c5608db96..ddd73e7f26a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -56,6 +56,7 @@ static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct exynos_drm_gem **exynos_gem,
int count)
@@ -76,7 +77,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
fb->obj[i] = &exynos_gem[i]->base;
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
@@ -94,9 +95,9 @@ err:
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_framebuffer *fb;
int i;
@@ -124,7 +125,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
+ fb = exynos_drm_framebuffer_init(dev, info, mode_cmd, exynos_gem, i);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 2f841bbdddc5..fdc6cb40cc9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -14,6 +14,7 @@
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct exynos_drm_gem **exynos_gem,
int count);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 9526a25e90ac..93de25b77e68 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -116,7 +116,10 @@ int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
return PTR_ERR(exynos_gem);
helper->fb =
- exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
+ exynos_drm_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index c394cc702d7d..205c238cc73a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -187,6 +187,7 @@ struct fimd_context {
u32 i80ifcon;
bool i80_if;
bool suspended;
+ bool dp_clk_enabled;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
atomic_t win_updated;
@@ -1047,7 +1048,18 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
struct fimd_context *ctx = container_of(clk, struct fimd_context,
dp_clk);
u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+
+ if (enable == ctx->dp_clk_enabled)
+ return;
+
+ if (enable)
+ pm_runtime_resume_and_get(ctx->dev);
+
+ ctx->dp_clk_enabled = enable;
writel(val, ctx->regs + DP_MIE_CLKCON);
+
+ if (!enable)
+ pm_runtime_put(ctx->dev);
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 4787fee4696f..e3fbb45f37a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -7,7 +7,6 @@
#include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>
#include <linux/module.h>
@@ -174,7 +173,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
- DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);
+ DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %p\n", obj->filp);
return exynos_gem;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index ea9f66037600..03c8490af4f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -271,7 +271,7 @@ static inline struct exynos_drm_ipp_task *
task->src.rect.h = task->dst.rect.h = UINT_MAX;
task->transform.rotation = DRM_MODE_ROTATE_0;
- DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %p\n", task);
return task;
}
@@ -339,7 +339,7 @@ static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
}
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Got task %pK configuration from userspace\n",
+ "Got task %p configuration from userspace\n",
task);
return 0;
}
@@ -394,7 +394,7 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
- DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %p\n", task);
exynos_drm_ipp_task_release_buf(&task->src);
exynos_drm_ipp_task_release_buf(&task->dst);
@@ -559,7 +559,7 @@ static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
DRM_EXYNOS_IPP_FORMAT_DESTINATION);
if (!fmt) {
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Task %pK: %s format not supported\n",
+ "Task %p: %s format not supported\n",
task, buf == src ? "src" : "dst");
return -EINVAL;
}
@@ -609,7 +609,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
bool rotate = (rotation != DRM_MODE_ROTATE_0);
bool scale = false;
- DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %p\n", task);
if (src->rect.w == UINT_MAX)
src->rect.w = src->buf.width;
@@ -625,7 +625,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
dst->rect.x + dst->rect.w > (dst->buf.width) ||
dst->rect.y + dst->rect.h > (dst->buf.height)) {
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Task %pK: defined area is outside provided buffers\n",
+ "Task %p: defined area is outside provided buffers\n",
task);
return -EINVAL;
}
@@ -642,7 +642,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
src->buf.fourcc != dst->buf.fourcc)) {
- DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %p: hw capabilities exceeded\n",
task);
return -EINVAL;
}
@@ -655,7 +655,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
if (ret)
return ret;
- DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n",
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %p: all checks done.\n",
task);
return ret;
@@ -667,25 +667,25 @@ static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
int ret = 0;
- DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %p\n",
task);
ret = exynos_drm_ipp_task_setup_buffer(src, filp);
if (ret) {
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Task %pK: src buffer setup failed\n",
+ "Task %p: src buffer setup failed\n",
task);
return ret;
}
ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
if (ret) {
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Task %pK: dst buffer setup failed\n",
+ "Task %p: dst buffer setup failed\n",
task);
return ret;
}
- DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %p: buffers prepared.\n",
task);
return ret;
@@ -764,7 +764,7 @@ void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
struct exynos_drm_ipp *ipp = task->ipp;
unsigned long flags;
- DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
+ DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %p done: %d\n",
ipp->id, task, ret);
spin_lock_irqsave(&ipp->lock, flags);
@@ -807,7 +807,7 @@ static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
spin_unlock_irqrestore(&ipp->lock, flags);
DRM_DEV_DEBUG_DRIVER(ipp->dev,
- "ipp: %d, selected task %pK to run\n", ipp->id,
+ "ipp: %d, selected task %p to run\n", ipp->id,
task);
ret = ipp->funcs->commit(ipp, task);
@@ -917,14 +917,14 @@ int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
*/
if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
DRM_DEV_DEBUG_DRIVER(ipp->dev,
- "ipp: %d, nonblocking processing task %pK\n",
+ "ipp: %d, nonblocking processing task %p\n",
ipp->id, task);
task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
exynos_drm_ipp_schedule_task(task->ipp, task);
ret = 0;
} else {
- DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n",
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %p\n",
ipp->id, task);
exynos_drm_ipp_schedule_task(ipp, task);
ret = wait_event_interruptible(ipp->done_wq,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index b34ec6728337..29a8366513fa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -379,11 +379,11 @@ static int exynos_mic_probe(struct platform_device *pdev)
struct resource res;
int ret, i;
- mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
- if (!mic) {
+ mic = devm_drm_bridge_alloc(dev, struct exynos_mic, bridge, &mic_bridge_funcs);
+ if (IS_ERR(mic)) {
DRM_DEV_ERROR(dev,
"mic: Failed to allocate memory for MIC object\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(mic);
goto err;
}
@@ -421,7 +421,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
- mic->bridge.funcs = &mic_bridge_funcs;
mic->bridge.of_node = dev->of_node;
drm_bridge_add(&mic->bridge);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e644e2382d77..e094b8bbc0f1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -159,7 +159,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
static void vidi_fake_vblank_timer(struct timer_list *t)
{
- struct vidi_context *ctx = from_timer(ctx, t, timer);
+ struct vidi_context *ctx = timer_container_of(ctx, t, timer);
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 03b076db9381..3bbfc1b56a65 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -260,7 +260,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev;
struct drm_device *drm;
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *base;
struct clk *pix_clk_in;
char pix_clk_name[32];
@@ -278,8 +277,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
return -ENODEV;
fsl_dev->soc = id->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
return ret;
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index aa2ea128aa2f..a2acaa699dd5 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
- depends on DRM && PCI && X86 && MMU && HAS_IOPORT
+ depends on DRM && PCI && X86 && HAS_IOPORT
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 8edefea2ef59..4a37136f90f4 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -6,7 +6,6 @@
**************************************************************************/
#include <linux/fb.h>
-#include <linux/pfn_t.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -33,7 +32,7 @@ static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
for (i = 0; i < page_num; ++i) {
- err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ err = vmf_insert_mixed(vma, address, pfn);
if (unlikely(err & VM_FAULT_ERROR))
break;
address += PAGE_SIZE;
@@ -203,7 +202,10 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
return PTR_ERR(backing);
obj = &backing->base;
- fb = psb_framebuffer_create(dev, &mode_cmd, obj);
+ fb = psb_framebuffer_create(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err_drm_gem_object_put;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1a374702b696..e69b537ded6b 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -29,24 +29,23 @@ static const struct drm_framebuffer_funcs psb_fb_funcs = {
*/
static int psb_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- const struct drm_format_info *info;
int ret;
/*
* Reject unknown formats, YUV formats, and formats with more than
* 4 bytes per pixel.
*/
- info = drm_get_format_info(dev, mode_cmd);
- if (!info || !info->depth || info->cpp[0] > 4)
+ if (!info->depth || info->cpp[0] > 4)
return -EINVAL;
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = obj;
ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
if (ret) {
@@ -59,6 +58,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
/**
* psb_framebuffer_create - create a framebuffer backed by gt
* @dev: our DRM device
+ * @info: pixel format information
* @mode_cmd: the description of the requested mode
* @obj: the backing object
*
@@ -68,6 +68,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
* TODO: review object references
*/
struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
@@ -78,7 +79,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
+ ret = psb_framebuffer_init(dev, fb, info, mode_cmd, obj);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
@@ -96,6 +97,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
*/
static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
struct drm_gem_object *obj;
@@ -110,7 +112,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return ERR_PTR(-ENOENT);
/* Let the core code do all the work */
- fb = psb_framebuffer_create(dev, cmd, obj);
+ fb = psb_framebuffer_create(dev, info, cmd, obj);
if (IS_ERR(fb))
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 4d78b33eaa82..e6753282e70e 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -730,44 +730,3 @@ out:
return ret;
}
-
-int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn)
-{
- int ret;
- struct psb_mmu_pt *pt;
- uint32_t tmp;
- spinlock_t *lock = &pd->driver->lock;
-
- down_read(&pd->driver->sem);
- pt = psb_mmu_pt_map_lock(pd, virtual);
- if (!pt) {
- uint32_t *v;
-
- spin_lock(lock);
- v = kmap_atomic(pd->p);
- tmp = v[psb_mmu_pd_index(virtual)];
- kunmap_atomic(v);
- spin_unlock(lock);
-
- if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
- !(pd->invalid_pte & PSB_PTE_VALID)) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *pfn = pd->invalid_pte >> PAGE_SHIFT;
- goto out;
- }
- tmp = pt->v[psb_mmu_pt_index(virtual)];
- if (!(tmp & PSB_PTE_VALID)) {
- ret = -EINVAL;
- } else {
- ret = 0;
- *pfn = tmp >> PAGE_SHIFT;
- }
- psb_mmu_pt_unmap_unlock(pt);
-out:
- up_read(&pd->driver->sem);
- return ret;
-}
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
index d4b5720ef08e..e6d39703718c 100644
--- a/drivers/gpu/drm/gma500/mmu.h
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -71,8 +71,6 @@ extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
-extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn);
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index de8ccfe9890f..ea9b41af0867 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -658,10 +658,3 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.prepare = gma_crtc_prepare,
.commit = gma_crtc_commit,
};
-
-/* Not used yet */
-const struct gma_clock_funcs mrst_clock_funcs = {
- .clock = mrst_lvds_clock,
- .limit = mrst_limit,
- .pll_is_valid = gma_pll_is_valid,
-};
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 7f77cb2b2751..0b27112ec46f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -594,6 +594,7 @@ extern void psb_modeset_cleanup(struct drm_device *dev);
/* framebuffer */
struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 9dc9dcd1b09f..979ea8ecf0d5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -182,7 +182,6 @@ struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg,
void gma_i2c_destroy(struct gma_i2c_chan *chan);
int psb_intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter);
-extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 8be0ec340de5..45b10f30a2a9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -12,37 +12,6 @@
#include "psb_intel_drv.h"
/**
- * psb_intel_ddc_probe
- * @adapter: Associated I2C adaptor
- */
-bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
-{
- u8 out_buf[] = { 0x0, 0x0 };
- u8 buf[2];
- int ret;
- struct i2c_msg msgs[] = {
- {
- .addr = 0x50,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = 0x50,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- ret = i2c_transfer(adapter, msgs, 2);
- if (ret == 2)
- return true;
-
- return false;
-}
-
-/**
* psb_intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
* @adapter: Associated I2C adaptor
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index cb405771d6e2..5385a2126e45 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -309,21 +309,6 @@ out:
return ret;
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
-{
- struct gud_device *gdrm = to_gud_device(drm);
-
- if (!gdrm->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
-}
-
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
@@ -376,7 +361,6 @@ static const struct drm_driver gud_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gud_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = "gud",
@@ -434,6 +418,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
size_t max_buffer_size = 0;
struct gud_device *gdrm;
struct drm_device *drm;
+ struct device *dma_dev;
u8 *formats_dev;
u32 *formats;
int ret, i;
@@ -609,17 +594,19 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_set_intfdata(intf, gdrm);
- gdrm->dmadev = usb_intf_get_dma_device(intf);
- if (!gdrm->dmadev)
- dev_warn(dev, "buffer sharing not supported");
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ dev_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
ret = drm_dev_register(drm, 0);
- if (ret) {
- put_device(gdrm->dmadev);
+ if (ret)
return ret;
- }
drm_kms_helper_poll_init(drm);
@@ -638,8 +625,6 @@ static void gud_disconnect(struct usb_interface *interface)
drm_kms_helper_poll_fini(drm);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
- put_device(gdrm->dmadev);
- gdrm->dmadev = NULL;
}
static int gud_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index 0d148a6f27aa..d6fb25388722 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -16,7 +16,6 @@
struct gud_device {
struct drm_device drm;
struct drm_simple_display_pipe pipe;
- struct device *dmadev;
struct work_struct work;
u32 flags;
const struct drm_format_info *xrgb8888_emulation_format;
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 77cfcf37ddd2..8d548d08f127 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -188,8 +188,13 @@ retry:
} else if (format->format == DRM_FORMAT_RGB332) {
drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state,
- gud_is_big_endian());
+ if (gud_is_big_endian()) {
+ drm_fb_xrgb8888_to_rgb565be(&dst, NULL, src, fb, rect,
+ fmtcnv_state);
+ } else {
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
+ fmtcnv_state);
+ }
} else if (format->format == DRM_FORMAT_RGB888) {
drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state);
} else {
@@ -234,7 +239,7 @@ struct gud_usb_bulk_context {
static void gud_usb_bulk_timeout(struct timer_list *t)
{
- struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);
+ struct gud_usb_bulk_context *ctx = timer_container_of(ctx, t, timer);
usb_sg_cancel(&ctx->sgr);
}
@@ -261,7 +266,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
else if (ctx.sgr.bytes != len)
ret = -EIO;
- destroy_timer_on_stack(&ctx.timer);
+ timer_destroy_on_stack(&ctx.timer);
return ret;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 98d77d74999d..d1f3f5793f34 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,7 +2,6 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI
- depends on MMU
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 95a4ed599d98..1f65c683282f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \
- dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o
+ dp/dp_aux.o dp/dp_link.o dp/dp_hw.o dp/dp_serdes.o hibmc_drm_dp.o \
+ hibmc_drm_debugfs.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
index 0a903cce1fa9..8732cd1d8cb6 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
@@ -8,6 +8,7 @@
#include <drm/drm_print.h>
#include "dp_comm.h"
#include "dp_reg.h"
+#include "dp_hw.h"
#define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4)
#define HIBMC_AUX_CMD_ADDR GENMASK(27, 8)
@@ -124,7 +125,8 @@ static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_ms
/* ret >= 0 ,ret is size; ret < 0, ret is err code */
static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
- struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux);
+ struct hibmc_dp *dp_priv = container_of(aux, struct hibmc_dp, aux);
+ struct hibmc_dp_dev *dp = dp_priv->dp_dev;
u32 aux_cmd;
int ret;
u32 val; /* val will be assigned at the beginning of readl_poll_timeout function */
@@ -151,14 +153,16 @@ static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *
return hibmc_dp_aux_parse_xfer(dp, msg);
}
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp)
+void hibmc_dp_aux_init(struct hibmc_dp *dp)
{
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
HIBMC_DP_MIN_PULSE_NUM);
dp->aux.transfer = hibmc_dp_aux_xfer;
- dp->aux.is_remote = 0;
+ dp->aux.name = "HIBMC DRM dp aux";
+ dp->aux.drm_dev = dp->drm_dev;
drm_dp_aux_init(&dp->aux);
+ dp->dp_dev->aux = &dp->aux;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
index 2c52a4476c4d..4add05c7f161 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
@@ -13,6 +13,8 @@
#include <linux/io.h>
#include <drm/display/drm_dp_helper.h>
+#include "dp_hw.h"
+
#define HIBMC_DP_LANE_NUM_MAX 2
struct hibmc_link_status {
@@ -32,12 +34,13 @@ struct hibmc_dp_link {
};
struct hibmc_dp_dev {
- struct drm_dp_aux aux;
+ struct drm_dp_aux *aux;
struct drm_device *dev;
void __iomem *base;
struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */
struct hibmc_dp_link link;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ void __iomem *serdes_base;
};
#define dp_field_modify(reg_value, mask, val) \
@@ -57,7 +60,10 @@ struct hibmc_dp_dev {
mutex_unlock(&_dp->lock); \
} while (0)
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp);
+void hibmc_dp_aux_init(struct hibmc_dp *dp);
int hibmc_dp_link_training(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX]);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
index 74dd9956144e..08f9e1caf7fc 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
@@ -15,5 +15,7 @@
#define HIBMC_DP_CLK_EN 0x7
#define HIBMC_DP_SYNC_EN_MASK 0x3
#define HIBMC_DP_LINK_RATE_CAL 27
+#define HIBMC_DP_SYNC_DELAY(lanes) ((lanes) == 0x2 ? 86 : 46)
+#define HIBMC_DP_INT_ENABLE 0xc
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
index a8d543881c09..8f0daec7d174 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
@@ -72,6 +72,9 @@ static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *m
HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size);
hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION,
+ HIBMC_DP_SYNC_DELAY(dp->link.cap.lanes));
}
static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
@@ -151,6 +154,7 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
{
struct drm_device *drm_dev = dp->drm_dev;
struct hibmc_dp_dev *dp_dev;
+ int ret;
dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL);
if (!dp_dev)
@@ -163,10 +167,14 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
dp_dev->dev = drm_dev;
dp_dev->base = dp->mmio + HIBMC_DP_OFFSET;
- hibmc_dp_aux_init(dp_dev);
+ hibmc_dp_aux_init(dp);
+
+ ret = hibmc_dp_serdes_init(dp_dev);
+ if (ret)
+ return ret;
dp_dev->link.cap.lanes = 0x2;
- dp_dev->link.cap.link_rate = DP_LINK_BW_2_7;
+ dp_dev->link.cap.link_rate = DP_LINK_BW_8_1;
/* hdcp data */
writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
@@ -181,6 +189,36 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
return 0;
}
+void hibmc_dp_enable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+}
+
+void hibmc_dp_disable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+}
+
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM, 0x9);
+ writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL);
+ writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL);
+}
+
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable)
{
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
@@ -218,3 +256,52 @@ int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode)
return 0;
}
+
+void hibmc_dp_reset_link(struct hibmc_dp *dp)
+{
+ dp->dp_dev->link.status.clock_recovered = false;
+ dp->dp_dev->link.status.channel_equalized = false;
+}
+
+static const struct hibmc_dp_color_raw g_rgb_raw[] = {
+ {CBAR_COLOR_BAR, 0x000, 0x000, 0x000},
+ {CBAR_WHITE, 0xfff, 0xfff, 0xfff},
+ {CBAR_RED, 0xfff, 0x000, 0x000},
+ {CBAR_ORANGE, 0xfff, 0x800, 0x000},
+ {CBAR_YELLOW, 0xfff, 0xfff, 0x000},
+ {CBAR_GREEN, 0x000, 0xfff, 0x000},
+ {CBAR_CYAN, 0x000, 0x800, 0x800},
+ {CBAR_BLUE, 0x000, 0x000, 0xfff},
+ {CBAR_PURPLE, 0x800, 0x000, 0x800},
+ {CBAR_BLACK, 0x000, 0x000, 0x000},
+};
+
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+ struct hibmc_dp_color_raw raw_data;
+
+ if (cfg->enable) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(9),
+ cfg->self_timing);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(8, 1),
+ cfg->dynamic_rate);
+ if (cfg->pattern == CBAR_COLOR_BAR) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 0);
+ } else {
+ raw_data = g_rgb_raw[cfg->pattern];
+ drm_dbg_dp(dp->drm_dev, "r:%x g:%x b:%x\n", raw_data.r_value,
+ raw_data.g_value, raw_data.b_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 1);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(23, 12),
+ raw_data.r_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(23, 12),
+ raw_data.g_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(11, 0),
+ raw_data.b_value);
+ }
+ }
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(0), cfg->enable);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
index 4dc13b3d9875..665f5b166dfb 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
@@ -10,19 +10,55 @@
#include <drm/drm_encoder.h>
#include <drm/drm_connector.h>
#include <drm/drm_print.h>
+#include <drm/display/drm_dp_helper.h>
struct hibmc_dp_dev;
+enum hibmc_dp_cbar_pattern {
+ CBAR_COLOR_BAR,
+ CBAR_WHITE,
+ CBAR_RED,
+ CBAR_ORANGE,
+ CBAR_YELLOW,
+ CBAR_GREEN,
+ CBAR_CYAN,
+ CBAR_BLUE,
+ CBAR_PURPLE,
+ CBAR_BLACK,
+};
+
+struct hibmc_dp_color_raw {
+ enum hibmc_dp_cbar_pattern pattern;
+ u32 r_value;
+ u32 g_value;
+ u32 b_value;
+};
+
+struct hibmc_dp_cbar_cfg {
+ u8 enable;
+ u8 self_timing;
+	u8 dynamic_rate; /* 0: static; 1-255: shift one color every N frames */
+ enum hibmc_dp_cbar_pattern pattern;
+};
+
struct hibmc_dp {
struct hibmc_dp_dev *dp_dev;
struct drm_device *drm_dev;
struct drm_encoder encoder;
struct drm_connector connector;
void __iomem *mmio;
+ struct drm_dp_aux aux;
+ struct hibmc_dp_cbar_cfg cfg;
+ u32 irq_status;
};
int hibmc_dp_hw_init(struct hibmc_dp *dp);
int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode);
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable);
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg);
+void hibmc_dp_reset_link(struct hibmc_dp *dp);
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp);
+void hibmc_dp_enable_int(struct hibmc_dp *dp);
+void hibmc_dp_disable_int(struct hibmc_dp *dp);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
index f6355c16cc0a..74f7832ea53e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -9,6 +9,22 @@
#define HIBMC_EQ_MAX_RETRY 5
+static inline int hibmc_dp_get_serdes_rate_cfg(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.link_rate) {
+ case DP_LINK_BW_1_62:
+ return DP_SERDES_BW_1_62;
+ case DP_LINK_BW_2_7:
+ return DP_SERDES_BW_2_7;
+ case DP_LINK_BW_5_4:
+ return DP_SERDES_BW_5_4;
+ case DP_LINK_BW_8_1:
+ return DP_SERDES_BW_8_1;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
{
u8 buf[2];
@@ -26,7 +42,7 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set rate and lane count */
buf[0] = dp->link.cap.link_rate;
buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes;
- ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
@@ -35,17 +51,13 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set 8b/10b and downspread */
buf[0] = DP_SPREAD_AMP_0_5;
buf[1] = DP_SET_ANSI_8B10B;
- ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
}
- ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
- if (ret)
- drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
-
- return ret;
+ return 0;
}
static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
@@ -84,7 +96,7 @@ static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -108,9 +120,13 @@ static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp)
return ret;
for (i = 0; i < dp->link.cap.lanes; i++)
- train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -137,21 +153,29 @@ static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp,
return false;
}
-static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
+static int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
{
+ int ret;
+
switch (dp->link.cap.link_rate) {
case DP_LINK_BW_2_7:
dp->link.cap.link_rate = DP_LINK_BW_1_62;
- return 0;
+ break;
case DP_LINK_BW_5_4:
dp->link.cap.link_rate = DP_LINK_BW_2_7;
- return 0;
+ break;
case DP_LINK_BW_8_1:
dp->link.cap.link_rate = DP_LINK_BW_5_4;
- return 0;
+ break;
default:
return -EINVAL;
}
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ return hibmc_dp_serdes_rate_switch(ret, dp);
}
static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
@@ -159,6 +183,7 @@ static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
switch (dp->link.cap.lanes) {
case 0x2:
dp->link.cap.lanes--;
+ drm_dbg_dp(dp->dev, "dp link training reduce to 1 lane\n");
break;
case 0x1:
drm_err(dp->dev, "dp link training reduce lane failed, already reach minimum\n");
@@ -185,10 +210,10 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
voltage_tries = 1;
for (cr_tries = 0; cr_tries < 80; cr_tries++) {
- drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_clock_recovery_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "Get lane status failed\n");
return ret;
}
@@ -206,7 +231,12 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
}
level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -233,10 +263,10 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
return ret;
for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) {
- drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "get lane status failed\n");
break;
}
@@ -255,7 +285,12 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
}
hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET,
dp->link.train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -295,6 +330,21 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
struct hibmc_dp_link *link = &dp->link;
int ret;
+ ret = drm_dp_read_dpcd_caps(dp->aux, dp->dpcd);
+ if (ret)
+ drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
+
+ dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+ dp->link.cap.lanes = 0x2;
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ ret = hibmc_dp_serdes_rate_switch(ret, dp);
+ if (ret)
+ return ret;
+
while (true) {
ret = hibmc_dp_link_training_cr_pre(dp);
if (ret)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
index 4a515c726d52..394b1e933c3a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
@@ -5,72 +5,128 @@
#define DP_REG_H
#define HIBMC_DP_AUX_CMD_ADDR 0x50
+
#define HIBMC_DP_AUX_WR_DATA0 0x54
#define HIBMC_DP_AUX_WR_DATA1 0x58
#define HIBMC_DP_AUX_WR_DATA2 0x5c
#define HIBMC_DP_AUX_WR_DATA3 0x60
#define HIBMC_DP_AUX_RD_DATA0 0x64
+
#define HIBMC_DP_AUX_REQ 0x74
+#define HIBMC_DP_CFG_AUX_REQ BIT(0)
+#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
+#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
+#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
+
#define HIBMC_DP_AUX_STATUS 0x78
+#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
+#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
+#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
+#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
+
#define HIBMC_DP_PHYIF_CTRL0 0xa0
+#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
+#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
+#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
+
#define HIBMC_DP_VIDEO_CTRL 0x100
+#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
+#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
+#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
+#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
+#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
+
#define HIBMC_DP_VIDEO_CONFIG0 0x104
+#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG1 0x108
+#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG2 0x10c
+#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG3 0x110
+#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_VIDEO_PACKET 0x114
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
+#define HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION GENMASK(31, 20)
+
#define HIBMC_DP_VIDEO_MSA0 0x118
+#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_MSA1 0x11c
#define HIBMC_DP_VIDEO_MSA2 0x120
+
#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0X124
+#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+
+#define HIBMC_DP_COLOR_BAR_CTRL 0x260
+#define HIBMC_DP_COLOR_BAR_CTRL1 0x264
+
#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
+#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
+#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
+#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_HDCP_CFG 0x600
+
#define HIBMC_DP_DPTX_RST_CTRL 0x700
+#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
+
#define HIBMC_DP_DPTX_CLK_CTRL 0x704
+
#define HIBMC_DP_DPTX_GCTL0 0x708
+#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
+
#define HIBMC_DP_INTR_ENABLE 0x720
#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
+
#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
+#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
+
#define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0
-#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
-#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
-#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
-#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
-#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
-#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
-#define HIBMC_DP_CFG_AUX_REQ BIT(0)
-#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
-#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
-#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
-#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
-#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
-#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
-#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
-#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
-#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
-#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
-#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
-#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
-#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+#define HIBMC_DP_INTSTAT 0x1e0724
+#define HIBMC_DP_INTCLR 0x1e0728
+
+/* dp serdes reg */
+#define HIBMC_DP_HOST_OFFSET 0x10000
+#define HIBMC_DP_LANE0_RATE_OFFSET 0x4
+#define HIBMC_DP_LANE1_RATE_OFFSET 0xc
+#define HIBMC_DP_LANE_STATUS_OFFSET 0x10
+#define HIBMC_DP_PMA_LANE0_OFFSET 0x18
+#define HIBMC_DP_PMA_LANE1_OFFSET 0x1c
+#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
+#define HIBMC_DP_PMA_TXDEEMPH GENMASK(18, 1)
+#define DP_SERDES_DONE 0x3
+
+/* dp serdes TX de-emphasis configuration */
+#define DP_SERDES_VOL0_PRE0 0x280
+#define DP_SERDES_VOL0_PRE1 0x2300
+#define DP_SERDES_VOL0_PRE2 0x53c0
+#define DP_SERDES_VOL0_PRE3 0x8400
+#define DP_SERDES_VOL1_PRE0 0x380
+#define DP_SERDES_VOL1_PRE1 0x3440
+#define DP_SERDES_VOL1_PRE2 0x6480
+#define DP_SERDES_VOL2_PRE0 0x4c1
+#define DP_SERDES_VOL2_PRE1 0x4500
+#define DP_SERDES_VOL3_PRE0 0x600
+#define DP_SERDES_BW_8_1 0x3
+#define DP_SERDES_BW_5_4 0x2
+#define DP_SERDES_BW_2_7 0x1
+#define DP_SERDES_BW_1_62 0x0
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
new file mode 100644
index 000000000000..676059d4c1e6
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_config.h"
+#include "dp_reg.h"
+
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX])
+{
+ static const u32 serdes_tx_cfg[4][4] = { {DP_SERDES_VOL0_PRE0, DP_SERDES_VOL0_PRE1,
+ DP_SERDES_VOL0_PRE2, DP_SERDES_VOL0_PRE3},
+ {DP_SERDES_VOL1_PRE0, DP_SERDES_VOL1_PRE1,
+ DP_SERDES_VOL1_PRE2}, {DP_SERDES_VOL2_PRE0,
+ DP_SERDES_VOL2_PRE1}, {DP_SERDES_VOL3_PRE0}};
+ int cfg[2];
+ int i;
+
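+	/* note: the TX config is looked up by [voltage swing][pre-emphasis] taken from train_set */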
+ for (i = 0; i < HIBMC_DP_LANE_NUM_MAX; i++) {
+ cfg[i] = serdes_tx_cfg[FIELD_GET(DP_TRAIN_VOLTAGE_SWING_MASK, train_set[i])]
+ [FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, train_set[i])];
+ if (!cfg[i])
+ return -EINVAL;
+
+ /* lane1 offset is 4 */
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, cfg[i]),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET + i * 4);
+ }
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes cfg failed\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp)
+{
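+	/* program both lanes with the new rate code, then check the done status after a short settle */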
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE0_RATE_OFFSET);
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE1_RATE_OFFSET);
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes rate switching failed\n");
+ return -EAGAIN;
+ }
+
+ if (rate < DP_SERDES_BW_8_1)
+		drm_dbg_dp(dp->dev, "reducing serdes rate to: %d\n",
+			   rate ? rate * HIBMC_DP_LINK_RATE_CAL * 10 : 162);
+
+ return 0;
+}
+
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp)
+{
+ dp->serdes_base = dp->base + HIBMC_DP_HOST_OFFSET;
+
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET);
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE1_OFFSET);
+
+ return hibmc_dp_serdes_rate_switch(DP_SERDES_BW_8_1, dp);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
new file mode 100644
index 000000000000..f585387c3a49
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
+
+#include "hibmc_drm_drv.h"
+
+#define MAX_BUF_SIZE 12
+
+static ssize_t hibmc_control_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hibmc_drm_private *priv = file_inode(file)->i_private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int ret, idx;
+ u8 buf[MAX_BUF_SIZE];
+
+ if (count >= MAX_BUF_SIZE)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+	/* Only 4 parameters are allowed, the ranges are as follows:
+	 * [0] enable/disable colorbar feature
+	 *     0: disable colorbar, 1: enable colorbar
+	 * [1] the timing source of colorbar displaying
+	 *     0: timing follows XDP, 1: internal self timing
+	 * [2] the movement of colorbar displaying
+	 *     0: static colorbar image,
+	 *     1~255: right shifting a type of color per (1~255) frames
+	 * [3] the color type of colorbar displaying
+	 *     0~9: color bar, white, red, orange,
+	 *     yellow, green, cyan, blue, purple, black
+	 */
+ if (sscanf(buf, "%hhu %hhu %hhu %u", &cfg->enable, &cfg->self_timing,
+ &cfg->dynamic_rate, &cfg->pattern) != 4) {
+ return -EINVAL;
+ }
+
+ if (cfg->pattern > 9 || cfg->enable > 1 || cfg->self_timing > 1)
+ return -EINVAL;
+
+ ret = drm_dev_enter(&priv->dev, &idx);
+ if (!ret)
+ return -ENODEV;
+
+ hibmc_dp_set_cbar(&priv->dp, cfg);
+
+ drm_dev_exit(idx);
+
+ return count;
+}
+
+static int hibmc_dp_dbgfs_show(struct seq_file *m, void *arg)
+{
+ struct hibmc_drm_private *priv = m->private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int idx;
+
+ if (!drm_dev_enter(&priv->dev, &idx))
+ return -ENODEV;
+
+ seq_printf(m, "hibmc dp colorbar cfg: %u %u %u %u\n", cfg->enable, cfg->self_timing,
+ cfg->dynamic_rate, cfg->pattern);
+
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static int hibmc_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hibmc_dp_dbgfs_show, inode->i_private);
+}
+
+static const struct file_operations hibmc_dbg_fops = {
+ .owner = THIS_MODULE,
+ .write = hibmc_control_write,
+ .read = seq_read,
+ .open = hibmc_open,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root)
+{
+ struct drm_device *dev = connector->dev;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+
+	/* create the file in the drm debugfs directory, so we don't need to remove it manually */
+ debugfs_create_file("colorbar-cfg", 0200,
+ root, priv, &hibmc_dbg_fops);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
index 603d6b198a54..d06832e62e96 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
@@ -13,27 +13,64 @@
#include "hibmc_drm_drv.h"
#include "dp/dp_hw.h"
+#define DP_MASKED_SINK_HPD_PLUG_INT BIT(2)
+
static int hibmc_dp_connector_get_modes(struct drm_connector *connector)
{
+ const struct drm_edid *drm_edid;
int count;
- count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width,
- connector->dev->mode_config.max_height);
- drm_set_preferred_mode(connector, 1024, 768); // temporary implementation
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ count = drm_edid_connector_add_modes(connector);
+
+ drm_edid_free(drm_edid);
return count;
}
+static int hibmc_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
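+	/* settle delay before DDC probing, presumably to debounce the HPD event */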
+ mdelay(200);
+
+ return drm_connector_helper_detect_from_ddc(connector, ctx, force);
+}
+
static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = {
.get_modes = hibmc_dp_connector_get_modes,
+ .detect_ctx = hibmc_dp_detect,
};
+static int hibmc_dp_late_register(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ hibmc_dp_enable_int(dp);
+
+ return drm_dp_aux_register(&dp->aux);
+}
+
+static void hibmc_dp_early_unregister(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ drm_dp_aux_unregister(&dp->aux);
+
+ hibmc_dp_disable_int(dp);
+}
+
static const struct drm_connector_funcs hibmc_dp_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = hibmc_dp_late_register,
+ .early_unregister = hibmc_dp_early_unregister,
+ .debugfs_init = hibmc_debugfs_init,
};
static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode)
@@ -74,6 +111,31 @@ static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = {
.atomic_disable = hibmc_dp_encoder_disable,
};
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+		return IRQ_NONE;
+
+ if (priv->dp.irq_status & DP_MASKED_SINK_HPD_PLUG_INT) {
+ drm_dbg_dp(&priv->dev, "HPD IN isr occur!\n");
+ hibmc_dp_hpd_cfg(&priv->dp);
+ } else {
+ drm_dbg_dp(&priv->dev, "HPD OUT isr occur!\n");
+ hibmc_dp_reset_link(&priv->dp);
+ }
+
+ if (dev->registered)
+ drm_connector_helper_hpd_irq_event(&priv->dp.connector);
+
+ drm_dev_exit(idx);
+
+ return IRQ_HANDLED;
+}
+
int hibmc_dp_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
@@ -103,8 +165,8 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs);
- ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_init_with_ddc(dev, connector, &hibmc_dp_conn_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort, &dp->aux.ddc);
if (ret) {
drm_err(dev, "init dp connector failed: %d\n", ret);
return ret;
@@ -114,5 +176,7 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index e6de6d5edf6b..768b97f9e74a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -28,12 +28,12 @@
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
-#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00
-#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff
+#include "dp/dp_reg.h"
DEFINE_DRM_GEM_FOPS(hibmc_fops);
+static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
+
static irqreturn_t hibmc_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
@@ -51,6 +51,22 @@ static irqreturn_t hibmc_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t hibmc_dp_interrupt(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ u32 status;
+
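+	/* ack the DP interrupt in hard-irq context; HPD handling runs in the threaded handler */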
+ status = readl(priv->mmio + HIBMC_DP_INTSTAT);
+ if (status) {
+ priv->dp.irq_status = status;
+ writel(status, priv->mmio + HIBMC_DP_INTCLR);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_HANDLED;
+}
+
static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -121,9 +137,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
- /* if DP existed, init DP */
- if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) &
- HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) {
+ /*
+	 * If the serdes register is readable and non-zero,
+	 * the DP block exists, so initialize it.
+ */
+ ret = readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL);
+ if (ret) {
ret = hibmc_dp_init(priv);
if (ret)
drm_err(dev, "failed to init dp: %d\n", ret);
@@ -250,15 +269,48 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
return 0;
}
-static int hibmc_unload(struct drm_device *dev)
+static void hibmc_unload(struct drm_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
drm_atomic_helper_shutdown(dev);
+}
- free_irq(pdev->irq, dev);
+static int hibmc_msi_init(struct drm_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ char name[32] = {0};
+ int valid_irq_num;
+ int irq;
+ int ret;
- pci_disable_msi(to_pci_dev(dev->dev));
+ ret = pci_alloc_irq_vectors(pdev, HIBMC_MIN_VECTORS,
+ HIBMC_MAX_VECTORS, PCI_IRQ_MSI);
+ if (ret < 0) {
+ drm_err(dev, "enabling MSI failed: %d\n", ret);
+ return ret;
+ }
+
+ valid_irq_num = ret;
+
+ for (int i = 0; i < valid_irq_num; i++) {
+ snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
+ dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
+
+ irq = pci_irq_vector(pdev, i);
+
+ if (i)
+ /* PCI devices require shared interrupts. */
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ hibmc_dp_interrupt,
+ hibmc_dp_hpd_isr,
+ IRQF_SHARED, name, dev);
+ else
+ ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
+ IRQF_SHARED, name, dev);
+ if (ret) {
+ drm_err(dev, "install irq failed: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
@@ -290,15 +342,10 @@ static int hibmc_load(struct drm_device *dev)
goto err;
}
- ret = pci_enable_msi(pdev);
+ ret = hibmc_msi_init(dev);
if (ret) {
- drm_warn(dev, "enabling MSI failed: %d\n", ret);
- } else {
- /* PCI devices require shared interrupts. */
- ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED,
- dev->driver->name, dev);
- if (ret)
- drm_warn(dev, "install irq failed: %d\n", ret);
+ drm_err(dev, "hibmc msi init failed, ret:%d\n", ret);
+ goto err;
}
/* reset all the states of crtc/plane/encoder/connector */
@@ -374,7 +421,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
static void hibmc_pci_shutdown(struct pci_dev *pdev)
{
- drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+ hibmc_pci_remove(pdev);
}
static const struct pci_device_id hibmc_pci_table[] = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index d982f1e4b958..274feabe7df0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -22,6 +22,9 @@
#include "dp/dp_hw.h"
+#define HIBMC_MIN_VECTORS 1
+#define HIBMC_MAX_VECTORS 2
+
struct hibmc_vdac {
struct drm_device *dev;
struct drm_encoder encoder;
@@ -47,6 +50,11 @@ static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
return container_of(connector, struct hibmc_vdac, connector);
}
+static inline struct hibmc_dp *to_hibmc_dp(struct drm_connector *connector)
+{
+ return container_of(connector, struct hibmc_dp, connector);
+}
+
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
{
return container_of(dev, struct hibmc_drm_private, dev);
@@ -64,4 +72,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
int hibmc_dp_init(struct hibmc_drm_private *priv);
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root);
+
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg);
+
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 05e19ea4c9f9..e8a527ede854 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -60,6 +60,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
+ .detect_ctx = drm_connector_helper_detect_from_ddc,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
@@ -127,5 +128,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 2eea9fb0e76b..e80debdc4176 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -825,7 +825,6 @@ static const struct component_ops dsi_ops = {
static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
{
struct dsi_hw_ctx *ctx = dsi->ctx;
- struct resource *res;
ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ctx->pclk)) {
@@ -833,8 +832,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
return PTR_ERR(ctx->pclk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap dsi io region\n");
return PTR_ERR(ctx->base);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 2eb49177ac42..45c4eb008ad5 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -844,7 +844,6 @@ static struct drm_plane_funcs ade_plane_funcs = {
static void *ade_hw_ctx_alloc(struct platform_device *pdev,
struct drm_crtc *crtc)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct ade_hw_ctx *ctx = NULL;
@@ -856,8 +855,7 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
return ERR_PTR(-ENOMEM);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap ade io base\n");
return ERR_PTR(-EIO);
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h
index d2d8582b36df..9e776112c03e 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm.h
+++ b/drivers/gpu/drm/hyperv/hyperv_drm.h
@@ -11,7 +11,9 @@
struct hyperv_drm_device {
/* drm */
struct drm_device dev;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
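+	/* on hotplug: restore aux timing, reset and re-arm interrupts, re-enable the DPTX clocks */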
struct drm_connector connector;
/* mode */
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 6c6b57298797..945b9482bcb3 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -5,6 +5,8 @@
#include <linux/hyperv.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -15,7 +17,8 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_plane.h>
#include "hyperv_drm.h"
@@ -38,18 +41,6 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
return 0;
}
-static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb,
- const struct iosys_map *map)
-{
- struct drm_rect fullscreen = {
- .x1 = 0,
- .x2 = fb->width,
- .y1 = 0,
- .y2 = fb->height,
- };
- return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
-}
-
static int hyperv_connector_get_modes(struct drm_connector *connector)
{
struct hyperv_drm_device *hv = to_hv(connector->dev);
@@ -98,30 +89,66 @@ static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
return 0;
}
-static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static const uint32_t hyperv_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t hyperv_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static void hyperv_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct hyperv_drm_device *hv = to_hv(crtc->dev);
+ struct drm_plane *plane = &hv->plane;
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_crtc_state *crtc_state = crtc->state;
hyperv_hide_hw_ptr(hv->hdev);
hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
crtc_state->mode.hdisplay,
crtc_state->mode.vdisplay,
plane_state->fb->pitches[0]);
- hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->data[0]);
}
-static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .atomic_enable = hyperv_crtc_helper_atomic_enable,
+};
+
+static const struct drm_crtc_funcs hyperv_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int hyperv_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
- if (fb->format->format != DRM_FORMAT_XRGB8888)
- return -EINVAL;
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ if (!plane_state->visible)
+ return 0;
if (fb->pitches[0] * fb->height > hv->fb_size) {
drm_err(&hv->dev, "fb size requested by %s for %dX%d (pitch %d) greater than %ld\n",
@@ -132,53 +159,120 @@ static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
return 0;
}
-static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+static void hyperv_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_rect rect;
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_rect damage;
+ struct drm_rect dst_clip;
+ struct drm_atomic_helper_damage_iter iter;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ dst_clip = new_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ hyperv_blit_to_vram_rect(new_state->fb, &shadow_plane_state->data[0], &damage);
+ hyperv_update_dirt(hv->hdev, &damage);
+ }
+}
- if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
- hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->data[0], &rect);
- hyperv_update_dirt(hv->hdev, &rect);
+static int hyperv_plane_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(hv->vram);
+
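+	/* expose the vram mapping so the drm panic handler can draw to the scanout directly */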
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
}
+ return -ENODEV;
}
-static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
- .enable = hyperv_pipe_enable,
- .check = hyperv_pipe_check,
- .update = hyperv_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static void hyperv_plane_panic_flush(struct drm_plane *plane)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_rect rect;
+
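+	/* mark the whole framebuffer dirty so the host picks up the panic output */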
+ if (!plane->state || !plane->state->fb)
+ return;
+
+ rect.x1 = 0;
+ rect.y1 = 0;
+ rect.x2 = plane->state->fb->width;
+ rect.y2 = plane->state->fb->height;
+
+ hyperv_update_dirt(hv->hdev, &rect);
+}
+
+static const struct drm_plane_helper_funcs hyperv_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = hyperv_plane_atomic_check,
+ .atomic_update = hyperv_plane_atomic_update,
+ .get_scanout_buffer = hyperv_plane_get_scanout_buffer,
+ .panic_flush = hyperv_plane_panic_flush,
};
-static const uint32_t hyperv_formats[] = {
- DRM_FORMAT_XRGB8888,
+static const struct drm_plane_funcs hyperv_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static const uint64_t hyperv_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const struct drm_encoder_funcs hyperv_drm_simple_encoder_funcs_cleanup = {
+ .destroy = drm_encoder_cleanup,
};
static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
{
+ struct drm_device *dev = &hv->dev;
+ struct drm_encoder *encoder = &hv->encoder;
+ struct drm_plane *plane = &hv->plane;
+ struct drm_crtc *crtc = &hv->crtc;
+ struct drm_connector *connector = &hv->connector;
int ret;
- ret = drm_simple_display_pipe_init(&hv->dev,
- &hv->pipe,
- &hyperv_pipe_funcs,
- hyperv_formats,
- ARRAY_SIZE(hyperv_formats),
- hyperv_modifiers,
- &hv->connector);
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &hyperv_plane_funcs,
+ hyperv_formats, ARRAY_SIZE(hyperv_formats),
+ hyperv_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
+ drm_plane_helper_add(plane, &hyperv_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(plane);
- drm_plane_enable_fb_damage_clips(&hv->pipe.plane);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &hyperv_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(crtc, &hyperv_crtc_helper_funcs);
- return 0;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder,
+ &hyperv_drm_simple_encoder_funcs_cleanup,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ ret = hyperv_conn_init(hv);
+ if (ret) {
+		drm_err(dev, "Failed to initialize connector.\n");
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(connector, encoder);
}
static enum drm_mode_status
@@ -221,12 +315,6 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv)
dev->mode_config.funcs = &hyperv_mode_config_funcs;
- ret = hyperv_conn_init(hv);
- if (ret) {
- drm_err(dev, "Failed to initialized connector.\n");
- return ret;
- }
-
ret = hyperv_pipe_init(hv);
if (ret) {
drm_err(dev, "Failed to initialized pipe.\n");
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ed05b131ed3a..853543443072 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,19 +40,17 @@ i915-y += \
intel_pcode.o \
intel_region_ttm.o \
intel_runtime_pm.o \
- intel_sbi.o \
intel_step.o \
intel_uncore.o \
intel_uncore_trace.o \
intel_wakeref.o \
- vlv_sideband.o \
+ vlv_iosf_sb.o \
vlv_suspend.o
# core peripheral code
i915-y += \
soc/intel_dram.o \
soc/intel_gmch.o \
- soc/intel_pch.o \
soc/intel_rom.o
# core library code
@@ -220,12 +218,11 @@ i915-$(CONFIG_HWMON) += \
# modesetting core code
i915-y += \
display/hsw_ips.o \
- display/i9xx_plane.o \
display/i9xx_display_sr.o \
+ display/i9xx_plane.o \
display/i9xx_wm.o \
display/intel_alpm.o \
display/intel_atomic.o \
- display/intel_atomic_plane.o \
display/intel_audio.o \
display/intel_bios.o \
display/intel_bo.o \
@@ -247,6 +244,7 @@ i915-y += \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
display/intel_display_reset.o \
+ display/intel_display_rpm.o \
display/intel_display_rps.o \
display/intel_display_snapshot.o \
display/intel_display_wa.o \
@@ -266,6 +264,7 @@ i915-y += \
display/intel_fbc.o \
display/intel_fdi.o \
display/intel_fifo_underrun.o \
+ display/intel_flipq.o \
display/intel_frontbuffer.o \
display/intel_global_state.o \
display/intel_hdcp.o \
@@ -281,12 +280,15 @@ i915-y += \
display/intel_modeset_setup.o \
display/intel_modeset_verify.o \
display/intel_overlay.o \
+ display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
+ display/intel_plane.o \
display/intel_plane_initial.o \
display/intel_pmdemand.o \
display/intel_psr.o \
display/intel_quirks.o \
+ display/intel_sbi.o \
display/intel_sprite.o \
display/intel_sprite_uapi.o \
display/intel_tc.o \
@@ -295,7 +297,8 @@ i915-y += \
display/intel_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
- display/skl_watermark.o
+ display/skl_watermark.o \
+ display/vlv_sideband.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -408,7 +411,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
#
# Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
ifdef CONFIG_DRM_I915_WERROR
- cmd_checkdoc = $(srctree)/scripts/kernel-doc -none -Werror $<
+ cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none -Werror $<
endif
# header test
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 206818f9ad49..f10c0fb8d2c8 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 10ab3cc73e58..49f02aca818b 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index d9c3152d4338..0713b2709412 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,6 +29,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 92d32d6b5bce..80b71bd6a837 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,6 +26,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index b42c717085f3..017b617a8069 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 280699438526..ed560e3438db 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 55b9e9bfcc4d..87f6b9602b16 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -7,15 +7,18 @@
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -28,7 +31,6 @@
#include "intel_hotplug.h"
#include "intel_pch_display.h"
#include "intel_pps.h"
-#include "vlv_sideband.h"
static const struct dpll g4x_dpll[] = {
{ .dot = 162000, .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8, },
@@ -60,14 +62,13 @@ static void g4x_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dpll *divisor = NULL;
int i, count = 0;
if (display->platform.g4x) {
divisor = g4x_dpll;
count = ARRAY_SIZE(g4x_dpll);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
} else if (display->platform.cherryview) {
@@ -93,7 +94,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
@@ -141,7 +141,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(display) && port != PORT_A) {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
intel_de_rmw(display, TRANS_DP_CTL(crtc->pipe),
@@ -183,7 +183,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct intel_display *display, bool state)
{
- bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
+ bool cur_state = intel_de_read(display, DP_A) & EDP_PLL_ENABLE;
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -205,12 +205,12 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n",
pipe_config->port_clock);
- intel_dp->DP &= ~DP_PLL_FREQ_MASK;
+ intel_dp->DP &= ~EDP_PLL_FREQ_MASK;
if (pipe_config->port_clock == 162000)
- intel_dp->DP |= DP_PLL_FREQ_162MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_162MHZ;
else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_270MHZ;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -225,7 +225,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
if (display->platform.ironlake)
intel_wait_for_vblank_if_active(display, !crtc->pipe);
- intel_dp->DP |= DP_PLL_ENABLE;
+ intel_dp->DP |= EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -243,7 +243,7 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "disabling eDP PLL\n");
- intel_dp->DP &= ~DP_PLL_ENABLE;
+ intel_dp->DP &= ~EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -277,7 +277,6 @@ bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
u32 val;
@@ -287,13 +286,13 @@ bool g4x_dp_port_enabled(struct intel_display *display,
/* asserts want to know the pipe even if the port is disabled */
if (display->platform.ivybridge && port == PORT_A)
- *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
- else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_IVB, val);
+ else if (HAS_PCH_CPT(display) && port != PORT_A)
ret &= cpt_dp_port_selected(display, port, pipe);
else if (display->platform.cherryview)
- *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_CHV, val);
else
- *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK, val);
return ret;
}
@@ -338,7 +337,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
enum port port = encoder->port;
@@ -353,7 +351,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ if (HAS_PCH_CPT(display) && port != PORT_A) {
u32 trans_dp = intel_de_read(display,
TRANS_DP_CTL(crtc->pipe));
@@ -389,13 +387,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (display->platform.g4x && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->lane_count =
- ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+ pipe_config->lane_count = REG_FIELD_GET(DP_PORT_WIDTH_MASK, tmp) + 1;
g4x_dp_get_m_n(pipe_config);
if (port == PORT_A) {
- if ((intel_de_read(display, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(display, DP_A) & EDP_PLL_FREQ_MASK) == EDP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -416,7 +413,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
@@ -429,7 +425,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
+ (HAS_PCH_CPT(display) && port != PORT_A)) {
intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
@@ -448,7 +444,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -519,7 +515,7 @@ static void intel_disable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
/*
* Make sure the panel is off before trying to change the mode.
@@ -581,16 +577,10 @@ static void chv_post_disable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
intel_dp_link_down(encoder, old_crtc_state);
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void
@@ -1223,10 +1213,10 @@ static int g4x_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int ret;
- if (HAS_PCH_SPLIT(i915) && encoder->port != PORT_A)
+ if (HAS_PCH_SPLIT(display) && encoder->port != PORT_A)
crtc_state->has_pch_encoder = true;
ret = intel_dp_compute_config(encoder, crtc_state, conn_state);
@@ -1279,7 +1269,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -1353,7 +1342,7 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+ (HAS_PCH_CPT(display) && port != PORT_A))
dig_port->dp.set_link_train = cpt_set_link_train;
else
dig_port->dp.set_link_train = g4x_set_link_train;
@@ -1370,7 +1359,7 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->set_signal_levels = g4x_set_signal_levels;
if (display->platform.valleyview || display->platform.cherryview ||
- (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
+ (HAS_PCH_SPLIT(display) && port != PORT_A)) {
dig_port->dp.preemph_max = intel_dp_preemph_max_3;
dig_port->dp.voltage_max = intel_dp_voltage_max_3;
} else {
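
Note: the recurring cleanup in this file is the same one-liner everywhere: the PCH feature checks (HAS_PCH_CPT/IBX/SPLIT) now take struct intel_display directly, so the to_i915() locals become dead and are dropped. The before/after shape, reconstructed from the hunks above (the helper call is hypothetical):

	static void example(struct intel_encoder *encoder)
	{
		struct intel_display *display = to_intel_display(encoder);

		/*
		 * was: struct drm_i915_private *dev_priv =
		 *              to_i915(encoder->base.dev);
		 *      if (HAS_PCH_CPT(dev_priv)) ...
		 */
		if (HAS_PCH_CPT(display))
			cpt_specific_setup(encoder);	/* hypothetical helper */
	}
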
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 3dc2c59a3df0..2610f5702fb9 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -5,8 +5,9 @@
* HDMI support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code).
*/
+#include <drm/drm_print.h>
+
#include "g4x_hdmi.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -14,6 +15,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
@@ -22,13 +24,11 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_sdvo.h"
-#include "vlv_sideband.h"
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -37,7 +37,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
+ if (!HAS_PCH_SPLIT(display) && crtc_state->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -52,7 +52,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
if (crtc_state->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (display->platform.cherryview)
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
@@ -134,9 +134,8 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
@@ -155,7 +154,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 tmp, flags = 0;
int dotclock;
@@ -186,7 +184,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_AUDIO_ENABLE)
pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev_priv) &&
+ if (!HAS_PCH_SPLIT(display) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -383,7 +381,6 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
@@ -401,7 +398,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -539,15 +536,8 @@ static void chv_hdmi_post_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
@@ -682,7 +672,6 @@ static bool assert_hdmi_port_valid(struct intel_display *display, enum port port
bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -724,7 +713,7 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = g4x_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@@ -745,9 +734,9 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev_priv))
+ else if (HAS_PCH_IBX(display))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
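
Note: chv_post_disable_dp() earlier and chv_hdmi_post_disable() here both drop the vlv_dpio_get()/vlv_dpio_put() bracket around chv_data_lane_soft_reset(). That is only safe if the helper now takes the sideband lock itself; a hedged sketch of that assumed shape (the real helper lives in intel_dpio_phy.c):

	void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				      const struct intel_crtc_state *crtc_state,
				      bool reset)
	{
		struct drm_device *drm = encoder->base.dev;

		vlv_dpio_get(drm);
		/* assert or deassert the DPIO lane soft-reset bits here */
		vlv_dpio_put(drm);
	}
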
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 674a0e5f0858..927fe56aec77 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -5,19 +5,20 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "hsw_ips.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_pcode.h"
static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 val;
if (!crtc_state->ips_enabled)
@@ -38,8 +39,8 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
- val | IPS_PCODE_CONTROL));
+ intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL,
+ val | IPS_PCODE_CONTROL));
/*
 * Quoting Art Runyan: "it's not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
@@ -64,8 +65,6 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bool need_vblank_wait = false;
if (!crtc_state->ips_enabled)
@@ -73,7 +72,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
+ intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
@@ -266,7 +265,7 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
return PTR_ERR(cdclk_state);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+ if (crtc_state->pixel_rate > intel_cdclk_logical(cdclk_state) * 95 / 100)
return 0;
}
@@ -344,10 +343,9 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "Enabled by kernel parameter: %s\n",
str_yes_no(display->params.enable_ips));
@@ -361,7 +359,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
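
Note: the two comments in this file carry the real contract: on Broadwell, IPS is toggled through the pcode mailbox, and IPS_CTL cannot be trusted immediately after the request, only polled. A hedged sketch of the disable path those comments describe, using the wait helper from intel_de.h:

	drm_WARN_ON(display->drm,
		    intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL, 0));
	/*
	 * BSpec quotes 42 ms for pcode to finish, but that occasionally
	 * times out in practice, hence the 100 ms bound.
	 */
	if (intel_de_wait_for_clear(display, IPS_CTL, IPS_ENABLE, 100))
		drm_err(display->drm, "timed out waiting for IPS disable\n");
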
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.c b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
index 32abe9743014..935419441709 100644
--- a/drivers/gpu/drm/i915/display/i9xx_display_sr.c
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
@@ -5,10 +5,10 @@
#include <drm/drm_device.h>
-#include "i915_reg.h"
#include "i9xx_display_sr.h"
#include "i9xx_wm_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_gmbus.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 013295f66d56..f291ced989dc 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -2,24 +2,28 @@
/*
* Copyright © 2020 Intel Corporation
*/
+
#include <linux/kernel.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
+#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_sprite.h"
/* Primary plane formats for gen <= 3 */
@@ -333,10 +337,10 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- i9xx_plane_has_windowing(plane));
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ i9xx_plane_has_windowing(plane));
if (ret)
return ret;
@@ -630,85 +634,85 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
static void
bdw_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
bdw_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_disable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
}
static bool i9xx_plane_can_async_flip(u64 modifier)
@@ -820,7 +824,7 @@ unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
- if (intel_plane_can_async_flip(plane, fb->modifier))
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return 256 * 1024;
/* FIXME undocumented so not sure what's actually needed */
@@ -844,7 +848,7 @@ static unsigned int g4x_primary_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
- if (intel_plane_can_async_flip(plane, fb->modifier))
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return 256 * 1024;
if (intel_scanout_needs_vtd_wa(display))
@@ -889,6 +893,7 @@ static const struct drm_plane_funcs i965_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i965_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs i8xx_plane_funcs = {
@@ -898,8 +903,30 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i8xx_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
+static void i9xx_disable_tiling(struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 dspcntr;
+ u32 reg;
+
+ dspcntr = intel_de_read_fw(display, DSPCNTR(display, i9xx_plane));
+ dspcntr &= ~DISP_TILED;
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
+
+ if (DISPLAY_VER(display) >= 4) {
+ reg = intel_de_read_fw(display, DSPSURF(display, i9xx_plane));
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), reg);
+ } else {
+ reg = intel_de_read_fw(display, DSPADDR(display, i9xx_plane));
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), reg);
+ }
+}
+
struct intel_plane *
intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
{
@@ -1042,6 +1069,8 @@ intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
}
}
+ plane->disable_tiling = i9xx_disable_tiling;
+
modifiers = intel_fb_plane_get_modifiers(display, INTEL_PLANE_CAP_TILING_X);
if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
@@ -1146,7 +1175,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
drm_WARN_ON(display->drm, pipe != crtc->pipe);
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_bo_alloc_framebuffer();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
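
Note: i9xx_disable_tiling() above is easy to misread: the DSPSURF/DSPADDR read-then-write-back looks like a no-op, but on these platforms the plane registers are double-buffered and (assuming the usual i9xx arming rules) the surface-address write is what latches the pending DSPCNTR change. Annotated restatement of the gen4+ branch:

	dspcntr = intel_de_read_fw(display, DSPCNTR(display, i9xx_plane));
	dspcntr &= ~DISP_TILED;		/* switch the plane to linear */
	intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);

	/* gen4+ arm on DSPSURF; gen2/3 use DSPADDR instead */
	reg = intel_de_read_fw(display, DSPSURF(display, i9xx_plane));
	intel_de_write_fw(display, DSPSURF(display, i9xx_plane), reg);
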
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 7c80e37c1c5f..1f9db5118777 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -9,7 +9,9 @@
#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
#include "intel_bo.h"
+#include "intel_de.h"
#include "intel_display.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_fb.h"
#include "intel_mchbar_regs.h"
@@ -81,13 +83,14 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *i915)
+static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int i;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
const struct cxsr_latency *latency = &cxsr_latency_table[i];
- bool is_desktop = !IS_MOBILE(i915);
+ bool is_desktop = !display->platform.mobile;
if (is_desktop == latency->is_desktop &&
i915->is_ddr3 == latency->is_ddr3 &&
@@ -96,102 +99,101 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *
return latency;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
return NULL;
}
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
u32 val;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
if (enable)
val &= ~FORCE_DDR_HIGH_FREQ;
else
val |= FORCE_DDR_HIGH_FREQ;
val &= ~FORCE_DDR_LOW_FREQ;
val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_pm5(struct intel_display *display, bool enable)
{
u32 val;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
if (enable)
val |= DSP_MAXFIFO_PM5_ENABLE;
else
val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
#define FW_WM(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
- struct intel_display *display = &dev_priv->display;
bool was_enabled;
u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
- } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ if (display->platform.valleyview || display->platform.cherryview) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_de_write(display, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF_VLV);
+ } else if (display->platform.g4x || display->platform.i965gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.pineview) {
+ val = intel_de_read(display, DSPFW3(display));
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), val);
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW3(dev_priv));
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, DSPFW3(display), val);
+ intel_de_posting_read(display, DSPFW3(display));
+ } else if (display->platform.i945g || display->platform.i945gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_I915GM(dev_priv)) {
+ intel_de_write(display, FW_BLC_SELF, val);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.i915gm) {
/*
* FIXME can't find a bit like this for 915G, and
* yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, INSTPM, val);
- intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
+ intel_de_write(display, INSTPM, val);
+ intel_de_posting_read(display, INSTPM);
} else {
return false;
}
trace_intel_memory_cxsr(display, was_enabled, enable);
- drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ drm_dbg_kms(display->drm, "memory self-refresh is %s (was %s)\n",
str_enabled_disabled(enable),
str_enabled_disabled(was_enabled));
@@ -200,7 +202,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
/**
* intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
+ * @display: display device
* @enable: Allow vs. disallow CxSR
*
* Allow or disallow the system to enter a special CxSR
@@ -235,17 +237,17 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
* the hardware w.r.t. HPLL SR when writing to plane registers.
* Disallowing just CxSR is sufficient.
*/
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
bool ret;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- ret = _intel_set_memory_cxsr(dev_priv, enable);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.wm.vlv.cxsr = enable;
- else if (IS_G4X(dev_priv))
- dev_priv->display.wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(display, enable);
+ if (display->platform.valleyview || display->platform.cherryview)
+ display->wm.vlv.cxsr = enable;
+ else if (display->platform.g4x)
+ display->wm.g4x.cxsr = enable;
+ mutex_unlock(&display->wm.wm_mutex);
return ret;
}
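
Note: FW_BLC_SELF on i945 and INSTPM on i915gm are masked registers: the high 16 bits of a write select which of the low 16 bits actually change, which is why those branches above go through _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() instead of a read-modify-write. The encoding, paraphrased:

	/* write-enable mask in the high half, payload in the low half */
	static inline u32 masked_bit(u32 bit, bool enable)
	{
		return (bit << 16) | (enable ? bit : 0);
	}
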
@@ -271,8 +273,8 @@ static const int pessimal_latency_ns = 5000;
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
@@ -280,22 +282,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
+ dsparb2 = intel_de_read(display, DSPARB2);
+ dsparb3 = intel_de_read(display, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -310,26 +310,26 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
fifo_state->plane[PLANE_CURSOR] = 63;
}
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i9xx_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i830_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x1ff;
@@ -337,22 +337,22 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i845_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
@@ -537,7 +537,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
/**
* intel_calculate_wm - calculate watermark level
- * @i915: the device
+ * @display: display device
* @pixel_rate: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the FIFO buffer
@@ -555,7 +555,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
-static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
+static unsigned int intel_calculate_wm(struct intel_display *display,
int pixel_rate,
const struct intel_watermark_params *wm,
int fifo_size, int cpp,
@@ -573,10 +573,10 @@ static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
wm->guard_size;
- drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
+ drm_dbg_kms(display->drm, "FIFO entries required for mode: %d\n", entries);
wm_size = fifo_size - entries;
- drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
+ drm_dbg_kms(display->drm, "FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > wm->max_wm)
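
Note: the kernel-doc above compresses the whole method: over the memory latency the display consumes pixel_rate * cpp bytes from the FIFO; round that up to cachelines, add the guard, and whatever is left of the FIFO is the watermark level. Worked with made-up numbers:

	/* hypothetical: 100 MHz dotclock, 4 bpp, 10 us latency,
	 * 64-byte cachelines, guard of 2, 96-entry FIFO */
	u64 bytes = 100000000ULL * 4 * 10 / 1000000;	/* 4000 bytes in flight */
	int entries = DIV_ROUND_UP(bytes, 64) + 2;	/* 63 + 2 = 65 lines */
	int wm_size = 96 - entries;			/* watermark = 31 */
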
@@ -626,11 +626,11 @@ static bool intel_crtc_active(struct intel_crtc *crtc)
crtc->config->hw.adjusted_mode.crtc_clock;
}
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+static struct intel_crtc *single_enabled_crtc(struct intel_display *display)
{
struct intel_crtc *crtc, *enabled = NULL;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -641,21 +641,21 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
+static void pnv_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned int wm;
- latency = pnv_get_cxsr_latency(dev_priv);
+ latency = pnv_get_cxsr_latency(display);
if (!latency) {
- drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
- intel_set_memory_cxsr(dev_priv, false);
+ drm_dbg_kms(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ intel_set_memory_cxsr(display, false);
return;
}
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
@@ -663,47 +663,46 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
int cpp = fb->format->cpp[0];
/* Display SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ reg = intel_de_read(display, DSPFW1(display));
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+ intel_de_write(display, DSPFW1(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_CURSOR_SR_MASK,
- FW_WM(wm, CURSOR_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_CURSOR_SR_MASK, FW_WM(wm, CURSOR_SR));
/* Display HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
/* cursor HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ reg = intel_de_read(display, DSPFW3(display));
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+ intel_de_write(display, DSPFW3(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW3 register is %x\n", reg);
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
} else {
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
}
@@ -794,53 +793,51 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
return max(0, tlb_miss);
}
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_write_wm_values(struct intel_display *display,
const struct g4x_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
+
+ intel_de_posting_read(display, DSPFW1(display));
}
#define FW_WM_VLV(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_write_wm_values(struct intel_display *display,
const struct vlv_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ intel_de_write(display, VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
}
/*
@@ -848,72 +845,72 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
- intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(wm->sr.cursor, CURSOR_SR));
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPHOWM, 0);
+ intel_de_write(display, DSPHOWM1, 0);
+ intel_de_write(display, DSPFW4, 0);
+ intel_de_write(display, DSPFW5, 0);
+ intel_de_write(display, DSPFW6, 0);
+
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (display->platform.cherryview) {
+ intel_de_write(display, DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ intel_de_write(display, DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_posting_read(display, DSPFW1(display));
}
#undef FW_WM_VLV
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void g4x_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+ display->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ display->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ display->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
+ display->wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -962,11 +959,11 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
+ unsigned int latency = display->wm.pri_latency[level] * 10;
unsigned int pixel_rate, htotal, cpp, width, wm;
if (latency == 0)
@@ -1017,10 +1014,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1033,13 +1030,13 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
int level, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
level = max(level, G4X_WM_LEVEL_SR);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->fbc != value;
@@ -1056,8 +1053,8 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
bool dirty = false;
int level;
@@ -1069,7 +1066,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
int wm, max_wm;
@@ -1109,7 +1106,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
plane->base.name,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
@@ -1117,7 +1114,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
if (plane_id == PLANE_PRIMARY)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FBC watermarks: SR=%d, HPLL=%d\n",
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
@@ -1137,9 +1134,9 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (level >= dev_priv->display.wm.num_levels)
+ if (level >= display->wm.num_levels)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1281,7 +1278,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -1311,7 +1308,7 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
max(optimal->wm.plane[plane_id],
active->wm.plane[plane_id]);
- drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ drm_WARN_ON(display->drm, intermediate->wm.plane[plane_id] >
g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
}
@@ -1329,23 +1326,23 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->hpll.fbc = max(optimal->hpll.fbc,
active->hpll.fbc);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
intermediate->hpll_en);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
intermediate->fbc_en && intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
intermediate->fbc_en && intermediate->hpll_en);
@@ -1376,7 +1373,7 @@ static int g4x_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+static void g4x_merge_wm(struct intel_display *display,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
@@ -1386,7 +1383,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->hpll_en = true;
wm->fbc_en = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
if (!crtc->active)
@@ -1408,7 +1405,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->fbc_en = false;
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
enum pipe pipe = crtc->pipe;
@@ -1420,23 +1417,23 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+static void g4x_program_watermarks(struct intel_display *display)
{
- struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *old_wm = &display->wm.g4x;
struct g4x_wm_values new_wm = {};
- g4x_merge_wm(dev_priv, &new_wm);
+ g4x_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- g4x_write_wm_values(dev_priv, &new_wm);
+ g4x_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
*old_wm = new_wm;
}
@@ -1444,30 +1441,30 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
static void g4x_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
/* latency must be in 0.1us units. */
@@ -1486,18 +1483,18 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void vlv_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ display->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+ if (display->platform.cherryview) {
+ display->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ display->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
}
}
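
Note: mind the unit dance here: the setup tables store latencies in whole microseconds (3/12/33 on VLV/CHV, 5/12/35 on g4x), while the method2 helpers want 0.1 us units, hence the "* 10" at the call sites:

	/* usec in the table, 0.1 usec on the wire */
	unsigned int latency = display->wm.pri_latency[level] * 10;

	wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, latency);
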
@@ -1505,13 +1502,13 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
unsigned int pixel_rate, htotal, cpp, width, wm;
- if (dev_priv->display.wm.pri_latency[level] == 0)
+ if (display->wm.pri_latency[level] == 0)
return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1532,7 +1529,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
wm = 63;
} else {
wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
+ display->wm.pri_latency[level] * 10);
}
return min_t(unsigned int, wm, USHRT_MAX);
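Note the unit conversion at the call site: pri_latency[] is kept in usec while
vlv_wm_method2() expects 0.1 usec units, hence the "* 10". A worked example
with illustrative numbers, assuming the usual method-2 shape of "lines covered
during the latency window, times bytes per line":

/*
 * pixel_rate = 148500 (kHz), htotal = 2200, width = 1920, cpp = 4,
 * latency = 3 usec (PM2) -> 30 in 0.1 usec units:
 *
 *   lines = (30 * 148500) / (2200 * 10000) = 0	(integer division)
 *   wm    = (lines + 1) * 1920 * 4 = 7680
 *
 * which is then clamped to USHRT_MAX by the min_t() just above.
 */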
@@ -1546,8 +1543,8 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1616,11 +1613,11 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
fifo_left -= plane_extra;
}
- drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
+ drm_WARN_ON(display->drm, active_planes != 0 && fifo_left != 0);
/* give it all to the first plane if none are active */
if (active_planes == 0) {
- drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
+ drm_WARN_ON(display->drm, fifo_left != fifo_size);
fifo_state->plane[PLANE_PRIMARY] = fifo_left;
}
@@ -1631,9 +1628,9 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
static void vlv_invalidate_wms(struct intel_crtc *crtc,
struct vlv_wm_state *wm_state, int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1659,10 +1656,10 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1675,8 +1672,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
int level;
bool dirty = false;
@@ -1686,7 +1683,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
@@ -1703,7 +1700,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
plane->base.name,
crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
@@ -1734,8 +1731,8 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
@@ -1745,7 +1742,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int level;
/* initially allow all levels */
- wm_state->num_levels = dev_priv->display.wm.num_levels;
+ wm_state->num_levels = display->wm.num_levels;
/*
* Note that enabling cxsr with no primary/sprite planes
* enabled can wedge the pipe. Hence we only allow cxsr
@@ -1755,7 +1752,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(display) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -1855,6 +1852,7 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_uncore *uncore = &dev_priv->uncore;
const struct intel_crtc_state *crtc_state =
@@ -1871,8 +1869,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
- drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
+ drm_WARN_ON(display->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(display->drm, fifo_size != 511);
trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
@@ -1889,8 +1887,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
switch (crtc->pipe) {
case PIPE_A:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
@@ -1902,12 +1900,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_B:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
@@ -1919,12 +1917,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_C:
- dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb3 = intel_de_read_fw(display, DSPARB3);
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
@@ -1936,14 +1934,14 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB3, dsparb3);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
default:
break;
}
- intel_uncore_posting_read_fw(uncore, DSPARB(dev_priv));
+ intel_de_read_fw(display, DSPARB(display));
spin_unlock(&uncore->lock);
}
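Two details worth noting here. First, the bare intel_de_read_fw() at the end
replaces intel_uncore_posting_read_fw(): the discarded read still acts as a
posting read that flushes the DSPARB writes. Second, the DSPARB/DSPARB2 dance
exists because each FIFO split point is a 9-bit value: the low 8 bits live in
DSPARB (DSPARB3 for pipe C) and the ninth bit in DSPARB2, hence the
"sprite0_start >> 8" terms. A hypothetical helper showing the packing for one
boundary on pipe A:

static void vlv_pack_sprite0_boundary(u32 *dsparb, u32 *dsparb2,
				      unsigned int sprite0_start)
{
	/* low 8 bits of the boundary */
	*dsparb |= VLV_FIFO(SPRITEA, sprite0_start & 0xff);
	/* bit 8 of the boundary */
	*dsparb2 |= VLV_FIFO(SPRITEA_HI, sprite0_start >> 8);
}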
@@ -2018,16 +2016,16 @@ static int vlv_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+static void vlv_merge_wm(struct intel_display *display,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->display.wm.num_levels - 1;
+ wm->level = display->wm.num_levels - 1;
wm->cxsr = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
if (!crtc->active)
@@ -2046,7 +2044,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
enum pipe pipe = crtc->pipe;
@@ -2061,35 +2059,35 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+static void vlv_program_watermarks(struct intel_display *display)
{
- struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values *old_wm = &display->wm.vlv;
struct vlv_wm_values new_wm = {};
- vlv_merge_wm(dev_priv, &new_wm);
+ vlv_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, false);
+ chv_set_memory_dvfs(display, false);
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, false);
+ chv_set_memory_pm5(display, false);
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- vlv_write_wm_values(dev_priv, &new_wm);
+ vlv_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, true);
+ chv_set_memory_pm5(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, true);
+ chv_set_memory_dvfs(display, true);
*old_wm = new_wm;
}
@@ -2097,33 +2095,33 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
static void vlv_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void i965_update_wm(struct drm_i915_private *dev_priv)
+static void i965_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
int srwm = 1;
@@ -2131,7 +2129,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
@@ -2152,7 +2150,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d, wm: %d\n",
entries, srwm);
@@ -2167,7 +2165,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
@@ -2175,39 +2173,38 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
} else {
cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
srwm);
/* 965 has limitations... */
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(srwm, SR) |
- FW_WM(8, CURSORB) |
- FW_WM(8, PLANEB) |
- FW_WM(8, PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM(8, CURSORA) |
- FW_WM(8, PLANEC_OLD));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(cursor_sr, CURSOR_SR));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
#undef FW_WM
-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+static struct intel_crtc *intel_crtc_for_plane(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
if (plane->id == PLANE_PRIMARY &&
plane->i9xx_plane == i9xx_plane)
return intel_crtc_for_pipe(display, plane->pipe);
@@ -2216,7 +2213,7 @@ static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
return NULL;
}
-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
+static void i9xx_update_wm(struct intel_display *display)
{
const struct intel_watermark_params *wm_info;
u32 fwater_lo;
@@ -2226,29 +2223,29 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int planea_wm, planeb_wm;
struct intel_crtc *crtc;
- if (IS_I945GM(dev_priv))
+ if (display->platform.i945gm)
wm_info = &i945_wm_info;
- else if (DISPLAY_VER(dev_priv) != 2)
+ else if (DISPLAY_VER(display) != 2)
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_A);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_A);
+ crtc = intel_crtc_for_plane(display, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2257,25 +2254,25 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planea_wm = wm_info->max_wm;
}
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
wm_info = &i830_bc_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_B);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_B);
+ crtc = intel_crtc_for_plane(display, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planeb_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2284,11 +2281,11 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planeb_wm = wm_info->max_wm;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- crtc = single_enabled_crtc(dev_priv);
- if (IS_I915GM(dev_priv) && crtc) {
+ crtc = single_enabled_crtc(display);
+ if (display->platform.i915gm && crtc) {
struct drm_gem_object *obj;
obj = intel_fb_bo(crtc->base.primary->state->fb);
@@ -2304,10 +2301,10 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev_priv) && crtc) {
+ if (HAS_FW_BLC(display) && crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *pipe_mode =
@@ -2320,7 +2317,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int cpp;
int entries;
- if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (display->platform.i915gm || display->platform.i945gm)
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2328,20 +2325,20 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ if (display->platform.i945g || display->platform.i945gm)
+ intel_de_write(display, FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
+ intel_de_write(display, FW_BLC_SELF, srwm & 0x3f);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
@@ -2352,34 +2349,34 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
- intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
+ intel_de_write(display, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC2, fwater_hi);
if (crtc)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
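The self-refresh watermark here reduces to "FIFO size minus the number of
cachelines fetched during the latency window". A worked example with
illustrative numbers: assuming a 96 entry FIFO, a 64 byte cacheline, and
intel_wm_method2() reporting 4096 bytes fetched during the 6 usec window:

/*
 * entries = DIV_ROUND_UP(4096, 64) = 64
 * srwm    = 96 - 64 = 32
 *
 * srwm is clamped to at least 1 and then programmed into the
 * FW_BLC_SELF field as shown above.
 */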
-static void i845_update_wm(struct drm_i915_private *dev_priv)
+static void i845_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
u32 fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc == NULL)
return;
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
&i845_wm_info,
- i845_get_fifo_size(dev_priv, PLANE_A),
+ i845_get_fifo_size(display, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
+ fwater_lo = intel_de_read(display, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
@@ -2534,24 +2531,24 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
}
static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+ilk_display_fifo_size(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 3072;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
return 768;
else
return 512;
}
static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ilk_plane_wm_reg_max(struct intel_display *display,
int level, bool is_sprite)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -2563,30 +2560,30 @@ ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
}
static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+ilk_cursor_wm_reg_max(struct intel_display *display, int level)
{
- if (DISPLAY_VER(dev_priv) >= 7)
+ if (DISPLAY_VER(display) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+static unsigned int ilk_fbc_wm_reg_max(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 31;
else
return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_plane_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+ unsigned int fifo_size = ilk_display_fifo_size(display);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -2594,14 +2591,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_NUM_PIPES(dev_priv);
+ fifo_size /= INTEL_NUM_PIPES(display);
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (DISPLAY_VER(dev_priv) < 7)
+ if (DISPLAY_VER(display) < 7)
fifo_size /= 2;
}
@@ -2617,11 +2614,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(display, level, is_sprite));
}
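A worked example of the split above, for an assumed SNB configuration
(display version 6, both pipes active, level 0):

/*
 * fifo_size = 512;	ILK/SNB LP FIFO size
 * fifo_size /= 2;	INTEL_NUM_PIPES() == 2
 * fifo_size /= 2;	DISPLAY_VER() < 7: non-SR FIFO is half size
 *
 * -> 128 entries, before the (elided) 1/2 vs 5/6 DDB partitioning
 *    adjustment and the final clamp against ilk_plane_wm_reg_max().
 */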
/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_cursor_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config)
{
@@ -2630,32 +2627,32 @@ static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev_priv, level);
+ return ilk_cursor_wm_reg_max(display, level);
}
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_maximums(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev_priv, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_max(display, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(display, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(display, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_reg_maximums(struct intel_display *display,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
- max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_reg_max(display, level, false);
+ max->spr = ilk_plane_wm_reg_max(display, level, true);
+ max->cur = ilk_cursor_wm_reg_max(display, level);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static bool ilk_validate_wm_level(struct drm_i915_private *i915,
+static bool ilk_validate_wm_level(struct intel_display *display,
int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
@@ -2679,15 +2676,15 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
*/
if (level == 0 && !result->enable) {
if (result->pri_val > max->pri)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Primary WM%d too large %u (max %u)\n",
level, result->pri_val, max->pri);
if (result->spr_val > max->spr)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Sprite WM%d too large %u (max %u)\n",
level, result->spr_val, max->spr);
if (result->cur_val > max->cur)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
@@ -2700,7 +2697,7 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
return ret;
}
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(struct intel_display *display,
const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
@@ -2709,9 +2706,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- u16 pri_latency = dev_priv->display.wm.pri_latency[level];
- u16 spr_latency = dev_priv->display.wm.spr_latency[level];
- u16 cur_latency = dev_priv->display.wm.cur_latency[level];
+ u16 pri_latency = display->wm.pri_latency[level];
+ u16 spr_latency = display->wm.spr_latency[level];
+ u16 cur_latency = display->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2735,11 +2732,12 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
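The "0.5us units" comment is what the elided scaling implements: WM0
latencies are kept in 0.1 usec units, while the WM1+ values read back from
MLTR/SSKPD are in 0.5 usec units and get multiplied by 5 before use.
Illustrative example:

/*
 * pri_latency[1] == 4 (i.e. 2.0 usec) is scaled to 20 so that all
 * levels feed the ilk_compute_*_wm() helpers in 0.1 usec units.
 */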
-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void hsw_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u64 sskpd;
- i915->display.wm.num_levels = 5;
+ display->wm.num_levels = 5;
sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
@@ -2752,11 +2750,12 @@ static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
}
-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void snb_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 sskpd;
- i915->display.wm.num_levels = 4;
+ display->wm.num_levels = 4;
sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
@@ -2766,11 +2765,12 @@ static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
}
-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void ilk_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 mltr;
- i915->display.wm.num_levels = 3;
+ display->wm.num_levels = 3;
mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
@@ -2780,24 +2780,21 @@ static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_spr_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_cur_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
+static bool ilk_increase_wm_latency(struct intel_display *display, u16 wm[5], u16 min)
{
int level;
@@ -2805,13 +2802,13 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return false;
wm[0] = max(wm[0], min);
- for (level = 1; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 1; level < display->wm.num_levels; level++)
wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}
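Because WM0 is held in 0.1 usec units but WM1+ in 0.5 usec units, the same
floor has to be applied in two scales; that is what DIV_ROUND_UP(min, 5)
does. For the SNB quirk below, min = 12:

/*
 * wm[0]    = max(wm[0], 12)                    -> at least 1.2 usec
 * wm[1]... = max(wm[level], DIV_ROUND_UP(12, 5))
 *          = max(wm[level], 3)                 -> at least 1.5 usec
 */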
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_latency_quirk(struct intel_display *display)
{
bool changed;
@@ -2819,21 +2816,21 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(display, display->wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.cur_latency, 12);
if (!changed)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_lp3_irq_quirk(struct intel_display *display)
{
/*
* On some SNB machines (Thinkpad X220 Tablet at least)
@@ -2846,50 +2843,50 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
- if (dev_priv->display.wm.pri_latency[3] == 0 &&
- dev_priv->display.wm.spr_latency[3] == 0 &&
- dev_priv->display.wm.cur_latency[3] == 0)
+ if (display->wm.pri_latency[3] == 0 &&
+ display->wm.spr_latency[3] == 0 &&
+ display->wm.cur_latency[3] == 0)
return;
- dev_priv->display.wm.pri_latency[3] = 0;
- dev_priv->display.wm.spr_latency[3] = 0;
- dev_priv->display.wm.cur_latency[3] = 0;
+ display->wm.pri_latency[3] = 0;
+ display->wm.spr_latency[3] = 0;
+ display->wm.cur_latency[3] = 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void ilk_setup_wm_latency(struct intel_display *display)
{
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else if (DISPLAY_VER(dev_priv) >= 6)
- snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ if (display->platform.broadwell || display->platform.haswell)
+ hsw_read_wm_latency(display, display->wm.pri_latency);
+ else if (DISPLAY_VER(display) >= 6)
+ snb_read_wm_latency(display, display->wm.pri_latency);
else
- ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ ilk_read_wm_latency(display, display->wm.pri_latency);
- memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
- memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(display->wm.spr_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
+ memcpy(display->wm.cur_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+ intel_fixup_spr_wm_latency(display, display->wm.spr_latency);
+ intel_fixup_cur_wm_latency(display, display->wm.cur_latency);
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
- if (DISPLAY_VER(dev_priv) == 6) {
- snb_wm_latency_quirk(dev_priv);
- snb_wm_lp3_irq_quirk(dev_priv);
+ if (DISPLAY_VER(display) == 6) {
+ snb_wm_latency_quirk(display);
+ snb_wm_lp3_irq_quirk(display);
}
}
-static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(struct intel_display *display,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -2901,11 +2898,11 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_compute_wm_maximums(display, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
- if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
- drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+ if (!ilk_validate_wm_level(display, 0, &max, &pipe_wm->wm[0])) {
+ drm_dbg_kms(display->drm, "LP0 watermark invalid\n");
return false;
}
@@ -2916,7 +2913,7 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_pipe_wm *pipe_wm;
@@ -2943,10 +2940,10 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
- usable_level = dev_priv->display.wm.num_levels - 1;
+ usable_level = display->wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(display) < 7 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2954,18 +2951,18 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+ ilk_compute_wm_level(display, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
- if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+ if (!ilk_validate_pipe_wm(display, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+ ilk_compute_wm_reg_maximums(display, 1, &max);
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+ ilk_compute_wm_level(display, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -2973,7 +2970,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
* register maximums since such watermarks are
* always invalid.
*/
- if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
+ if (!ilk_validate_wm_level(display, level, &max, wm)) {
memset(wm, 0, sizeof(*wm));
break;
}
@@ -2990,7 +2987,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -3015,7 +3012,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->sprites_enabled |= active->sprites_enabled;
intermediate->sprites_scaled |= active->sprites_scaled;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct intel_wm_level *intermediate_wm = &intermediate->wm[level];
const struct intel_wm_level *active_wm = &active->wm[level];
@@ -3036,7 +3033,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev_priv, intermediate))
+ if (!ilk_validate_pipe_wm(display, intermediate))
return -EINVAL;
/*
@@ -3068,7 +3065,7 @@ static int ilk_compute_watermarks(struct intel_atomic_state *state,
/*
* Merge the watermarks from all active pipes for a specific level.
*/
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_merge_wm_level(struct intel_display *display,
int level,
struct intel_wm_level *ret_wm)
{
@@ -3076,7 +3073,7 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
ret_wm->enable = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
@@ -3101,31 +3098,31 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
/*
* Merge all low power watermarks for all active pipes.
*/
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+static void ilk_wm_merge(struct intel_display *display,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- int level, num_levels = dev_priv->display.wm.num_levels;
+ int level, num_levels = display->wm.num_levels;
int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(display) < 7 || display->platform.ivybridge) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+ merged->fbc_wm_enabled = DISPLAY_VER(display) >= 6;
/* merge each WM1+ level */
for (level = 1; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
- ilk_merge_wm_level(dev_priv, level, wm);
+ ilk_merge_wm_level(display, level, wm);
if (level > last_enabled_level)
wm->enable = false;
- else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
+ else if (!ilk_validate_wm_level(display, level, max, wm))
/* make sure all following levels get disabled */
last_enabled_level = level - 1;
@@ -3141,8 +3138,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
}
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
+ if (DISPLAY_VER(display) == 5 && HAS_FBC(display) &&
+ display->params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3158,16 +3155,16 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
}
/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+static unsigned int ilk_wm_lp_latency(struct intel_display *display,
int level)
{
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (display->platform.haswell || display->platform.broadwell)
return 2 * level;
else
- return dev_priv->display.wm.pri_latency[level];
+ return display->wm.pri_latency[level];
}
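On HSW/BDW the WM_LPx latency field does not hold a real memory latency; it
is programmed as 2 * level, while the older parts program the pri_latency[]
value as read back from MLTR/SSKPD. Illustrative values:

/*
 * HSW/BDW:     level 1 -> WM_LP_LATENCY(2), level 2 -> WM_LP_LATENCY(4)
 * ILK/SNB/IVB: level 1 -> WM_LP_LATENCY(pri_latency[1]) as read back
 *              from MLTR/SSKPD
 */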
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_results(struct intel_display *display,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
@@ -3191,14 +3188,14 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_LATENCY(ilk_wm_lp_latency(display, level)) |
WM_LP_PRIMARY(r->pri_val) |
WM_LP_CURSOR(r->cur_val);
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
else
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
@@ -3209,19 +3206,19 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
- drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+ if (DISPLAY_VER(display) < 7 && r->spr_val) {
+ drm_WARN_ON(display->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
}
}
/* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
const struct intel_wm_level *r = &pipe_wm->wm[0];
- if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+ if (drm_WARN_ON(display->drm, !r->enable))
continue;
results->wm_pipe[pipe] =
@@ -3236,13 +3233,13 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* case both are at the same level. Prefer r1 in case they're the same.
*/
static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
+ilk_find_best_result(struct intel_display *display,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
int level, level1 = 0, level2 = 0;
- for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 1; level < display->wm.num_levels; level++) {
if (r1->wm[level].enable)
level1 = level;
if (r2->wm[level].enable)
@@ -3268,7 +3265,7 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+static unsigned int ilk_compute_wm_dirty(struct intel_display *display,
const struct ilk_wm_values *old,
const struct ilk_wm_values *new)
{
@@ -3276,7 +3273,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
enum pipe pipe;
int wm_lp;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
@@ -3314,25 +3311,25 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
return dirty;
}
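The dirty mask packs per-pipe and per-LP-level bits alongside the FBC/DDB
bits defined above. The lower-numbered macros are outside this hunk, so the
layout below is assumed from the usage:

#define WM_DIRTY_PIPE(pipe)	(1 << (pipe))		/* assumed: bits 0-2 */
#define WM_DIRTY_LP(wm_lp)	(1 << (14 + (wm_lp)))	/* assumed: bits 15-17 */
#define WM_DIRTY_LP_ALL		(WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))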
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+static bool _ilk_disable_lp_wm(struct intel_display *display,
unsigned int dirty)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
previous->wm_lp[2] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
previous->wm_lp[1] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
previous->wm_lp[0] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3348,73 +3345,73 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
* The spec says we shouldn't write when we don't need, because every write
* causes WMs to be re-evaluated, expending some power.
*/
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+static void ilk_write_wm_values(struct intel_display *display,
struct ilk_wm_values *results)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
unsigned int dirty;
- dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+ dirty = ilk_compute_wm_dirty(display, previous, results);
if (!dirty)
return;
- _ilk_disable_lp_wm(dev_priv, dirty);
+ _ilk_disable_lp_wm(display, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- WM_MISC_DATA_PARTITION_5_6);
+ if (display->platform.haswell || display->platform.broadwell)
+ intel_de_rmw(display, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
else
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- DISP_DATA_PARTITION_5_6);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
}
if (dirty & WM_DIRTY_FBC)
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
- results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+ intel_de_rmw(display, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_de_write(display, WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (DISPLAY_VER(dev_priv) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_de_write(display, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_de_write(display, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, results->wm_lp[2]);
- dev_priv->display.wm.hw = *results;
+ display->wm.hw = *results;
}
-bool ilk_disable_cxsr(struct drm_i915_private *dev_priv)
+bool ilk_disable_cxsr(struct intel_display *display)
{
- return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+ return _ilk_disable_lp_wm(display, WM_DIRTY_LP_ALL);
}
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_config(struct intel_display *display,
struct intel_wm_config *config)
{
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -3426,7 +3423,7 @@ static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
}
}
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_program_watermarks(struct intel_display *display)
{
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
@@ -3434,18 +3431,18 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev_priv, &config);
+ ilk_compute_wm_config(display, &config);
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (DISPLAY_VER(dev_priv) >= 7 &&
+ if (DISPLAY_VER(display) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(display, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -3453,50 +3450,49 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(display, best_lp_wm, partitioning, &results);
- ilk_write_wm_values(dev_priv, &results);
+ ilk_write_wm_values(display, &results);
}
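Decision flow of ilk_program_watermarks(), as restructured above: the 1/2
DDB partitioning merge is always computed; on display version 7+ with a
single active pipe and sprites enabled the 5/6 split is computed as well,
and ilk_find_best_result() picks whichever result keeps the deeper LP level
enabled. A condensed sketch:

/*
 * merge(1/2)                             -> lp_wm_1_2
 * if (ver >= 7 && one pipe && sprites) {
 *         merge(5/6)                     -> lp_wm_5_6
 *         best = find_best(lp_wm_1_2, lp_wm_5_6)
 * } else {
 *         best = lp_wm_1_2
 * }
 */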
static void ilk_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_display *display = to_intel_display(crtc);
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_de_read(display, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -3523,7 +3519,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
* should be marked as enabled but zeroed,
* which is what we'd compute them to.
*/
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
active->wm[level].enable = true;
}
@@ -3572,7 +3568,7 @@ static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
* through the atomic check code to calculate new watermark values in the
* state object.
*/
-void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+void ilk_wm_sanitize(struct intel_display *display)
{
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
@@ -3583,14 +3579,14 @@ void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ if (!display->funcs.wm->optimize_watermarks)
return;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 9))
return;
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
+ state = drm_atomic_state_alloc(display->drm);
+ if (drm_WARN_ON(display->drm, !state))
return;
intel_state = to_intel_atomic_state(state);
@@ -3606,14 +3602,14 @@ retry:
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(display))
intel_state->skip_intermediate_wm = true;
ret = ilk_sanitize_watermarks_add_affected(state);
if (ret)
goto fail;
- ret = intel_atomic_check(&dev_priv->drm, state);
+ ret = intel_atomic_check(display->drm, state);
if (ret)
goto fail;
@@ -3643,7 +3639,7 @@ fail:
* If this actually happens, we'll have to just leave the
* BIOS-programmed watermarks untouched and hope for the best.
*/
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Could not determine valid watermarks for inherited state\n");
drm_atomic_state_put(state);
@@ -3657,18 +3653,18 @@ fail:
#define _FW_WM_VLV(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_read_wm_values(struct intel_display *display,
struct g4x_wm_values *wm)
{
u32 tmp;
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -3676,21 +3672,21 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
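
The `_FW_WM()`/`_FW_WM_VLV()` helpers used throughout these readout functions are plain mask-and-shift field extractors built with token pasting. A minimal self-contained sketch of the same idiom, using made-up mask/shift values rather than the real DSPFW bit layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only -- not the real DSPFW bit positions. */
#define DSPFW_SR_MASK	(0x1ffu << 23)
#define DSPFW_SR_SHIFT	23

/* Same shape as _FW_WM(): paste the field name into mask/shift macros. */
#define FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)

int main(void)
{
	uint32_t reg = 0x55u << 23;	/* pretend register readout */

	printf("SR watermark field: %u\n", FW_WM(reg, SR));
	return 0;
}
```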
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_read_wm_values(struct intel_display *display,
struct vlv_wm_values *wm)
{
enum pipe pipe;
u32 tmp;
- for_each_pipe(dev_priv, pipe) {
- tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+ for_each_pipe(display, pipe) {
+ tmp = intel_de_read(display, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -3702,34 +3698,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- if (IS_CHERRYVIEW(dev_priv)) {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+ if (display->platform.cherryview) {
+ tmp = intel_de_read(display, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+ tmp = intel_de_read(display, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+ tmp = intel_de_read(display, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -3741,11 +3737,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+ tmp = intel_de_read(display, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -3759,16 +3755,16 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
-static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void g4x_wm_get_hw_state(struct intel_display *display)
{
- struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *wm = &display->wm.g4x;
struct intel_crtc *crtc;
- g4x_read_wm_values(dev_priv, wm);
+ g4x_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -3833,7 +3829,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3841,26 +3837,25 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE0]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ drm_dbg_kms(display->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
str_yes_no(wm->fbc_en));
}
-static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+static void g4x_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -3873,7 +3868,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.g4x.raw[level];
@@ -3884,39 +3879,39 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _g4x_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.g4x.intermediate =
crtc_state->wm.g4x.optimal;
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
}
- g4x_program_watermarks(dev_priv);
+ g4x_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void vlv_wm_get_hw_state(struct intel_display *display)
{
- struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
- vlv_read_wm_values(dev_priv, wm);
+ vlv_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
- if (IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
+ if (display->platform.cherryview) {
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
@@ -3929,26 +3924,26 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
* HIGH/LOW bits so that we don't actually change
* the current state.
*/
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
} else {
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
}
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -3988,7 +3983,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.vlv.optimal = *active;
crtc_state->wm.vlv.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3997,20 +3992,19 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE1]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
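
The CHV branch above probes DDR DVFS by setting a request bit that the Punit acknowledges by clearing it, polling with a short timeout and falling back conservatively (fewer watermark levels) on no ack. A self-contained model of that request/ack handshake; the register behaviour here is simulated, not real Punit semantics:

```c
#include <stdint.h>
#include <stdio.h>

#define FORCE_REQ_ACK	(1u << 8)

static uint32_t punit_reg;	/* simulated register */

/* Simulated read: pretend the Punit clears the ack after two polls. */
static uint32_t punit_read(void)
{
	static int polls;

	if (++polls >= 2)
		punit_reg &= ~FORCE_REQ_ACK;
	return punit_reg;
}

int main(void)
{
	int tries;

	punit_reg |= FORCE_REQ_ACK;	/* issue the request */

	/* Poll for the ack with a bounded number of attempts, like
	 * wait_for(..., 3) in vlv_wm_get_hw_state(). */
	for (tries = 0; tries < 3; tries++) {
		if (!(punit_read() & FORCE_REQ_ACK)) {
			printf("acked: DDR DVFS usable\n");
			return 0;
		}
	}

	/* No ack: assume the feature is off and limit the WM levels. */
	printf("no ack: assuming DDR DVFS disabled\n");
	return 0;
}
```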
-static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+static void vlv_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -4023,7 +4017,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
@@ -4031,33 +4025,33 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _vlv_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.vlv.intermediate =
crtc_state->wm.vlv.optimal;
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
}
- vlv_program_watermarks(dev_priv);
+ vlv_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
/*
* FIXME should probably kill this and improve
* the real watermark readout/sanitation instead
*/
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_init_lp_watermarks(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM3_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM2_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM1_LP_ILK, WM_LP_ENABLE, 0);
/*
* Don't touch WM_LP_SPRITE_ENABLE here.
@@ -4065,37 +4059,37 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void ilk_wm_get_hw_state(struct intel_display *display)
{
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc *crtc;
- ilk_init_lp_watermarks(dev_priv);
+ ilk_init_lp_watermarks(display);
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
- hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
- hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+ hw->wm_lp[0] = intel_de_read(display, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_de_read(display, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_de_read(display, WM3_LP_ILK);
- hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (DISPLAY_VER(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
- hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+ hw->wm_lp_spr[0] = intel_de_read(display, WM1S_LP_ILK);
+ if (DISPLAY_VER(display) >= 7) {
+ hw->wm_lp_spr[1] = intel_de_read(display, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_de_read(display, WM3S_LP_IVB);
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ if (display->platform.haswell || display->platform.broadwell)
+ hw->partitioning = (intel_de_read(display, WM_MISC) &
WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ else if (display->platform.ivybridge)
+ hw->partitioning = (intel_de_read(display, DISP_ARB_CTL2) &
DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_de_read(display, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
static const struct intel_wm_funcs ilk_wm_funcs = {
@@ -4145,39 +4139,39 @@ static const struct intel_wm_funcs i845_wm_funcs = {
static const struct intel_wm_funcs nop_funcs = {
};
-void i9xx_wm_init(struct drm_i915_private *dev_priv)
+void i9xx_wm_init(struct intel_display *display)
{
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &vlv_wm_funcs;
- } else if (IS_G4X(dev_priv)) {
- g4x_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &g4x_wm_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
- if (!pnv_get_cxsr_latency(dev_priv)) {
- drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ if (HAS_PCH_SPLIT(display)) {
+ ilk_setup_wm_latency(display);
+ display->funcs.wm = &ilk_wm_funcs;
+ } else if (display->platform.valleyview || display->platform.cherryview) {
+ vlv_setup_wm_latency(display);
+ display->funcs.wm = &vlv_wm_funcs;
+ } else if (display->platform.g4x) {
+ g4x_setup_wm_latency(display);
+ display->funcs.wm = &g4x_wm_funcs;
+ } else if (display->platform.pineview) {
+ if (!pnv_get_cxsr_latency(display)) {
+ drm_info(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
/* Disable CxSR and never update its watermark again */
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.funcs.wm = &nop_funcs;
+ intel_set_memory_cxsr(display, false);
+ display->funcs.wm = &nop_funcs;
} else {
- dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ display->funcs.wm = &pnv_wm_funcs;
}
- } else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.funcs.wm = &i965_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->display.funcs.wm = &i845_wm_funcs;
+ } else if (DISPLAY_VER(display) == 4) {
+ display->funcs.wm = &i965_wm_funcs;
+ } else if (DISPLAY_VER(display) == 3) {
+ display->funcs.wm = &i9xx_wm_funcs;
+ } else if (DISPLAY_VER(display) == 2) {
+ if (INTEL_NUM_PIPES(display) == 1)
+ display->funcs.wm = &i845_wm_funcs;
else
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ display->funcs.wm = &i9xx_wm_funcs;
} else {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"unexpected fall-through in %s\n", __func__);
- dev_priv->display.funcs.wm = &nop_funcs;
+ display->funcs.wm = &nop_funcs;
}
}
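
i9xx_wm_init() is a classic function-pointer vtable selection: each platform family gets its own `intel_wm_funcs`, and the empty `nop_funcs` keeps every caller unconditional. The pattern in miniature, with illustrative platform names and a single op rather than the driver's real table:

```c
#include <stdio.h>

struct wm_funcs {
	void (*program)(void);	/* may be NULL, as in nop_funcs */
};

static void ilk_program(void) { printf("ilk watermarks\n"); }
static void vlv_program(void) { printf("vlv watermarks\n"); }

static const struct wm_funcs ilk_funcs = { .program = ilk_program };
static const struct wm_funcs vlv_funcs = { .program = vlv_program };
static const struct wm_funcs nop_funcs = { };

enum platform { PLAT_ILK, PLAT_VLV, PLAT_UNKNOWN };

static const struct wm_funcs *wm_init(enum platform p)
{
	switch (p) {
	case PLAT_ILK: return &ilk_funcs;
	case PLAT_VLV: return &vlv_funcs;
	default:       return &nop_funcs;	/* safe fallback */
	}
}

int main(void)
{
	const struct wm_funcs *funcs = wm_init(PLAT_VLV);

	if (funcs->program)
		funcs->program();
	return 0;
}
```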
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
index 06ac37c6c94b..7bb363b2a756 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.h
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
#ifdef I915
-bool ilk_disable_cxsr(struct drm_i915_private *i915);
-void ilk_wm_sanitize(struct drm_i915_private *i915);
-bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
-void i9xx_wm_init(struct drm_i915_private *i915);
+bool ilk_disable_cxsr(struct intel_display *display);
+void ilk_wm_sanitize(struct intel_display *display);
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable);
+void i9xx_wm_init(struct intel_display *display);
#else
-static inline bool ilk_disable_cxsr(struct drm_i915_private *i915)
+static inline bool ilk_disable_cxsr(struct intel_display *display)
{
return false;
}
-static inline void ilk_wm_sanitize(struct drm_i915_private *i915)
+static inline void ilk_wm_sanitize(struct intel_display *display)
{
}
-static inline bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable)
+static inline bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
return false;
}
-static inline void i9xx_wm_init(struct drm_i915_private *i915)
+static inline void i9xx_wm_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 402b7b2e1829..8d9cb73a93a7 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
@@ -44,6 +45,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
@@ -191,12 +193,12 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
else
tmp &= ~PAYLOAD_PRESENT;
- tmp &= ~VBLANK_FENCE;
+ tmp &= ~(VBLANK_FENCE | LP_DATA_TRANSFER | PIPELINE_FLUSH);
if (enable_lpdt)
tmp |= LP_DATA_TRANSFER;
else
- tmp &= ~LP_DATA_TRANSFER;
+ tmp |= PIPELINE_FLUSH;
tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT);

@@ -657,7 +659,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy;
u32 val;
@@ -1275,6 +1277,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+ intel_panel_prepare(crtc_state, conn_state);
+
intel_crtc_vblank_on(crtc_state);
}
@@ -1408,6 +1412,8 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ intel_panel_unprepare(old_conn_state);
+
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
@@ -1826,107 +1832,56 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
.transfer = gen11_dsi_host_transfer,
};
-#define ICL_PREPARE_CNT_MAX 0x7
-#define ICL_CLK_ZERO_CNT_MAX 0xf
-#define ICL_TRAIL_CNT_MAX 0x7
-#define ICL_TCLK_PRE_CNT_MAX 0x3
-#define ICL_TCLK_POST_CNT_MAX 0x7
-#define ICL_HS_ZERO_CNT_MAX 0xf
-#define ICL_EXIT_ZERO_CNT_MAX 0x7
-
static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 hs_zero_cnt;
- u32 tclk_pre_cnt;
+ u32 tclk_prepare_esc_clk, tclk_zero_esc_clk, tclk_pre_esc_clk;
+ u32 ths_prepare_esc_clk, ths_zero_esc_clk, ths_exit_esc_clk;
tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
/*
- * prepare cnt in escape clocks
- * this field represents a hexadecimal value with a precision
- * of 1.2 – i.e. the most significant bit is the integer
- * and the least significant 2 bits are fraction bits.
- * so, the field can represent a range of 0.25 to 1.75
+	 * The clock and data lane prepare timing parameters are expressed in
+	 * units of 1/4 escape clocks, and all the other timing parameters in
+	 * escape clocks.
*/
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
- if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n",
- prepare_cnt);
- prepare_cnt = ICL_PREPARE_CNT_MAX;
- }
+ tclk_prepare_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare * 4, tlpx_ns);
+ tclk_prepare_esc_clk = min(tclk_prepare_esc_clk, 7);
- /* clk zero count in escape clocks */
- clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
- ths_prepare_ns, tlpx_ns);
- if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
- clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
- }
+ tclk_zero_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ mipi_config->tclk_prepare, tlpx_ns);
+ tclk_zero_esc_clk = min(tclk_zero_esc_clk, 15);
- /* trail cnt in escape clocks*/
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
- if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n",
- trail_cnt);
- trail_cnt = ICL_TRAIL_CNT_MAX;
- }
+ tclk_pre_esc_clk = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ tclk_pre_esc_clk = min(tclk_pre_esc_clk, 3);
- /* tclk pre count in escape clocks */
- tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
- if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
- tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
- }
+ ths_prepare_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare * 4, tlpx_ns);
+ ths_prepare_esc_clk = min(ths_prepare_esc_clk, 7);
- /* hs zero cnt in escape clocks */
- hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
- ths_prepare_ns, tlpx_ns);
- if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n",
- hs_zero_cnt);
- hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
- }
+ ths_zero_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ mipi_config->ths_prepare, tlpx_ns);
+ ths_zero_esc_clk = min(ths_zero_esc_clk, 15);
- /* hs exit zero cnt in escape clocks */
- exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
- if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "exit_zero_cnt out of range (%d)\n",
- exit_zero_cnt);
- exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
- }
+ ths_exit_esc_clk = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ ths_exit_esc_clk = min(ths_exit_esc_clk, 7);
/* clock lane dphy timings */
intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
- CLK_PREPARE(prepare_cnt) |
+ CLK_PREPARE(tclk_prepare_esc_clk) |
CLK_ZERO_OVERRIDE |
- CLK_ZERO(clk_zero_cnt) |
+ CLK_ZERO(tclk_zero_esc_clk) |
CLK_PRE_OVERRIDE |
- CLK_PRE(tclk_pre_cnt) |
- CLK_TRAIL_OVERRIDE |
- CLK_TRAIL(trail_cnt));
+ CLK_PRE(tclk_pre_esc_clk));
/* data lanes dphy timings */
intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
- HS_PREPARE(prepare_cnt) |
+ HS_PREPARE(ths_prepare_esc_clk) |
HS_ZERO_OVERRIDE |
- HS_ZERO(hs_zero_cnt) |
- HS_TRAIL_OVERRIDE |
- HS_TRAIL(trail_cnt) |
+ HS_ZERO(ths_zero_esc_clk) |
HS_EXIT_OVERRIDE |
- HS_EXIT(exit_zero_cnt));
+ HS_EXIT(ths_exit_esc_clk));
intel_dsi_log_params(intel_dsi);
}
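
The rewritten icl_dphy_param_init() converts VBT nanosecond timings into escape-clock counts with a round-up division and then clamps to the register field width; the prepare counts are in 1/4 escape clocks, hence the `* 4`. A worked example with hypothetical panel numbers (not from any real VBT):

```c
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical panel values. */
	uint32_t tlpx_ns = 50;		/* one escape clock period */
	uint32_t tclk_prepare_ns = 70;

	/* Prepare counts are in quarter escape clocks: scale by 4,
	 * round up, then clamp to the 3-bit field (max 7). */
	uint32_t cnt = DIV_ROUND_UP(tclk_prepare_ns * 4, tlpx_ns);

	cnt = min_u32(cnt, 7);

	/* 70 * 4 = 280; ceil(280 / 50) = 6, within range. */
	printf("tclk_prepare: %u quarter escape clocks\n", cnt);
	return 0;
}
```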
diff --git a/drivers/gpu/drm/i915/display/icl_dsi_regs.h b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
index d4845ac65acc..b601b7632339 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
@@ -272,6 +272,7 @@
#define PAYLOAD_PRESENT (1 << 31)
#define LP_DATA_TRANSFER (1 << 30)
#define VBLANK_FENCE (1 << 29)
+#define PIPELINE_FLUSH (1 << 28)
#define PARAM_WC_MASK (0xffff << 8)
#define PARAM_WC_LOWER_SHIFT 8
#define PARAM_WC_UPPER_SHIFT 16
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 55f3ae1e68c9..dfdde8e4eabe 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -5,12 +5,15 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "intel_alpm.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
+#include "intel_psr.h"
#include "intel_psr_regs.h"
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp)
@@ -23,7 +26,14 @@ bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp)
return intel_dp->alpm_dpcd & DP_ALPM_AUX_LESS_CAP;
}
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
+bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return intel_psr_needs_alpm_aux_less(intel_dp, crtc_state) ||
+ (crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp));
+}
+
+void intel_alpm_init(struct intel_dp *intel_dp)
{
u8 dpcd;
@@ -31,6 +41,7 @@ void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
return;
intel_dp->alpm_dpcd = dpcd;
+ mutex_init(&intel_dp->alpm_parameters.lock);
}
/*
@@ -276,6 +287,14 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
+ if (intel_dp->alpm_parameters.lobf_disable_debug) {
+ drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
+ return;
+ }
+
+ if (intel_dp->alpm_parameters.sink_alpm_error)
+ return;
+
if (!intel_dp_is_edp(intel_dp))
return;
@@ -288,6 +307,10 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (crtc_state->has_psr)
return;
+ if (crtc_state->vrr.vmin != crtc_state->vrr.vmax ||
+ crtc_state->vrr.vmin != crtc_state->vrr.flipline)
+ return;
+
if (!(intel_alpm_aux_wake_supported(intel_dp) ||
intel_alpm_aux_less_wake_supported(intel_dp)))
return;
@@ -313,58 +336,173 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 alpm_ctl;
- if (DISPLAY_VER(display) < 20 ||
- (!intel_dp->psr.sel_update_enabled && !intel_dp_is_edp(intel_dp)))
+ if (DISPLAY_VER(display) < 20 || (!intel_psr_needs_alpm(intel_dp, crtc_state) &&
+ !crtc_state->has_lobf))
return;
+ mutex_lock(&intel_dp->alpm_parameters.lock);
/*
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
*/
- if ((intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) ||
- (crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp))) {
+ if (intel_alpm_is_alpm_aux_less(intel_dp, crtc_state)) {
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS |
ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
- intel_de_write(display,
- PORT_ALPM_CTL(port),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
- PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
- PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
- PORT_ALPM_CTL_SILENCE_PERIOD(
- intel_dp->alpm_parameters.silence_period_sym_clocks));
-
- intel_de_write(display,
- PORT_ALPM_LFPS_CTL(port),
- PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
- PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms));
+ if (intel_dp->as_sdp_supported) {
+ u32 pr_alpm_ctl = PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1;
+
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
+ DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP)
+ pr_alpm_ctl |= PR_ALPM_CTL_ALLOW_LINK_OFF_BETWEEN_AS_SDP_AND_SU;
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
+ DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR))
+ pr_alpm_ctl |= PR_ALPM_CTL_AS_SDP_TRANSMISSION_IN_ACTIVE_DISABLE;
+
+ intel_de_write(display, PR_ALPM_CTL(display, cpu_transcoder),
+ pr_alpm_ctl);
+ }
+
} else {
alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
}
- if (crtc_state->has_lobf)
+ if (crtc_state->has_lobf) {
alpm_ctl |= ALPM_CTL_LOBF_ENABLE;
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) enabled\n");
+ }
alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
lnl_alpm_configure(intel_dp, crtc_state);
+ intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder;
+}
+
+void intel_alpm_port_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ u32 alpm_ctl_val = 0, lfps_ctl_val = 0;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ if (intel_alpm_is_alpm_aux_less(intel_dp, crtc_state)) {
+ alpm_ctl_val = PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
+ PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
+ PORT_ALPM_CTL_SILENCE_PERIOD(
+ intel_dp->alpm_parameters.silence_period_sym_clocks);
+ lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
+ PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
+ intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
+ intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
+ intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms);
+ }
+
+ intel_de_write(display, PORT_ALPM_CTL(port), alpm_ctl_val);
+
+ intel_de_write(display, PORT_ALPM_LFPS_CTL(port), lfps_ctl_val);
+}
+
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ if (crtc_state->has_lobf || crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp_is_edp(intel_dp))
+ continue;
+
+ if (old_crtc_state->has_lobf) {
+ mutex_lock(&intel_dp->alpm_parameters.lock);
+ intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
+ }
+ }
+}
+
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 val;
+
+ if (!intel_psr_needs_alpm(intel_dp, crtc_state) && !crtc_state->has_lobf)
+ return;
+
+ val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
+
+ if (crtc_state->has_panel_replay || (crtc_state->has_lobf &&
+ intel_alpm_aux_less_wake_supported(intel_dp)))
+ val |= DP_ALPM_MODE_AUX_LESS;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
+}
+
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
+
+ if (crtc_state->has_psr || !crtc_state->has_lobf ||
+ crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ intel_alpm_enable_sink(intel_dp, crtc_state);
+ intel_alpm_configure(intel_dp, crtc_state);
+ }
+ }
}
static int i915_edp_lobf_info_show(struct seq_file *m, void *data)
@@ -403,6 +541,32 @@ out:
DEFINE_SHOW_ATTRIBUTE(i915_edp_lobf_info);
+static int
+i915_edp_lobf_debug_get(void *data, u64 *val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ *val = intel_dp->alpm_parameters.lobf_disable_debug;
+
+ return 0;
+}
+
+static int
+i915_edp_lobf_debug_set(void *data, u64 val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ intel_dp->alpm_parameters.lobf_disable_debug = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_lobf_debug_fops,
+ i915_edp_lobf_debug_get, i915_edp_lobf_debug_set,
+ "%llu\n");
+
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
@@ -412,6 +576,55 @@ void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
return;
+ debugfs_create_file("i915_edp_lobf_debug", 0644, root,
+ connector, &i915_edp_lobf_debug_fops);
+
debugfs_create_file("i915_edp_lobf_info", 0444, root,
connector, &i915_edp_lobf_info_fops);
}
+
+void intel_alpm_disable(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->alpm_parameters.transcoder;
+
+ if (DISPLAY_VER(display) < 20 || !intel_dp->alpm_dpcd)
+ return;
+
+ mutex_lock(&intel_dp->alpm_parameters.lock);
+
+ intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
+ ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ intel_de_rmw(display,
+ PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ drm_dbg_kms(display->drm, "Disabling ALPM\n");
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
+}
+
+bool intel_alpm_get_error(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ u8 val;
+ int r;
+
+ r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+ if (r != 1) {
+ drm_err(display->drm, "Error reading ALPM status\n");
+ return true;
+ }
+
+ if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ drm_dbg_kms(display->drm, "ALPM lock timeout error\n");
+
+		/* Clear the latched error by writing the set bit back */
+ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+ return true;
+ }
+
+ return false;
+}
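
intel_alpm_get_error() reads the sink's ALPM status over DPCD and clears a latched lock-timeout error by writing the set bit back, i.e. the status register behaves as write-1-to-clear. A self-contained model of that readout/clear cycle, with a simulated register standing in for the DPCD access:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOCK_TIMEOUT_ERROR	(1u << 0)

static uint8_t alpm_status = LOCK_TIMEOUT_ERROR;	/* simulated DPCD reg */

static uint8_t status_read(void)
{
	return alpm_status;
}

/* Write-1-to-clear: set bits in the written value clear the latch. */
static void status_write(uint8_t val)
{
	alpm_status &= ~val;
}

static bool get_error(void)
{
	uint8_t val = status_read();

	if (val & LOCK_TIMEOUT_ERROR) {
		printf("ALPM lock timeout error, clearing\n");
		status_write(val);
		return true;
	}
	return false;
}

int main(void)
{
	printf("first check:  %d\n", get_error());	/* 1: latched */
	printf("second check: %d\n", get_error());	/* 0: cleared */
	return 0;
}
```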
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index 8c409b10dce6..a861c20b5d79 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -12,8 +12,10 @@ struct intel_dp;
struct intel_crtc_state;
struct drm_connector_state;
struct intel_connector;
+struct intel_atomic_state;
+struct intel_crtc;
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp);
+void intel_alpm_init(struct intel_dp *intel_dp);
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
@@ -21,7 +23,19 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state);
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_alpm_port_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector);
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp);
bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp);
+bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_alpm_disable(struct intel_dp *intel_dp);
+bool intel_alpm_get_error(struct intel_dp *intel_dp);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 03dc54c802d3..348b1655435e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -26,23 +26,24 @@
*
* The functions here implement the state management and hardware programming
* dispatch required by the atomic modeset infrastructure.
- * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ * See intel_plane.c for the plane-specific atomic functionality.
*/
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_cdclk.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp_tunnel.h"
+#include "intel_fb.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
-#include "intel_fb.h"
#include "skl_universal_plane.h"
/**
@@ -59,17 +60,16 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
struct drm_property *property,
u64 *val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio)
+ if (property == display->properties.force_audio)
*val = intel_conn_state->force_audio;
- else if (property == dev_priv->display.properties.broadcast_rgb)
+ else if (property == display->properties.broadcast_rgb)
*val = intel_conn_state->broadcast_rgb;
else {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
@@ -92,22 +92,21 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_property *property,
u64 val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio) {
+ if (property == display->properties.force_audio) {
intel_conn_state->force_audio = val;
return 0;
}
- if (property == dev_priv->display.properties.broadcast_rgb) {
+ if (property == display->properties.broadcast_rgb) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
- drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ drm_dbg_atomic(display->drm, "Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
}
@@ -275,7 +274,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->do_async_flip = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
- crtc_state->dsb_color_vblank = NULL;
+ crtc_state->dsb_color = NULL;
crtc_state->dsb_commit = NULL;
crtc_state->use_dsb = false;
@@ -311,7 +310,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
- drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb_color);
drm_WARN_ON(crtc->dev, crtc_state->dsb_commit);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ea935a5d94c8..5bdaef38f13d 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -27,9 +27,9 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
@@ -397,6 +397,19 @@ hsw_audio_config_update(struct intel_encoder *encoder,
hsw_hdmi_audio_config_update(encoder, crtc_state);
}
+static void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder trans = crtc_state->cpu_transcoder;
+
+ if (!HAS_DP20(display))
+ return;
+
+ intel_de_rmw(display, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
+ enable && crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
+}
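
The new intel_audio_sdp_split_update() folds the old unconditional helper into a guarded read-modify-write: the mask is always cleared, and re-set only when both the caller and the crtc state want SDP split. The rmw idiom in isolation, generic rather than tied to the AUD registers:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t reg;	/* simulated hardware register */

/* Clear `mask`, then set the bits in `val` -- the same contract as
 * intel_de_rmw(display, reg, mask, val). */
static void rmw(uint32_t mask, uint32_t val)
{
	reg = (reg & ~mask) | val;
}

#define SDP_SPLIT	(1u << 31)

static void sdp_split_update(int enable, int sdp_split_wanted)
{
	/* enable && wanted -> set the bit, otherwise just clear it */
	rmw(SDP_SPLIT, enable && sdp_split_wanted ? SDP_SPLIT : 0);
}

int main(void)
{
	sdp_split_update(1, 1);
	printf("after enable:  %#x\n", reg);

	sdp_split_update(0, 1);
	printf("after disable: %#x\n", reg);
	return 0;
}
```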
+
static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -430,6 +443,8 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
if (needs_wa_14020863754(display))
intel_de_rmw(display, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
+ intel_audio_sdp_split_update(old_crtc_state, false);
+
mutex_unlock(&display->audio.mutex);
}
@@ -555,6 +570,8 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
enable_audio_dsc_wa(encoder, crtc_state);
+ intel_audio_sdp_split_update(crtc_state, true);
+
if (needs_wa_14020863754(display))
intel_de_rmw(display, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
@@ -587,19 +604,17 @@ static void ibx_audio_regs_init(struct intel_display *display,
enum pipe pipe,
struct ibx_audio_regs *regs)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.valleyview || display->platform.cherryview) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else if (HAS_PCH_CPT(i915)) {
+ } else if (HAS_PCH_CPT(display)) {
regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
regs->aud_config = CPT_AUD_CFG(pipe);
regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- } else if (HAS_PCH_IBX(i915)) {
+ } else if (HAS_PCH_IBX(display)) {
regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
regs->aud_config = IBX_AUD_CFG(pipe);
regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
@@ -683,16 +698,6 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
mutex_unlock(&display->audio.mutex);
}
-void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder trans = crtc_state->cpu_transcoder;
-
- if (HAS_DP20(display))
- intel_de_rmw(display, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
- crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
-}
-
bool intel_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -889,12 +894,10 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
*/
void intel_audio_hooks_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.g4x)
display->funcs.audio = &g4x_audio_funcs;
else if (display->platform.valleyview || display->platform.cherryview ||
- HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
+ HAS_PCH_CPT(display) || HAS_PCH_IBX(display))
display->funcs.audio = &ibx_audio_funcs;
else if (display->platform.haswell || DISPLAY_VER(display) >= 8)
display->funcs.audio = &hsw_audio_funcs;
@@ -948,7 +951,7 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
if (IS_ERR(cdclk_state))
return PTR_ERR(cdclk_state);
- cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
+ intel_cdclk_force_min_cdclk(cdclk_state, enable ? 2 * 96000 : 0);
return drm_atomic_commit(&state->base);
}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index ad49eefa7182..42cf886f3d24 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -31,6 +31,5 @@ int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_audio_init(struct intel_display *display);
void intel_audio_register(struct intel_display *display);
void intel_audio_deinit(struct intel_display *display);
-void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 178dc6c8de80..e007380e9a63 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -7,15 +7,19 @@
#include <linux/kernel.h>
#include <linux/pwm.h>
#include <linux/string_helpers.h>
-
#include <acpi/video.h>
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
@@ -472,7 +476,6 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2;
@@ -485,7 +488,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
intel_de_rmw(display, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
panel->backlight.alternate_pwm_increment ?
LPT_PWM_GRANULARITY : 0);
@@ -502,7 +505,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
pch_ctl1 |= BLM_PCH_POLARITY;
/* After LPT, override is the default. */
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
@@ -901,11 +904,9 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ with_intel_display_rpm(display) {
u32 hw_level;
drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
@@ -1065,7 +1066,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 mul, clock;
@@ -1074,7 +1075,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
mul = 128;
- if (HAS_PCH_LPT_H(i915))
+ if (HAS_PCH_LPT_H(display))
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
@@ -1231,12 +1232,11 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
bool alt, cpu_mode;
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
alt = intel_de_read(display, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
alt = intel_de_read(display, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
@@ -1260,7 +1260,7 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(i915) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(display) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
@@ -1467,15 +1467,13 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int cnp_num_backlight_controllers(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_DG1)
return 1;
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
return 2;
return 1;
@@ -1483,14 +1481,12 @@ static int cnp_num_backlight_controllers(struct intel_display *display)
static bool cnp_backlight_controller_is_valid(struct intel_display *display, int controller)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (controller < 0 || controller >= cnp_num_backlight_controllers(display))
return false;
if (controller == 1 &&
- INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) <= PCH_ADP)
+ INTEL_PCH_TYPE(display) >= PCH_ICP &&
+ INTEL_PCH_TYPE(display) <= PCH_ADP)
return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
@@ -1819,7 +1815,6 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
struct intel_connector *connector =
container_of(panel, struct intel_connector, panel);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
@@ -1827,14 +1822,14 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (display->platform.geminilake || display->platform.broxton) {
panel->backlight.pwm_funcs = &bxt_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_CNP) {
panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT_H) {
- if (HAS_PCH_LPT(i915))
+ } else if (INTEL_PCH_TYPE(display) >= PCH_LPT_H) {
+ if (HAS_PCH_LPT(display))
panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
panel->backlight.pwm_funcs = &spt_pwm_funcs;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (HAS_PCH_SPLIT(display)) {
panel->backlight.pwm_funcs = &pch_pwm_funcs;
} else if (display->platform.valleyview || display->platform.cherryview) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a8d08d7d82b3..9c268bed091d 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -37,6 +37,8 @@
#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
@@ -1937,7 +1939,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display,
int index, len;
if (drm_WARN_ON(display->drm,
- !data || panel->vbt.dsi.seq_version != 1))
+ !data || panel->vbt.dsi.seq_version >= 3))
return 0;
/* index = 1 to skip sequence byte */
@@ -1960,7 +1962,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display,
}
/*
- * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * Some v1/v2 VBT MIPI sequences do the deassert in the init OTP sequence.
* The deassert must be done before calling intel_dsi_device_ready, so for
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
@@ -1971,9 +1973,9 @@ static void vlv_fixup_mipi_sequences(struct intel_display *display,
u8 *init_otp;
int len;
- /* Limit this to v1 vid-mode sequences */
+ /* Limit this to v1/v2 vid-mode sequences */
if (panel->vbt.dsi.config->is_cmd_mode ||
- panel->vbt.dsi.seq_version != 1)
+ panel->vbt.dsi.seq_version >= 3)
return;
/* Only do this if there are otp and assert seqs and no deassert seq */
@@ -2244,28 +2246,27 @@ static const u8 adlp_ddc_pin_map[] = {
static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *ddc_pin_map;
int i, n_entries;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL || display->platform.alderlake_p) {
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL || display->platform.alderlake_p) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
} else if (display->platform.alderlake_s) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG1) {
return vbt_pin;
- } else if (display->platform.rocketlake && INTEL_PCH_TYPE(i915) == PCH_TGP) {
+ } else if (display->platform.rocketlake && INTEL_PCH_TYPE(display) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) {
+ } else if (HAS_PCH_TGP(display) && DISPLAY_VER(display) == 9) {
ddc_pin_map = gen9bc_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
- } else if (HAS_PCH_CNP(i915)) {
+ } else if (HAS_PCH_CNP(display)) {
ddc_pin_map = cnp_ddc_pin_map;
n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
} else {
@@ -2864,8 +2865,6 @@ parse_general_definitions(struct intel_display *display)
static void
init_vbt_defaults(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
display->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
@@ -2882,7 +2881,7 @@ init_vbt_defaults(struct intel_display *display)
* clock for LVDS.
*/
display->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(display,
- !HAS_PCH_SPLIT(i915));
+ !HAS_PCH_SPLIT(display));
drm_dbg_kms(display->drm, "Set default to SSC at %d kHz\n",
display->vbt.lvds_ssc_freq);
}
@@ -3115,7 +3114,6 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
{
struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
- intel_wakeref_t wakeref;
vbt = firmware_get_vbt(display, sizep);
@@ -3126,12 +3124,12 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
* If the OpRegion does not have VBT, look in SPI flash
* through MMIO or PCI mapping
*/
- if (!vbt && IS_DGFX(i915))
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ if (!vbt && display->platform.dgfx)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash");
if (!vbt)
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM");
return vbt;
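The dropped intel_wakeref_t local above is explained by the new scoped helper; conceptually (an illustration with hypothetical helper names, not the real macro body):

/*
 * with_intel_display_rpm(display)
 *         stmt;
 *
 * behaves roughly like:
 *
 *         ref = get_display_rpm_ref(display);
 *         stmt;
 *         put_display_rpm_ref(display, ref);
 *
 * so the runtime-PM reference is scoped to the single statement that follows.
 */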
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index f9841f0498c6..6cd7a011b8c4 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -24,7 +24,7 @@
/*
* Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
* the VBT from the rest of the driver. Add the parsed, clean data to struct
- * intel_vbt_data within struct drm_i915_private.
+ * intel_vbt_data within struct intel_display.
*/
#ifndef _INTEL_BIOS_H_
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index fbd16d7b58d9..65d64f79a4bd 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
+#include <drm/drm_panic.h>
+#include "display/intel_display_types.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -57,3 +59,18 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
i915_debugfs_describe_obj(m, to_intel_bo(obj));
}
+
+struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
+{
+ return i915_gem_object_alloc_framebuffer();
+}
+
+int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
+{
+ return i915_gem_object_panic_setup(sb);
+}
+
+void intel_bo_panic_finish(struct intel_framebuffer *fb)
+{
+ return i915_gem_object_panic_finish(fb);
+}
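These wrappers keep display code from calling gem helpers directly; a hypothetical panic-path call order using only the entry points added above (error handling elided, `sb` and `fb` are assumed locals):

struct intel_framebuffer *fb = intel_bo_alloc_framebuffer();
int ret = intel_bo_panic_setup(sb);	/* sb is a struct drm_scanout_buffer * */

if (ret == 0) {
	/* ... draw the panic screen into the scanout buffer ... */
}
intel_bo_panic_finish(fb);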
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index ea7a2253aaa5..97087a64d23b 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -7,6 +7,8 @@
#include <linux/types.h>
struct drm_gem_object;
+struct drm_scanout_buffer;
+struct intel_framebuffer;
struct seq_file;
struct vm_area_struct;
@@ -23,5 +25,8 @@ struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
struct intel_frontbuffer *front);
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
+struct intel_framebuffer *intel_bo_alloc_framebuffer(void);
+int intel_bo_panic_setup(struct drm_scanout_buffer *sb);
+void intel_bo_panic_finish(struct intel_framebuffer *fb);
#endif /* __INTEL_BO__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 98b898a1de8f..d29a755612de 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,6 +5,8 @@
#include <drm/drm_atomic_state_helper.h>
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -12,10 +14,47 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "skl_watermark.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_uncore.h"
+#include "skl_watermark.h"
+
+struct intel_dbuf_bw {
+ unsigned int max_bw[I915_MAX_DBUF_SLICES];
+ u8 active_planes[I915_MAX_DBUF_SLICES];
+};
+
+struct intel_bw_state {
+ struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
+
+ /*
+	 * Contains a bit mask used to determine whether the corresponding
+	 * pipe allows SAGV or not.
+ */
+ u8 pipe_sagv_reject;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
+ /*
+	 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
+	 * the selected QGV point as the parameter, in multiples of 100 MB/s.
+ */
+ u16 qgv_point_peakbw;
+
+ /*
+	 * Current QGV points mask, which restricts
+	 * some particular SAGV states; not to be confused
+	 * with pipe_sagv_mask.
+ */
+ u16 qgv_points_mask;
+
+ unsigned int data_rate[I915_MAX_PIPES];
+ u8 num_active_planes[I915_MAX_PIPES];
+};
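With struct intel_bw_state now private to intel_bw.c, other files reach it only through accessors; a small usage sketch based on the functions added elsewhere in this patch:

const struct intel_bw_state *bw = intel_atomic_get_new_bw_state(state);
int peakbw = 0;

if (bw)
	peakbw = intel_bw_qgv_point_peakbw(bw);	/* accessor declared in intel_bw.h */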
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -39,14 +78,15 @@ struct intel_qgv_info {
u8 deinterleave;
};
-static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dclk_ratio, dclk_reference;
u32 val;
- val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
+ val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
if (val & DG1_QCLK_REFERENCE)
dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
@@ -54,18 +94,18 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
- val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
if (val & DG1_GEAR_TYPE)
sp->dclk *= 2;
if (sp->dclk == 0)
return -EINVAL;
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
@@ -74,7 +114,7 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int icl_pcode_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
@@ -82,14 +122,14 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
u16 dclk;
int ret;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
- &val, &val2);
+ ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+ &val, &val2);
if (ret)
return ret;
dclk = val & 0xffff;
- sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -102,15 +142,15 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
- struct intel_psf_gv_point *points)
+static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
+ struct intel_psf_gv_point *points)
{
u32 val = 0;
int ret;
int i;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
+ ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -122,10 +162,10 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+static u16 icl_qgv_points_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 qgv_points = 0, psf_points = 0;
/*
@@ -142,49 +182,50 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
-static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
{
- return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
ICL_PCODE_REQ_QGV_PT_MASK);
}
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
- u32 points_mask)
+static int icl_pcode_restrict_qgv_points(struct intel_display *display,
+ u32 points_mask)
{
int ret;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
- points_mask,
- ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
- ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
- 1);
+ ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
+ ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
+ 1);
if (ret < 0) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to disable qgv points (0x%x) points: 0x%x\n",
ret, points_mask);
return ret;
}
- dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ display->sagv.status = is_sagv_enabled(display, points_mask) ?
I915_SAGV_ENABLED : I915_SAGV_DISABLED;
return 0;
}
-static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int mtl_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp, int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val, val2;
u16 dclk;
- val = intel_uncore_read(&dev_priv->uncore,
+ val = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
- val2 = intel_uncore_read(&dev_priv->uncore,
+ val2 = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
@@ -200,29 +241,29 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
}
static int
-intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+intel_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
- if (DISPLAY_VER(dev_priv) >= 14)
- return mtl_read_qgv_point_info(dev_priv, sp, point);
- else if (IS_DG1(dev_priv))
- return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_read_qgv_point_info(display, sp, point);
+ else if (display->platform.dg1)
+ return dg1_mchbar_read_qgv_point_info(display, sp, point);
else
- return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+ return icl_pcode_read_qgv_point_info(display, sp, point);
}
-static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+static int icl_get_qgv_points(struct intel_display *display,
+ const struct dram_info *dram_info,
struct intel_qgv_info *qi,
bool is_y_tile)
{
- const struct dram_info *dram_info = &dev_priv->dram_info;
int i, ret;
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = 4;
@@ -251,7 +292,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
MISSING_CASE(dram_info->type);
return -EINVAL;
}
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = is_y_tile ? 8 : 4;
@@ -266,7 +307,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = is_y_tile ? 1 : 2;
break;
case INTEL_DRAM_LPDDR4:
- if (IS_ROCKETLAKE(dev_priv)) {
+ if (display->platform.rocketlake) {
qi->t_bl = 8;
qi->max_numchannels = 4;
qi->channel_width = 32;
@@ -285,39 +326,39 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->max_numchannels = 1;
break;
}
- } else if (DISPLAY_VER(dev_priv) == 11) {
- qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
+ } else if (DISPLAY_VER(display) == 11) {
+ qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
qi->num_points = ARRAY_SIZE(qi->points);
for (i = 0; i < qi->num_points; i++) {
struct intel_qgv_point *sp = &qi->points[i];
- ret = intel_read_qgv_point_info(dev_priv, sp, i);
+ ret = intel_read_qgv_point_info(display, sp, i);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
+ drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
return ret;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
sp->t_rcd, sp->t_rc);
}
if (qi->num_psf_points > 0) {
- ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
+ ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
if (ret) {
- drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
+ drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
qi->num_psf_points = 0;
}
for (i = 0; i < qi->num_psf_points; i++)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSF GV %d: CLK=%d \n",
i, qi->psf_points[i].clk);
}
@@ -405,20 +446,36 @@ static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
/* Other values not used by simplified algorithm */
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static const struct intel_sa_info xe3lpd_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 65, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static const struct intel_sa_info xe3lpd_3002_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 22, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static int icl_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
+ const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -429,7 +486,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
int clpchgroup;
int j;
@@ -456,7 +513,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
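A worked example of the derating formula above, with illustrative numbers:

/*
 * sa->derating = 10 and a raw point bandwidth bw = 50000 MB/s give
 *   deratedbw = min(maxdebw, 50000 * (100 - 10) / 100)
 *             = min(maxdebw, 45000) MB/s.
 */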
@@ -467,44 +524,45 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and is pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static int tgl_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
+ const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
- const struct dram_info *dram_info = &dev_priv->dram_info;
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- if (DISPLAY_VER(dev_priv) < 14 &&
+ if (DISPLAY_VER(display) < 14 &&
(dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
num_channels *= 2;
qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
- if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
+ if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
- if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels)
- drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
+ if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
+ drm_warn(display->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
@@ -521,7 +579,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
@@ -529,7 +587,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
if (i < num_groups - 1) {
- bi_next = &dev_priv->display.bw.max[i + 1];
+ bi_next = &display->bw.max[i + 1];
if (clpchgroup < clperchgroup)
bi_next->num_planes = (ipqdepth - clpchgroup) /
@@ -561,7 +619,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
num_channels *
qi.channel_width, 8);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
i, j, bi->num_planes, bi->deratedbw[j],
bi->peakbw[j]);
@@ -572,7 +630,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / PSF GV %d: num_planes=%d bw=%u\n",
i, j, bi->num_planes, bi->psf_bw[j]);
}
@@ -584,17 +642,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and is pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static void dg2_get_bw_info(struct drm_i915_private *i915)
+static void dg2_get_bw_info(struct intel_display *display)
{
- unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
- int num_groups = ARRAY_SIZE(i915->display.bw.max);
+ unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i;
/*
@@ -605,7 +663,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
* whereas DG2-G11 platforms have 38 GB/s.
*/
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &i915->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
bi->num_planes = 1;
/* Need only one dummy QGV point per group */
@@ -613,20 +671,21 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
bi->deratedbw[0] = deratedbw;
}
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
}
-static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
+static int xe2_hpd_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
- int num_channels = i915->dram_info.num_channels;
+ int num_channels = dram_info->num_channels;
int peakbw, maxdebw;
int ret, i;
- ret = icl_get_qgv_points(i915, &qi, true);
+ ret = icl_get_qgv_points(display, dram_info, &qi, true);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -638,33 +697,33 @@ static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
const struct intel_qgv_point *point = &qi.points[i];
int bw = num_channels * (qi.channel_width / 8) * point->dclk;
- i915->display.bw.max[0].deratedbw[i] =
+ display->bw.max[0].deratedbw[i] =
min(maxdebw, (100 - sa->derating) * bw / 100);
- i915->display.bw.max[0].peakbw[i] = bw;
+ display->bw.max[0].peakbw[i] = bw;
- drm_dbg_kms(&i915->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
- i, i915->display.bw.max[0].deratedbw[i],
- i915->display.bw.max[0].peakbw[i]);
+ drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
+ i, display->bw.max[0].deratedbw[i],
+ display->bw.max[0].peakbw[i]);
}
/* Bandwidth does not depend on # of planes; set all groups the same */
- i915->display.bw.max[0].num_planes = 1;
- i915->display.bw.max[0].num_qgv_points = qi.num_points;
- for (i = 1; i < ARRAY_SIZE(i915->display.bw.max); i++)
- memcpy(&i915->display.bw.max[i], &i915->display.bw.max[0],
- sizeof(i915->display.bw.max[0]));
+ display->bw.max[0].num_planes = 1;
+ display->bw.max[0].num_qgv_points = qi.num_points;
+ for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
+ memcpy(&display->bw.max[i], &display->bw.max[0],
+ sizeof(display->bw.max[0]));
/*
* Xe2_HPD should always have exactly two QGV points representing
* battery and plugged-in operation.
*/
- drm_WARN_ON(&i915->drm, qi.num_points != 2);
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ drm_WARN_ON(display->drm, qi.num_points != 2);
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int icl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -674,9 +733,9 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
+ for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -692,7 +751,7 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
return UINT_MAX;
}
-static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int tgl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -702,9 +761,9 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -720,57 +779,61 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
return 0;
}
-static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
+static unsigned int adl_psf_bw(struct intel_display *display,
int psf_gv_point)
{
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[0];
+ &display->bw.max[0];
return bi->psf_bw[psf_gv_point];
}
-static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+static unsigned int icl_qgv_bw(struct intel_display *display,
int num_active_planes, int qgv_point)
{
unsigned int idx;
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ if (DISPLAY_VER(display) >= 12)
+ idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
else
- idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+ idx = icl_max_bw_index(display, num_active_planes, qgv_point);
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ if (idx >= ARRAY_SIZE(display->bw.max))
return 0;
- return i915->display.bw.max[idx].deratedbw[qgv_point];
+ return display->bw.max[idx].deratedbw[qgv_point];
}
-void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+void intel_bw_init_hw(struct intel_display *display)
{
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv) &&
+ if (DISPLAY_VERx100(display) >= 3002)
+ tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
+ else if (DISPLAY_VER(display) >= 30)
+ tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
dram_info->type == INTEL_DRAM_GDDR_ECC)
- xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_ecc_sa_info);
- else if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
- xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
- else if (DISPLAY_VER(dev_priv) >= 14)
- tgl_get_bw_info(dev_priv, &mtl_sa_info);
- else if (IS_DG2(dev_priv))
- dg2_get_bw_info(dev_priv);
- else if (IS_ALDERLAKE_P(dev_priv))
- tgl_get_bw_info(dev_priv, &adlp_sa_info);
- else if (IS_ALDERLAKE_S(dev_priv))
- tgl_get_bw_info(dev_priv, &adls_sa_info);
- else if (IS_ROCKETLAKE(dev_priv))
- tgl_get_bw_info(dev_priv, &rkl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 12)
- tgl_get_bw_info(dev_priv, &tgl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 11)
- icl_get_bw_info(dev_priv, &icl_sa_info);
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
+ else if (DISPLAY_VER(display) >= 14)
+ tgl_get_bw_info(display, dram_info, &mtl_sa_info);
+ else if (display->platform.dg2)
+ dg2_get_bw_info(display);
+ else if (display->platform.alderlake_p)
+ tgl_get_bw_info(display, dram_info, &adlp_sa_info);
+ else if (display->platform.alderlake_s)
+ tgl_get_bw_info(display, dram_info, &adls_sa_info);
+ else if (display->platform.rocketlake)
+ tgl_get_bw_info(display, dram_info, &rkl_sa_info);
+ else if (DISPLAY_VER(display) == 12)
+ tgl_get_bw_info(display, dram_info, &tgl_sa_info);
+ else if (DISPLAY_VER(display) == 11)
+ icl_get_bw_info(display, dram_info, &icl_sa_info);
}
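The ladder order above matters; a short note on the version encoding (assuming the usual DISPLAY_VERx100() convention):

/*
 * DISPLAY_VERx100() encodes major * 100 + minor, so display 30.02 reports
 * 3002 and would also satisfy DISPLAY_VER() >= 30; the more specific 3002
 * check must therefore come first, or xe3lpd_3002_sa_info would never be
 * selected.
 */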
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -784,8 +847,8 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -799,7 +862,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
data_rate += crtc_state->data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->data_rate_y[plane_id];
}
@@ -807,51 +870,55 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
}
/* "Maximum Pipe Read Bandwidth" */
-static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_bw_crtc_min_cdclk(struct intel_display *display,
+ unsigned int data_rate)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
return 0;
- return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
}
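Illustrative arithmetic for the pipe read bandwidth formula above:

/*
 * data_rate = 1000000 (in the caller's units):
 *   DIV_ROUND_UP(1000000 * 10, 512) = 19532
 * is that pipe's contribution to the minimum cdclk.
 */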
-static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_num_active_planes(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int num_active_planes = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
num_active_planes += bw_state->num_active_planes[pipe];
return num_active_planes;
}
-static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_data_rate(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int data_rate = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
+ if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
}
+struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_bw_state, base);
+}
+
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -859,10 +926,10 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -870,27 +937,27 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
return to_intel_bw_state(bw_state);
}
-static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
int num_active_planes)
{
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int max_bw_point = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_qgv_points; i++) {
unsigned int max_data_rate =
- icl_qgv_bw(i915, num_active_planes, i);
+ icl_qgv_bw(display, num_active_planes, i);
/*
* We need to know which qgv point gives us
@@ -909,23 +976,23 @@ static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
return max_bw_point;
}
-static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
unsigned int qgv_points,
unsigned int psf_points)
{
return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
}
-static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
unsigned int max_bw_point_mask = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate > max_bw) {
max_bw_point_mask = BIT(i);
@@ -938,29 +1005,93 @@ static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
return max_bw_point_mask;
}
-static void icl_force_disable_sagv(struct drm_i915_private *i915,
+static void icl_force_disable_sagv(struct intel_display *display,
struct intel_bw_state *bw_state)
{
- unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
- unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);
- bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
- drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
bw_state->qgv_points_mask);
- icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+ icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}
-static int mtl_find_qgv_points(struct drm_i915_private *i915,
+void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask;
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(display, new_mask);
+}
+
+void icl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+ new_mask = new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(display, new_mask);
+}
+
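A worked mask example for the pre/post pair above, with illustrative values:

/*
 * old qgv_points_mask = 0b0011, new qgv_points_mask = 0b0110:
 *   pre-plane update:  restrict to old | new = 0b0111
 *   post-plane update: relax to new          = 0b0110
 * Points are only ever masked before the configuration change and unmasked
 * after it, honouring the BSpec rule quoted in the comments.
 */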
+static int mtl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
struct intel_bw_state *new_bw_state)
{
unsigned int best_rate = UINT_MAX;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int qgv_peak_bw = 0;
int i;
int ret;
@@ -974,9 +1105,9 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
* not enabled. PM Demand code will clamp the value for the register
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
+ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
new_bw_state->qgv_point_peakbw = U16_MAX;
- drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw.");
+ drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
return 0;
}
@@ -986,27 +1117,27 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
*/
for (i = 0; i < num_qgv_points; i++) {
unsigned int bw_index =
- tgl_max_bw_index(i915, num_active_planes, i);
+ tgl_max_bw_index(display, num_active_planes, i);
unsigned int max_data_rate;
- if (bw_index >= ARRAY_SIZE(i915->display.bw.max))
+ if (bw_index >= ARRAY_SIZE(display->bw.max))
continue;
- max_data_rate = i915->display.bw.max[bw_index].deratedbw[i];
+ max_data_rate = display->bw.max[bw_index].deratedbw[i];
if (max_data_rate < data_rate)
continue;
if (max_data_rate - data_rate < best_rate) {
best_rate = max_data_rate - data_rate;
- qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i];
+ qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
}
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
i, max_data_rate, data_rate, qgv_peak_bw);
}
- drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
+ drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
qgv_peak_bw, data_rate);
/*
@@ -1014,7 +1145,7 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* satisfying the required data rate is found
*/
if (qgv_peak_bw == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
+ drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
@@ -1025,14 +1156,14 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int icl_find_qgv_points(struct drm_i915_private *i915,
+static int icl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 psf_points = 0;
u16 qgv_points = 0;
int i;
@@ -1043,22 +1174,22 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int max_data_rate = icl_qgv_bw(i915,
+ unsigned int max_data_rate = icl_qgv_bw(display,
num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
i, max_data_rate, data_rate);
}
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate >= data_rate)
psf_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
+ drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
" required %d\n",
i, max_data_rate, data_rate);
}
@@ -1069,14 +1200,14 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* reasons.
*/
if (qgv_points == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
if (num_psf_gv_points > 0 && psf_points == 0) {
- drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
@@ -1087,9 +1218,9 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* we can't enable SAGV due to the increased memory latency it may
* cause.
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
+ qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
+ drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
qgv_points);
}
@@ -1097,7 +1228,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
/*
@@ -1113,80 +1244,90 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
+static int intel_bw_check_qgv_points(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
+ unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
unsigned int num_active_planes =
- intel_bw_num_active_planes(i915, new_bw_state);
+ intel_bw_num_active_planes(display, new_bw_state);
data_rate = DIV_ROUND_UP(data_rate, 1000);
- if (DISPLAY_VER(i915) >= 14)
- return mtl_find_qgv_points(i915, data_rate, num_active_planes,
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_find_qgv_points(display, data_rate, num_active_planes,
new_bw_state);
else
- return icl_find_qgv_points(i915, data_rate, num_active_planes,
+ return icl_find_qgv_points(display, data_rate, num_active_planes,
old_bw_state, new_bw_state);
}
-static bool intel_bw_state_changed(struct drm_i915_private *i915,
+static bool intel_dbuf_bw_changed(struct intel_display *display,
+ const struct intel_dbuf_bw *old_dbuf_bw,
+ const struct intel_dbuf_bw *new_dbuf_bw)
+{
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
+ old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_bw_state_changed(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
const struct intel_bw_state *new_bw_state)
{
enum pipe pipe;
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *old_crtc_bw =
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *old_dbuf_bw =
&old_bw_state->dbuf_bw[pipe];
- const struct intel_dbuf_bw *new_crtc_bw =
+ const struct intel_dbuf_bw *new_dbuf_bw =
&new_bw_state->dbuf_bw[pipe];
- enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
- if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
- old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
- return true;
- }
+ if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
+ return true;
- if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
+ if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
+ intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
return true;
}
return false;
}
-static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
struct intel_crtc *crtc,
enum plane_id plane_id,
const struct skl_ddb_entry *ddb,
unsigned int data_rate)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
- unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
enum dbuf_slice slice;
/*
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
- crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
- crtc_bw->active_planes[slice] |= BIT(plane_id);
+ for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
+ dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
+ dbuf_bw->active_planes[slice] |= BIT(plane_id);
}
}
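An illustration of the per-slice accounting above, with hypothetical plane data rates:

/*
 * Two planes on the same slice with data rates 300 and 500 leave
 * max_bw[slice] = 500 and two bits set in active_planes[slice]; the
 * min-cdclk code below multiplies max_bw by the plane count, modelling the
 * arbiter's equal-share guarantee.
 */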
-static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
enum plane_id plane_id;
- memset(crtc_bw, 0, sizeof(*crtc_bw));
+ memset(dbuf_bw, 0, sizeof(*dbuf_bw));
if (!crtc_state->hw.active)
return;
@@ -1199,12 +1340,12 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
if (plane_id == PLANE_CURSOR)
continue;
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb[plane_id],
crtc_state->data_rate[plane_id]);
- if (DISPLAY_VER(i915) < 11)
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ if (DISPLAY_VER(display) < 11)
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb_y[plane_id],
crtc_state->data_rate[plane_id]);
}
@@ -1212,13 +1353,13 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
/* "Maximum Data Buffer Bandwidth" */
static int
-intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
+intel_bw_dbuf_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int total_max_bw = 0;
enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
+ for_each_dbuf_slice(display, slice) {
int num_active_planes = 0;
unsigned int max_bw = 0;
enum pipe pipe;
@@ -1227,11 +1368,11 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
- max_bw = max(crtc_bw->max_bw[slice], max_bw);
- num_active_planes += hweight8(crtc_bw->active_planes[slice]);
+ max_bw = max(dbuf_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
}
max_bw *= num_active_planes;
@@ -1241,16 +1382,18 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
return DIV_ROUND_UP(total_max_bw, 64);
}
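Illustrative arithmetic for the dbuf formula above:

/*
 * total_max_bw = 1000000 summed across all slices:
 *   DIV_ROUND_UP(1000000, 64) = 15625
 * is the dbuf-driven minimum cdclk.
 */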
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
enum pipe pipe;
int min_cdclk;
- min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
+ min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
- for_each_pipe(i915, pipe)
- min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
+ for_each_pipe(display, pipe)
+ min_cdclk = max(min_cdclk,
+ intel_bw_crtc_min_cdclk(display,
+ bw_state->data_rate[pipe]));
return min_cdclk;
}
@@ -1258,42 +1401,49 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915,
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_bw_state *new_bw_state = NULL;
const struct intel_bw_state *old_bw_state = NULL;
const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *crtc_state;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
int old_min_cdclk, new_min_cdclk;
struct intel_crtc *crtc;
int i;
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
+
+ skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
+ skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
+
+ if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
+ continue;
+
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
- skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
-
- new_bw_state->min_cdclk[crtc->pipe] =
- intel_bw_crtc_min_cdclk(crtc_state);
+ new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
}
if (!old_bw_state)
return 0;
- if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
+ if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
int ret = intel_atomic_lock_global_state(&new_bw_state->base);
if (ret)
return ret;
}
- old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
- new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);
+ old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
+ new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
/*
* No need to check against the cdclk state if
@@ -1318,12 +1468,12 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
* requirements. This can reduce back and forth
* display blinking due to constant cdclk changes.
*/
- if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
+ if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state))
return 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
- new_min_cdclk, cdclk_state->bw_min_cdclk);
+ new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state));
*need_cdclk_calc = true;
return 0;
@@ -1331,7 +1481,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -1365,7 +1515,7 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
*changed = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] data rate %u num active planes %u\n",
crtc->base.base.id, crtc->base.name,
new_bw_state->data_rate[crtc->pipe],
@@ -1375,16 +1525,103 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
return 0;
}
-int intel_bw_atomic_check(struct intel_atomic_state *state)
+static int intel_bw_modeset_checks(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state;
+ struct intel_bw_state *new_bw_state;
+
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ int ret;
+
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_bw_state *old_bw_state = NULL;
+ struct intel_bw_state *new_bw_state = NULL;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_can_enable_sagv(old_crtc_state) ==
+ intel_crtc_can_enable_sagv(new_crtc_state))
+ continue;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ if (intel_bw_can_enable_sagv(display, new_bw_state) !=
+ intel_bw_can_enable_sagv(display, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
+{
+ struct intel_display *display = to_intel_display(state);
bool changed = false;
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state;
const struct intel_bw_state *old_bw_state;
int ret;
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ if (any_ms) {
+ ret = intel_bw_modeset_checks(state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_bw_check_sagv_mask(state);
+ if (ret)
+ return ret;
+
/* FIXME earlier gens need some checks too */
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return 0;
ret = intel_bw_check_data_rate(state, &changed);
@@ -1395,9 +1632,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- (intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state) ||
- new_bw_state->force_check_qgv))
+ intel_bw_can_enable_sagv(display, old_bw_state) !=
+ intel_bw_can_enable_sagv(display, new_bw_state))
changed = true;
/*
@@ -1407,28 +1643,25 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (!changed)
return 0;
- ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
+ ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
if (ret)
return ret;
- new_bw_state->force_check_qgv = false;
-
return 0;
}
static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
- bw_state->force_check_qgv = true;
- drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
bw_state->data_rate[crtc->pipe],
bw_state->num_active_planes[crtc->pipe]);
@@ -1444,6 +1677,7 @@ void intel_bw_update_hw_state(struct intel_display *display)
return;
bw_state->active_pipes = 0;
+ bw_state->pipe_sagv_reject = 0;
for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
@@ -1455,6 +1689,11 @@ void intel_bw_update_hw_state(struct intel_display *display)
if (DISPLAY_VER(display) >= 11)
intel_bw_crtc_update(bw_state, crtc_state);
+
+ skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
+
+ /* initially SAGV has been forced off */
+		/* SAGV is initially forced off */
}
}
@@ -1470,6 +1709,7 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
+ memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
}
static struct intel_global_state *
@@ -1495,9 +1735,8 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *i915)
+int intel_bw_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_bw_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -1511,8 +1750,37 @@ int intel_bw_init(struct drm_i915_private *i915)
* Limit this only if we have SAGV. And for Display version 14 onwards
	 * sagv is handled through pmdemand requests
*/
- if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
- icl_force_disable_sagv(i915, state);
+ if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
+ icl_force_disable_sagv(display, state);
return 0;
}
+
+bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ const struct intel_bw_state *new_bw_state, *old_bw_state;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (new_bw_state &&
+ new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw)
+ return true;
+
+ return false;
+}
+
+bool intel_bw_can_enable_sagv(struct intel_display *display,
+ const struct intel_bw_state *bw_state)
+{
+ if (DISPLAY_VER(display) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
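
intel_bw_can_enable_sagv() folds the old watermark-side logic into the bw state: pre-ICL hardware can use SAGV only with at most one active pipe, and any pipe present in pipe_sagv_reject vetoes it. Since active_pipes is a bitmask, "at most one pipe" is a power-of-two check. A standalone restatement of that rule, assuming only the log2 helpers:

#include <linux/log2.h>
#include <linux/types.h>

/* Illustrative restatement of the pre-ICL rule above. */
static bool at_most_one_pipe(u8 active_pipes)
{
	/* no pipes, or exactly one bit set in the mask */
	return active_pipes == 0 || is_power_of_2(active_pipes);
}
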
+
+int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state)
+{
+ return bw_state->qgv_point_peakbw;
+}
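
intel_bw_qgv_point_peakbw() and intel_bw_pmdemand_needs_update() exist so that consumers such as the pmdemand code no longer need the struct intel_bw_state layout, which this patch makes private to intel_bw.c. A hedged usage sketch; program_peakbw() is a hypothetical consumer, not part of this patch:

/* Hypothetical consumer, written against only the exported accessors. */
if (intel_bw_pmdemand_needs_update(state)) {
	const struct intel_bw_state *bw_state =
		intel_atomic_get_new_bw_state(state);

	program_peakbw(intel_bw_qgv_point_peakbw(bw_state)); /* hypothetical */
}
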
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 3313e4eac4f0..d51f50c9d302 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,60 +8,14 @@
#include <drm/drm_atomic.h>
-#include "intel_display_limits.h"
-#include "intel_display_power.h"
-#include "intel_global_state.h"
-
-struct drm_i915_private;
struct intel_atomic_state;
+struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
+struct intel_global_state;
-struct intel_dbuf_bw {
- unsigned int max_bw[I915_MAX_DBUF_SLICES];
- u8 active_planes[I915_MAX_DBUF_SLICES];
-};
-
-struct intel_bw_state {
- struct intel_global_state base;
- struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
-
- /*
- * Contains a bit mask, used to determine, whether correspondent
- * pipe allows SAGV or not.
- */
- u8 pipe_sagv_reject;
-
- /* bitmask of active pipes */
- u8 active_pipes;
-
- /*
- * From MTL onwards, to lock a QGV point, punit expects the peak BW of
- * the selected QGV point as the parameter in multiples of 100MB/s
- */
- u16 qgv_point_peakbw;
-
- /*
- * Current QGV points mask, which restricts
- * some particular SAGV states, not to confuse
- * with pipe_sagv_mask.
- */
- u16 qgv_points_mask;
-
- /*
- * Flag to force the QGV comparison in atomic check right after the
- * hw state readout
- */
- bool force_check_qgv;
-
- int min_cdclk[I915_MAX_PIPES];
- unsigned int data_rate[I915_MAX_PIPES];
- u8 num_active_planes[I915_MAX_PIPES];
-};
-
-#define to_intel_bw_state(global_state) \
- container_of_const((global_state), struct intel_bw_state, base)
+struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state);
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
@@ -72,16 +26,21 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state);
-void intel_bw_init_hw(struct drm_i915_private *dev_priv);
-int intel_bw_init(struct drm_i915_private *dev_priv);
-int intel_bw_atomic_check(struct intel_atomic_state *state);
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
- u32 points_mask);
+void intel_bw_init_hw(struct intel_display *display);
+int intel_bw_init(struct intel_display *display);
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc);
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state);
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
+bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state);
+bool intel_bw_can_enable_sagv(struct intel_display *display,
+ const struct intel_bw_state *bw_state);
+void icl_sagv_pre_plane_update(struct intel_atomic_state *state);
+void icl_sagv_post_plane_update(struct intel_atomic_state *state);
+int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state);
+
#endif /* __INTEL_BW_H__ */
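
With the struct definition moved out of the header, to_intel_bw_state() can no longer be a container_of() macro visible to every includer, so it becomes an out-of-line function. A sketch of the likely .c-side definition, assuming it mirrors the removed macro (the actual body lives in intel_bw.c, not in this hunk):

/* Assumed shape of the out-of-line cast now defined in intel_bw.c. */
struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_bw_state, base);
}
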
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 2a8749a0213e..228aa64c1349 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -32,16 +32,17 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
#include "skl_watermark.h"
@@ -113,6 +114,42 @@
* dividers can be programmed correctly.
*/
+struct intel_cdclk_state {
+ struct intel_global_state base;
+
+ /*
+ * Logical configuration of cdclk (used for all scaling,
+ * watermark, etc. calculations and checks). This is
+ * computed as if all enabled crtcs were active.
+ */
+ struct intel_cdclk_config logical;
+
+ /*
+	 * Actual configuration of cdclk, which can differ from the
+	 * logical configuration only when all crtcs are DPMS off.
+ */
+ struct intel_cdclk_config actual;
+
+ /* minimum acceptable cdclk to satisfy bandwidth requirements */
+ int bw_min_cdclk;
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
+
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
+
+ /* forced minimum cdclk for glk+ audio w/a */
+ int force_min_cdclk;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
+};
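
Now that struct intel_cdclk_state is private to this file, the distinction its comments draw is worth restating: "logical" is computed as if every enabled CRTC were active and feeds all checks, while "actual" may drop below it only while every CRTC is DPMS off. A rough sketch of that invariant (illustrative, not literal driver code):

/* Rough restatement: with any pipe active, the two configs must agree. */
if (cdclk_state->active_pipes != 0)
	WARN_ON(cdclk_state->actual.cdclk != cdclk_state->logical.cdclk);
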
+
struct intel_cdclk_funcs {
void (*get_cdclk)(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
@@ -567,20 +604,18 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
static void vlv_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+ vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
- cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
- cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+ cdclk_config->vco = vlv_get_hpll_vco(display->drm);
+ cdclk_config->cdclk = vlv_get_cck_clock(display->drm, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL,
cdclk_config->vco);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
- vlv_iosf_sb_put(dev_priv,
+ vlv_iosf_sb_put(display->drm,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
if (display->platform.valleyview)
@@ -658,16 +693,16 @@ static void vlv_set_cdclk(struct intel_display *display,
*/
wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
- vlv_iosf_sb_get(dev_priv,
+ vlv_iosf_sb_get(display->drm,
BIT(VLV_IOSF_SB_CCK) |
BIT(VLV_IOSF_SB_BUNIT) |
BIT(VLV_IOSF_SB_PUNIT));
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
drm_err(display->drm,
@@ -681,12 +716,12 @@ static void vlv_set_cdclk(struct intel_display *display,
cdclk) - 1;
/* adjust cdclk divider */
- val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ val = vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~CCK_FREQUENCY_VALUES;
val |= divider;
- vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+ vlv_cck_write(display->drm, CCK_DISPLAY_CLOCK_CONTROL, val);
- if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+ if (wait_for((vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL) &
CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
50))
drm_err(display->drm,
@@ -694,7 +729,7 @@ static void vlv_set_cdclk(struct intel_display *display,
}
/* adjust self-refresh exit latency value */
- val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+ val = vlv_bunit_read(display->drm, BUNIT_REG_BISOC);
val &= ~0x7f;
/*
@@ -705,9 +740,9 @@ static void vlv_set_cdclk(struct intel_display *display,
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
- vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+ vlv_bunit_write(display->drm, BUNIT_REG_BISOC, val);
- vlv_iosf_sb_put(dev_priv,
+ vlv_iosf_sb_put(display->drm,
BIT(VLV_IOSF_SB_CCK) |
BIT(VLV_IOSF_SB_BUNIT) |
BIT(VLV_IOSF_SB_PUNIT));
@@ -723,7 +758,6 @@ static void chv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
@@ -747,19 +781,19 @@ static void chv_set_cdclk(struct intel_display *display,
*/
wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
- vlv_punit_get(dev_priv);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ vlv_punit_get(display->drm);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
drm_err(display->drm,
"timed out waiting for CDclk change\n");
}
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
intel_update_cdclk(display);
@@ -843,7 +877,6 @@ static void bdw_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int ret;
@@ -856,7 +889,7 @@ static void bdw_set_cdclk(struct intel_display *display,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ ret = intel_pcode_write(display->drm, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
drm_err(display->drm,
"failed to inform pcode about cdclk change\n");
@@ -884,8 +917,8 @@ static void bdw_set_cdclk(struct intel_display *display,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
drm_err(display->drm, "Switching back to LCPLL failed\n");
- snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level);
+ intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level);
intel_de_write(display, CDCLK_FREQ,
DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -1125,7 +1158,6 @@ static void skl_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u32 freq_select, cdclk_ctl;
@@ -1142,10 +1174,10 @@ static void skl_set_cdclk(struct intel_display *display,
drm_WARN_ON_ONCE(display->drm,
display->platform.skylake && vco == 8640000);
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
drm_err(display->drm,
"Failed to inform PCU about cdclk change (%d)\n", ret);
@@ -1188,8 +1220,8 @@ static void skl_set_cdclk(struct intel_display *display,
intel_de_posting_read(display, CDCLK_CTL);
/* inform PCU of the change */
- snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
intel_update_cdclk(display);
}
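
Throughout this file the uncore-keyed snb_pcode_*() and skl_pcode_request() helpers are replaced by intel_pcode_*() variants keyed on struct drm_device, which is what lets the local drm_i915_private pointers disappear (note that the write_timeout conversion below also drops the old 150us fast-timeout argument). A sketch of what such a wrapper presumably reduces to; the body is an assumption, not taken from this patch:

/* Assumed shape of the drm_device-keyed wrapper (illustrative only;
 * the real definition would live with the pcode code, not here): */
static inline int intel_pcode_write(struct drm_device *drm, u32 mbox, u32 val)
{
	return snb_pcode_write(&to_i915(drm)->uncore, mbox, val);
}
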
@@ -1972,9 +2004,7 @@ int intel_mdclk_cdclk_ratio(struct intel_display *display,
static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
intel_mdclk_cdclk_ratio(display, cdclk_config),
cdclk_config->joined_mbus);
}
@@ -2127,7 +2157,6 @@ static void bxt_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_config mid_cdclk_config;
int cdclk = cdclk_config->cdclk;
int ret = 0;
@@ -2141,18 +2170,18 @@ static void bxt_set_cdclk(struct intel_display *display,
if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
; /* NOOP */
else if (DISPLAY_VER(display) >= 11)
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
else
/*
* BSpec requires us to wait up to 150usec, but that leads to
* timeouts; the 2ms used here is based on experiment.
*/
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
+ ret = intel_pcode_write_timeout(display->drm,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 2);
if (ret) {
drm_err(display->drm,
@@ -2181,8 +2210,8 @@ static void bxt_set_cdclk(struct intel_display *display,
* Display versions 14 and beyond
*/;
else if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
- ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ ret = intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
if (DISPLAY_VER(display) < 11) {
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -2190,10 +2219,9 @@ static void bxt_set_cdclk(struct intel_display *display,
* FIXME: Waiting for the request completion could be delayed
* until the next PCODE request based on BSpec.
*/
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level,
- 150, 2);
+ ret = intel_pcode_write_timeout(display->drm,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level, 2);
}
if (ret) {
drm_err(display->drm,
@@ -2479,7 +2507,6 @@ static void intel_pcode_notify(struct intel_display *display,
bool cdclk_update_valid,
bool pipe_count_update_valid)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
u32 update_mask = 0;
@@ -2494,11 +2521,11 @@ static void intel_pcode_notify(struct intel_display *display,
if (pipe_count_update_valid)
update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID;
- ret = skl_pcode_request(&i915->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE |
- update_mask,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE |
+ update_mask,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret)
drm_err(display->drm,
"Failed to inform PCU about display config (err %d)\n",
@@ -2808,7 +2835,6 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
static int intel_compute_min_cdclk(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
const struct intel_bw_state *bw_state;
@@ -2836,7 +2862,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
bw_state = intel_atomic_get_new_bw_state(state);
if (bw_state) {
- min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state);
+ min_cdclk = intel_bw_min_cdclk(display, bw_state);
if (cdclk_state->bw_min_cdclk != min_cdclk) {
int ret;
@@ -3342,6 +3368,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
void intel_cdclk_update_hw_state(struct intel_display *display)
{
+ const struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(display->cdclk.obj.state);
struct intel_crtc *crtc;
@@ -3359,6 +3387,8 @@ void intel_cdclk_update_hw_state(struct intel_display *display)
cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
}
+
+ cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state);
}
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3387,7 +3417,9 @@ static int intel_compute_max_dotclk(struct intel_display *display)
*/
void intel_update_max_cdclk(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VERx100(display) >= 3002) {
+ display->cdclk.max_cdclk_freq = 480000;
+ } else if (DISPLAY_VER(display) >= 30) {
display->cdclk.max_cdclk_freq = 691200;
} else if (display->platform.jasperlake || display->platform.elkhartlake) {
if (display->cdclk.hw.ref == 24000)
@@ -3493,7 +3525,6 @@ static int dg1_rawclk(struct intel_display *display)
static int cnp_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int divider, fraction;
u32 rawclk;
@@ -3513,7 +3544,7 @@ static int cnp_rawclk(struct intel_display *display)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
@@ -3528,10 +3559,8 @@ static int pch_rawclk(struct intel_display *display)
static int vlv_hrawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
/* RAWCLK_FREQ_VLV register updated from power well code */
- return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+ return vlv_get_cck_clock_hpll(display->drm, "hrawclk",
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
@@ -3552,21 +3581,20 @@ static int i9xx_hrawclk(struct intel_display *display)
*/
u32 intel_read_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 freq;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
/*
* MTL always uses a 38.4 MHz rawclk. The bspec tells us
* "RAWCLK_FREQ defaults to the values for 38.4 and does
* not need to be programmed."
*/
freq = 38400;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
freq = dg1_rawclk(display);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_CNP)
freq = cnp_rawclk(display);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
freq = pch_rawclk(display);
else if (display->platform.valleyview || display->platform.cherryview)
freq = vlv_hrawclk(display);
@@ -3842,3 +3870,60 @@ void intel_init_cdclk_hooks(struct intel_display *display)
"Unknown platform. Assuming i830\n"))
display->funcs.cdclk = &i830_cdclk_funcs;
}
+
+int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->logical.cdclk;
+}
+
+int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->actual.cdclk;
+}
+
+int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->actual.voltage_level;
+}
+
+int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe)
+{
+ return cdclk_state->min_cdclk[pipe];
+}
+
+int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->bw_min_cdclk;
+}
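
These one-line getters are the price of making the state struct opaque: code outside this file keeps read access without seeing the layout. A hedged usage fragment (the surrounding logic is hypothetical):

/* Hypothetical consumer: derive a cdclk floor for a pipe from the getters. */
int floor = max(intel_cdclk_min_cdclk(cdclk_state, pipe),
		intel_cdclk_bw_min_cdclk(cdclk_state));
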
+
+bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
+
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ (new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk ||
+ new_cdclk_state->actual.voltage_level != old_cdclk_state->actual.voltage_level))
+ return true;
+
+ return false;
+}
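
intel_cdclk_pmdemand_needs_update() pairs with the intel_bw variant added earlier in this series: together they tell the pmdemand code whether anything it programs has changed. A hedged sketch of how a caller might combine them; intel_pmdemand_atomic_check() is a stand-in for the real consumer:

/* Hypothetical combination at the pmdemand call site. */
if (intel_cdclk_pmdemand_needs_update(state) ||
    intel_bw_pmdemand_needs_update(state)) {
	ret = intel_pmdemand_atomic_check(state); /* stand-in name */
	if (ret)
		return ret;
}
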
+
+void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk)
+{
+ cdclk_state->force_min_cdclk = force_min_cdclk;
+}
+
+void intel_cdclk_read_hw(struct intel_display *display)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
+
+ intel_update_cdclk(display);
+ intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
+ cdclk_state->actual = display->cdclk.hw;
+ cdclk_state->logical = display->cdclk.hw;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index a1cefd455d92..cacee598af0e 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,10 +8,9 @@
#include <linux/types.h>
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
-
+enum pipe;
struct intel_atomic_state;
+struct intel_cdclk_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
@@ -23,42 +22,6 @@ struct intel_cdclk_config {
bool joined_mbus;
};
-struct intel_cdclk_state {
- struct intel_global_state base;
-
- /*
- * Logical configuration of cdclk (used for all scaling,
- * watermark, etc. calculations and checks). This is
- * computed as if all enabled crtcs were active.
- */
- struct intel_cdclk_config logical;
-
- /*
- * Actual configuration of cdclk, can be different from the
- * logical configuration only when all crtc's are DPMS off.
- */
- struct intel_cdclk_config actual;
-
- /* minimum acceptable cdclk to satisfy bandwidth requirements */
- int bw_min_cdclk;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
-
- /* pipe to which cd2x update is synchronized */
- enum pipe pipe;
-
- /* forced minimum cdclk for glk+ audio w/a */
- int force_min_cdclk;
-
- /* bitmask of active pipes */
- u8 active_pipes;
-
- /* update cdclk with pipes disabled */
- bool disable_pipes;
-};
-
void intel_cdclk_init_hw(struct intel_display *display);
void intel_cdclk_uninit_hw(struct intel_display *display);
void intel_init_cdclk_hooks(struct intel_display *display);
@@ -97,4 +60,13 @@ void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc);
int intel_cdclk_init(struct intel_display *display);
void intel_cdclk_debugfs_register(struct intel_display *display);
+int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe);
+int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state);
+bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state);
+void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk);
+void intel_cdclk_read_hw(struct intel_display *display);
+
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.c b/drivers/gpu/drm/i915/display/intel_cmtg.c
index 07d7f4e8f60f..165138b95cb2 100644
--- a/drivers/gpu/drm/i915/display/intel_cmtg.c
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.c
@@ -9,14 +9,13 @@
#include <drm/drm_device.h>
#include <drm/drm_print.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
-#include "intel_crtc.h"
#include "intel_cmtg.h"
#include "intel_cmtg_regs.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_device.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
/**
* DOC: Common Primary Timing Generator (CMTG)
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg_regs.h b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
index 668e41d65e86..945a35578284 100644
--- a/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_CMTG_REGS_H__
#define __INTEL_CMTG_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define CMTG_CLK_SEL _MMIO(0x46160)
#define CMTG_CLK_SEL_A_MASK REG_GENMASK(31, 29)
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index cfe14162231d..671db6926e4c 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -22,7 +22,9 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
@@ -405,14 +407,13 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state)
static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
/* icl+ have dedicated output CSC */
if (DISPLAY_VER(display) >= 11)
return false;
/* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
- if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915))
+ if (DISPLAY_VER(display) < 7 || display->platform.ivybridge)
return false;
return crtc_state->limited_color_range;
@@ -516,7 +517,6 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
if (crtc_state->hw.ctm) {
@@ -538,7 +538,7 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not we need to load an
* identity matrix.
*/
- drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915));
+ drm_WARN_ON(display->drm, !display->platform.geminilake);
ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity);
} else {
@@ -1339,8 +1339,8 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
- intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val);
+ if (crtc_state->dsb_color)
+ intel_dsb_reg_write(crtc_state->dsb_color, reg, val);
else
intel_de_write_fw(display, reg, val);
}
@@ -1350,8 +1350,8 @@ static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
- intel_dsb_reg_write_indexed(crtc_state->dsb_color_vblank, reg, val);
+ if (crtc_state->dsb_color)
+ intel_dsb_reg_write_indexed(crtc_state->dsb_color, reg, val);
else
intel_de_write_fw(display, reg, val);
}
@@ -1389,7 +1389,7 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
for (i = 0; i < 256; i++) {
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
- if (crtc_state->dsb_color_vblank)
+ if (crtc_state->dsb_color)
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
}
@@ -1917,7 +1917,7 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
+ if (crtc_state->dsb_color)
return;
display->funcs.color->load_luts(crtc_state);
@@ -1965,6 +1965,25 @@ void intel_color_modeset(const struct intel_crtc_state *crtc_state)
}
}
+bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->dsb_color;
+}
+
+bool intel_color_uses_chained_dsb(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->dsb_color && !HAS_DOUBLE_BUFFERED_LUT(display);
+}
+
+bool intel_color_uses_gosub_dsb(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->dsb_color && HAS_DOUBLE_BUFFERED_LUT(display);
+}
+
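
The three predicates split the DSB-based color update into two mutually exclusive flavors: "chained" (a separate DSB 1 run at vblank, on hardware without double-buffered LUTs) and "gosub" (a DSB 0 subroutine where the LUTs are double buffered). A sketch of the invariants that follow directly from the definitions above:

/* Follows from the definitions above (illustrative checks only): */
WARN_ON(intel_color_uses_chained_dsb(crtc_state) &&
	intel_color_uses_gosub_dsb(crtc_state));
WARN_ON(intel_color_uses_dsb(crtc_state) !=
	(intel_color_uses_chained_dsb(crtc_state) ||
	 intel_color_uses_gosub_dsb(crtc_state)));
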
void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1982,47 +2001,53 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
return;
- crtc_state->dsb_color_vblank = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
- if (!crtc_state->dsb_color_vblank)
+ if (HAS_DOUBLE_BUFFERED_LUT(display))
+ crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024);
+ else
+ crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
+
+ if (!intel_color_uses_dsb(crtc_state))
return;
display->funcs.color->load_luts(crtc_state);
- if (crtc_state->use_dsb) {
- intel_vrr_send_push(crtc_state->dsb_color_vblank, crtc_state);
- intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color_vblank);
- intel_vrr_check_push_sent(crtc_state->dsb_color_vblank, crtc_state);
- intel_dsb_interrupt(crtc_state->dsb_color_vblank);
+ if (crtc_state->use_dsb && intel_color_uses_chained_dsb(crtc_state)) {
+ intel_vrr_send_push(crtc_state->dsb_color, crtc_state);
+ intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color);
+ intel_vrr_check_push_sent(crtc_state->dsb_color, crtc_state);
+ intel_dsb_interrupt(crtc_state->dsb_color);
}
- intel_dsb_finish(crtc_state->dsb_color_vblank);
+ if (intel_color_uses_gosub_dsb(crtc_state))
+ intel_dsb_gosub_finish(crtc_state->dsb_color);
+ else
+ intel_dsb_finish(crtc_state->dsb_color);
}
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_vblank) {
- intel_dsb_cleanup(crtc_state->dsb_color_vblank);
- crtc_state->dsb_color_vblank = NULL;
+ if (crtc_state->dsb_color) {
+ intel_dsb_cleanup(crtc_state->dsb_color);
+ crtc_state->dsb_color = NULL;
}
}
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_vblank)
- intel_dsb_wait(crtc_state->dsb_color_vblank);
-}
-
-bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
-{
- return crtc_state->dsb_color_vblank;
+ if (crtc_state->dsb_color)
+ intel_dsb_wait(crtc_state->dsb_color);
}
static bool intel_can_preload_luts(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
+ if (HAS_DOUBLE_BUFFERED_LUT(display))
+ return false;
+
return !old_crtc_state->post_csc_lut &&
!old_crtc_state->pre_csc_lut;
}
@@ -3983,12 +4008,10 @@ int intel_color_init(struct intel_display *display)
void intel_color_init_hooks(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (HAS_GMCH(display)) {
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
display->funcs.color = &chv_color_funcs;
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
display->funcs.color = &vlv_color_funcs;
else if (DISPLAY_VER(display) >= 4)
display->funcs.color = &i965_color_funcs;
@@ -4005,7 +4028,7 @@ void intel_color_init_hooks(struct intel_display *display)
display->funcs.color = &skl_color_funcs;
else if (DISPLAY_VER(display) == 8)
display->funcs.color = &bdw_color_funcs;
- else if (IS_HASWELL(i915))
+ else if (display->platform.haswell)
display->funcs.color = &hsw_color_funcs;
else if (DISPLAY_VER(display) == 7)
display->funcs.color = &ivb_color_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 9d66457c1e89..bf7a12ce9df0 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -24,6 +24,8 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state);
+bool intel_color_uses_chained_dsb(const struct intel_crtc_state *crtc_state);
+bool intel_color_uses_gosub_dsb(const struct intel_crtc_state *crtc_state);
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 17eea244cc83..112749f97c26 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,11 +3,13 @@
* Copyright © 2018 Intel Corporation
*/
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "i915_utils.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#define for_each_combo_phy(__display, __phy) \
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index ee41acdccf4e..3694f95376c2 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_COMBO_PHY_REGS__
#define __INTEL_COMBO_PHY_REGS__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define _ICL_COMBOPHY_A 0x162000
#define _ICL_COMBOPHY_B 0x6C000
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index e42357bd9e80..42c923f416b3 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -31,8 +31,9 @@
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
-#include "intel_backlight.h"
+#include "i915_utils.h"
#include "intel_connector.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
@@ -63,10 +64,10 @@ static void intel_connector_modeset_retry_work_fn(struct work_struct *work)
void intel_connector_queue_modeset_retry_work(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
+ if (!queue_work(display->wq.unordered, &connector->modeset_retry_work))
drm_connector_put(&connector->base);
}
@@ -151,35 +152,36 @@ void intel_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-int intel_connector_register(struct drm_connector *connector)
+int intel_connector_register(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(_connector->dev);
int ret;
- ret = intel_backlight_device_register(intel_connector);
+ ret = intel_panel_register(connector);
if (ret)
goto err;
- if (i915_inject_probe_failure(to_i915(connector->dev))) {
+ if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
- goto err_backlight;
+ goto err_panel;
}
- intel_connector_debugfs_add(intel_connector);
+ intel_connector_debugfs_add(connector);
return 0;
-err_backlight:
- intel_backlight_device_unregister(intel_connector);
+err_panel:
+ intel_panel_unregister(connector);
err:
return ret;
}
-void intel_connector_unregister(struct drm_connector *connector)
+void intel_connector_unregister(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
- intel_backlight_device_unregister(intel_connector);
+ intel_panel_unregister(connector);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -204,10 +206,9 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
+ struct intel_display *display = to_intel_display(connector);
- drm_WARN_ON(dev,
- !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);
if (!connector->base.state->crtc)
return INVALID_PIPE;
@@ -264,20 +265,19 @@ static const struct drm_prop_enum_list force_audio_names[] = {
void
intel_attach_force_audio_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.force_audio;
+ prop = display->properties.force_audio;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, 0,
- "audio",
- force_audio_names,
- ARRAY_SIZE(force_audio_names));
+ prop = drm_property_create_enum(display->drm, 0,
+ "audio",
+ force_audio_names,
+ ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
- dev_priv->display.properties.force_audio = prop;
+ display->properties.force_audio = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
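
Both property attach helpers follow the same lazily-created, cached-per-device pattern, now keyed off intel_display instead of drm_i915_private: create the enum property once, remember it in display->properties, then attach it to each connector. A generic sketch of the shape; the "example" names are placeholders, not fields from this patch:

/* Generic shape of the pattern (placeholder names, illustrative only). */
prop = display->properties.example;	/* hypothetical cached field */
if (!prop) {
	prop = drm_property_create_enum(display->drm, 0, "example",
					example_names,
					ARRAY_SIZE(example_names));
	if (!prop)
		return;
	display->properties.example = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
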
@@ -291,20 +291,19 @@ static const struct drm_prop_enum_list broadcast_rgb_names[] = {
void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.broadcast_rgb;
+ prop = display->properties.broadcast_rgb;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Broadcast RGB",
- broadcast_rgb_names,
- ARRAY_SIZE(broadcast_rgb_names));
+ prop = drm_property_create_enum(display->drm, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
- dev_priv->display.properties.broadcast_rgb = prop;
+ display->properties.broadcast_rgb = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
@@ -336,14 +335,14 @@ intel_attach_dp_colorspace_property(struct drm_connector *connector)
void
intel_attach_scaling_mode_property(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
u32 scaling_modes;
scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) |
BIT(DRM_MODE_SCALE_FULLSCREEN);
/* On GMCH platforms borders are only possible on the LVDS port */
- if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (!HAS_GMCH(display) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
scaling_modes |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, scaling_modes);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 76ffb3f8467c..898c5d9e8f7a 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -31,11 +31,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
-#include "i915_irq.h"
-#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
@@ -44,6 +42,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
@@ -91,13 +90,12 @@ static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
bool intel_crt_port_enabled(struct intel_display *display,
i915_reg_t adpa_reg, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
val = intel_de_read(display, adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK_CPT, val);
else
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK, val);
@@ -177,7 +175,6 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
int mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -194,14 +191,14 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev_priv))
+ else if (HAS_PCH_CPT(display))
adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else
adpa |= ADPA_PIPE_SEL(crtc->pipe);
- if (!HAS_PCH_SPLIT(dev_priv))
+ if (!HAS_PCH_SPLIT(display))
intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
switch (mode) {
@@ -356,7 +353,6 @@ intel_crt_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -368,9 +364,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
max_clock = 180000;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
@@ -387,7 +383,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
- if (HAS_PCH_LPT(dev_priv) &&
+ if (HAS_PCH_LPT(display) &&
ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
return MODE_CLOCK_HIGH;
@@ -438,7 +434,6 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -457,7 +452,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (crtc_state->bw_constrained && crtc_state->pipe_bpp < 24) {
drm_dbg_kms(display->drm,
@@ -482,13 +477,12 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 adpa;
bool ret;
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
- bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
+ bool turn_off_dac = HAS_PCH_SPLIT(display);
u32 save_adpa;
crt->force_hotplug_required = false;
@@ -532,8 +526,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- bool reenable_hpd;
u32 adpa;
bool ret;
u32 save_adpa;
@@ -550,7 +542,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*
* Just disable HPD interrupts here to prevent this
*/
- reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_block(&crt->base);
save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
drm_dbg_kms(display->drm,
@@ -577,8 +569,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
drm_dbg_kms(display->drm,
"valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
- if (reenable_hpd)
- intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_clear_and_unblock(&crt->base);
return ret;
}
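
The VLV hotplug workaround now uses the blocking API instead of the old disable/enable pair, which removes the caller-side reenable_hpd bookkeeping: intel_hpd_block() suppresses HPD processing for the encoder, and intel_hpd_clear_and_unblock() discards anything that fired in between before re-enabling it. The resulting usage pattern, as established above:

/* Usage pattern per this patch (bracketing the forced detect cycle): */
intel_hpd_block(&crt->base);
/* ...poke ADPA and wait for the detect cycle without racing the
 * HPD handler... */
intel_hpd_clear_and_unblock(&crt->base);
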
@@ -586,15 +577,14 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 stat;
bool ret = false;
int i, tries = 0;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
return ilk_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return valleyview_crt_detect_hotplug(connector);
/*
@@ -602,14 +592,14 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G45(dev_priv))
+ if (display->platform.g45)
tries = 2;
else
tries = 1;
for (i = 0; i < tries ; i++) {
/* turn on the FORCE_DETECT */
- i915_hotplug_interrupt_update(dev_priv,
+ i915_hotplug_interrupt_update(display,
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
@@ -627,7 +617,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, PORT_HOTPLUG_STAT(display),
CRT_HOTPLUG_INT_STATUS);
- i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
+ i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, 0);
return ret;
}
@@ -880,7 +870,7 @@ intel_crt_detect(struct drm_connector *connector,
wakeref = intel_display_power_get(display, encoder->power_domain);
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
/* We cannot rely on the HPD pin always being correctly wired
 * up; for example, many KVMs do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -904,7 +894,7 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
status = connector_status_disconnected;
goto out;
}
@@ -943,7 +933,6 @@ out:
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *encoder = &crt->base;
intel_wakeref_t wakeref;
@@ -956,7 +945,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
wakeref = intel_display_power_get(display, encoder->power_domain);
ret = intel_crt_ddc_get_modes(connector, connector->ddc);
- if (ret || !IS_G4X(dev_priv))
+ if (ret || !display->platform.g4x)
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -1015,16 +1004,15 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector;
struct intel_crt *crt;
i915_reg_t adpa_reg;
u8 ddc_pin;
u32 adpa;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@@ -1072,7 +1060,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI);
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
crt->base.pipe_mask = BIT(PIPE_A);
else
crt->base.pipe_mask = ~0;
@@ -1084,7 +1072,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
- if (I915_HAS_HOTPLUG(display) &&
+ if (HAS_HOTPLUG(display) &&
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
@@ -1112,7 +1100,7 @@ void intel_crt_init(struct intel_display *display)
intel_ddi_buf_trans_init(&crt->base);
} else {
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
crt->base.compute_config = pch_crt_compute_config;
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
@@ -1134,7 +1122,7 @@ void intel_crt_init(struct intel_display *display)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 5b2603ef2ff7..a187db6df2d3 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -17,7 +17,6 @@
#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
@@ -29,6 +28,7 @@
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vblank.h"
@@ -124,7 +124,7 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- crtc->block_dc_for_vblank = intel_psr_needs_block_dc_vblank(crtc_state);
+ crtc->vblank_psr_notify = intel_psr_needs_vblank_notification(crtc_state);
assert_vblank_disabled(&crtc->base);
drm_crtc_set_max_vblank_count(&crtc->base,
@@ -154,9 +154,9 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
drm_crtc_vblank_off(&crtc->base);
assert_vblank_disabled(&crtc->base);
- crtc->block_dc_for_vblank = false;
+ crtc->vblank_psr_notify = false;
- flush_work(&display->irq.vblank_dc_work);
+ flush_work(&display->irq.vblank_notify_work);
}
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
@@ -305,7 +305,6 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
int intel_crtc_init(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_plane *primary, *cursor;
const struct drm_crtc_funcs *funcs;
struct intel_crtc *crtc;
@@ -333,7 +332,7 @@ int intel_crtc_init(struct intel_display *display, enum pipe pipe)
for_each_sprite(display, pipe, sprite) {
struct intel_plane *plane;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
plane = skl_universal_plane_create(display, pipe, PLANE_2 + sprite);
else
plane = intel_sprite_plane_create(display, pipe, sprite);
@@ -418,10 +417,13 @@ int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
+
return crtc_state->hw.active &&
!crtc_state->preload_luts &&
!intel_crtc_needs_modeset(crtc_state) &&
- intel_crtc_needs_color_update(crtc_state) &&
+ (intel_crtc_needs_color_update(crtc_state) &&
+ !HAS_DOUBLE_BUFFERED_LUT(display)) &&
!intel_color_uses_dsb(crtc_state) &&
!crtc_state->use_dsb;
}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 599ddce96371..0c7f91046996 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -5,9 +5,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
#include "intel_vblank.h"
@@ -42,13 +43,13 @@ intel_dump_m_n_config(struct drm_printer *p,
}
static void
-intel_dump_infoframe(struct drm_i915_private *i915,
+intel_dump_infoframe(struct intel_display *display,
const union hdmi_infoframe *frame)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
+ hdmi_infoframe_log(KERN_DEBUG, display->drm->dev, frame);
}
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
@@ -136,7 +137,7 @@ static void intel_dump_plane_state(struct drm_printer *p,
}
static void
-ilk_dump_csc(struct drm_i915_private *i915,
+ilk_dump_csc(struct intel_display *display,
struct drm_printer *p,
const char *name,
const struct intel_csc_matrix *csc)
@@ -152,7 +153,7 @@ ilk_dump_csc(struct drm_i915_private *i915,
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);
- if (DISPLAY_VER(i915) < 7)
+ if (DISPLAY_VER(display) < 7)
return;
drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
@@ -178,7 +179,6 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
{
struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
struct drm_printer p;
@@ -188,7 +188,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
crtc->base.base.id, crtc->base.name,
@@ -262,19 +262,19 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
- intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.avi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
- intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
+ intel_dump_infoframe(display, &pipe_config->infoframes.spd);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
- intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.hdmi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
@@ -294,8 +294,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
pipe_config->hw.adjusted_mode.crtc_vdisplay,
pipe_config->framestart_delay, pipe_config->msa_timing_delay);
- drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
+ drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
+ str_yes_no(intel_vrr_is_fixed_rr(pipe_config)),
pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end);
@@ -319,14 +320,14 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id,
pipe_config->hw.scaling_filter);
- if (HAS_GMCH(i915))
+ if (HAS_GMCH(display))
drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -343,7 +344,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_dpll_dump_hw_state(display, &p, &pipe_config->dpll_hw_state);
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->cgm_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
@@ -354,20 +355,20 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
- i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ display->color.glk_linear_degamma_lut ? "(linear) " : "",
pipe_config->pre_csc_lut ?
drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
pipe_config->post_csc_lut ?
drm_color_lut_size(pipe_config->post_csc_lut) : 0);
- if (DISPLAY_VER(i915) >= 11)
- ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);
+ if (DISPLAY_VER(display) >= 11)
+ ilk_dump_csc(display, &p, "output csc", &pipe_config->output_csc);
- if (!HAS_GMCH(i915))
- ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
- else if (IS_CHERRYVIEW(i915))
+ if (!HAS_GMCH(display))
+ ilk_dump_csc(display, &p, "pipe csc", &pipe_config->csc);
+ else if (display->platform.cherryview)
vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
intel_vdsc_state_dump(&p, 0, pipe_config);
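
The hunks above all apply one mechanical conversion: drop the drm_i915_private local, derive struct intel_display * via to_intel_display(), and route DISPLAY_VER()/platform checks and register access through it. A minimal standalone sketch of that shape, with the driver's types reduced to stubs (none of these definitions are the real ones, and the real to_intel_display() resolves via container_of() chains):

#include <stdio.h>

/* Stand-in types; not the driver's real definitions. */
struct drm_device { const char *name; };
struct intel_display { struct drm_device *drm; int ver; };
struct intel_crtc_state { struct intel_display *display; };

/* Stand-in for to_intel_display(). */
static struct intel_display *to_intel_display(const struct intel_crtc_state *s)
{
        return s->display;
}

#define DISPLAY_VER(display) ((display)->ver)

/* Converted shape: one local derived from the state, used everywhere. */
static void dump_state(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);

        if (DISPLAY_VER(display) >= 9)
                printf("%s: gen9+ scaler state would be dumped here\n",
                       display->drm->name);
}

int main(void)
{
        struct drm_device drm = { "card0" };
        struct intel_display display = { &drm, 12 };
        struct intel_crtc_state state = { &display };

        dump_state(&state);
        return 0;
}

The same shape repeats in every converted function in the files that follow.
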
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 3276a5b4a9b0..198e69efe9ac 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -9,12 +9,11 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_de.h"
@@ -23,6 +22,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_vblank.h"
@@ -158,10 +158,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 22595766eac5..801235a5bc0a 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -6,8 +6,10 @@
#include <linux/log2.h>
#include <linux/math64.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
+#include "intel_alpm.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -37,7 +39,13 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- if (display->platform.pantherlake && phy == PHY_A)
+ /*
+ * PTL doesn't have a PHY connected to PORT B; as such,
+ * there will never be a case where PTL uses PHY B.
+ * WCL uses PORT A and B with the C10 PHY.
+ * Reusing the condition for WCL and extending it for PORT B
+ * should not cause any issues for PTL.
+ */
+ if (display->platform.pantherlake && phy < PHY_C)
return true;
if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
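
Folding PTL and WCL into a single phy < PHY_C test makes the predicate uniform across the C10 platforms. A standalone sketch of the dispatch, with the platform flags and phy enum reduced to stand-ins (not the driver's real types):

#include <stdbool.h>
#include <stdio.h>

enum phy { PHY_A, PHY_B, PHY_C, PHY_D };

/* Stand-in for the display->platform.* flags. */
struct platform_flags {
        bool pantherlake; /* covers WCL too in this sketch */
        bool lunarlake;
        bool meteorlake;
};

static bool is_c10phy(const struct platform_flags *p, enum phy phy)
{
        /* PTL/WCL: PHY A and B are C10 (PTL never instantiates B). */
        if (p->pantherlake && phy < PHY_C)
                return true;

        /* LNL/MTL: PHY A and B are likewise C10. */
        if ((p->lunarlake || p->meteorlake) && phy < PHY_C)
                return true;

        return false;
}

int main(void)
{
        struct platform_flags ptl = { .pantherlake = true };

        printf("PTL PHY_A: %d\n", is_c10phy(&ptl, PHY_A)); /* 1 */
        printf("PTL PHY_C: %d\n", is_c10phy(&ptl, PHY_C)); /* 0 */
        return 0;
}
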
@@ -2761,9 +2769,9 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
if (!is_dp && is_hdmi_frl(port_clock))
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA */
@@ -2774,7 +2782,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
XELPDP_SSC_ENABLE_PLLB, val);
}
@@ -3097,10 +3105,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- if (DISPLAY_VER(display) >= 30)
- clock = REG_FIELD_GET(XE3_DDI_CLOCK_SELECT_MASK, val);
- else
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
@@ -3168,13 +3173,9 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
* clock muxes, gating and SSC
*/
- if (DISPLAY_VER(display) >= 30) {
- mask = XE3_DDI_CLOCK_SELECT_MASK;
- val |= XE3_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
- } else {
- mask = XELPDP_DDI_CLOCK_SELECT_MASK;
- val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
- }
+ mask = XELPDP_DDI_CLOCK_SELECT_MASK(display);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display,
+ intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
mask |= XELPDP_FORWARD_CLOCK_UNGATE;
val |= XELPDP_FORWARD_CLOCK_UNGATE;
@@ -3229,6 +3230,46 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
intel_cx0pll_enable(encoder, crtc_state);
}
+/*
+ * According to HAS we need to enable MAC Transmitting LFPS in the "PHY Common
+ * Control 0" PIPE register in case of AUX Less ALPM is going to be used. This
+ * function is doing that and is called by link retrain sequence.
+ */
+void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ intel_wakeref_t wakeref;
+ int i;
+ u8 owned_lane_mask;
+
+ if (DISPLAY_VER(display) < 20 ||
+ !intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), crtc_state))
+ return;
+
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
+
+ wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0,
+ C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+
+ for (i = 0; i < 4; i++) {
+ int tx = i % 2 + 1;
+ u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
+
+ if (!(owned_lane_mask & lane_mask))
+ continue;
+
+ intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0),
+ CONTROL0_MAC_TRANSMIT_LFPS,
+ CONTROL0_MAC_TRANSMIT_LFPS, MB_WRITE_COMMITTED);
+ }
+
+ intel_cx0_phy_transaction_end(encoder, wakeref);
+}
+
static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
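
In the loop added above, each owned PHY lane carries two TX instances, and the i % 2 + 1 and i < 2 arithmetic walks (lane 0, tx 1), (lane 0, tx 2), (lane 1, tx 1), (lane 1, tx 2), skipping lanes the port does not own. A standalone sketch of just that index mapping, with the lane-mask constants reduced to plain bits rather than the real INTEL_CX0_LANE* values:

#include <stdio.h>

/* Assumed stand-ins for INTEL_CX0_LANE0/LANE1. */
#define LANE0 0x1
#define LANE1 0x2

int main(void)
{
        unsigned char owned_lane_mask = LANE0 | LANE1;

        for (int i = 0; i < 4; i++) {
                int tx = i % 2 + 1;                       /* TX instance within the lane: 1 or 2 */
                unsigned char lane_mask = i < 2 ? LANE0 : LANE1;

                if (!(owned_lane_mask & lane_mask))
                        continue;                         /* skip lanes this port doesn't own */

                printf("i=%d -> lane mask 0x%x, tx %d\n", i, lane_mask, tx);
        }
        return 0;
}
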
@@ -3287,7 +3328,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK, 0);
+ XELPDP_DDI_CLOCK_SELECT_MASK(display), 0);
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_FORWARD_CLOCK_UNGATE, 0);
@@ -3336,7 +3377,7 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
* 5. Program PORT CLOCK CTRL register to disable and gate clocks
*/
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) |
XELPDP_FORWARD_CLOCK_UNGATE, 0);
/* 6. Program DDI_CLK_VALFREQ to 0. */
@@ -3365,7 +3406,7 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
* handling is done via the standard shared DPLL framework.
*/
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index a8f811ca5e7b..c5a7b529955b 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -43,5 +43,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
void intel_cx0_pll_power_save_wa(struct intel_display *display);
+void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 960f7f778fb8..77eae1d845f7 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_CX0_PHY_REGS_H__
#define __INTEL_CX0_PHY_REGS_H__
-#include "i915_reg_defs.h"
#include "intel_display_limits.h"
+#include "intel_display_reg_defs.h"
/* DDI Buffer Control */
#define _DDI_CLK_VALFREQ_A 0x64030
@@ -192,10 +192,17 @@
#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19)
#define XELPDP_TBT_CLOCK_ACK REG_BIT(18)
-#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
-#define XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
-#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
-#define XE3_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XE3_DDI_CLOCK_SELECT_MASK, val)
+#define _XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
+#define _XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
+#define XELPDP_DDI_CLOCK_SELECT_MASK(display) (DISPLAY_VER(display) >= 30 ? \
+ _XE3_DDI_CLOCK_SELECT_MASK : _XELPDP_DDI_CLOCK_SELECT_MASK)
+#define XELPDP_DDI_CLOCK_SELECT_PREP(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_PREP(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_PREP(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+#define XELPDP_DDI_CLOCK_SELECT_GET(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_GET(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_GET(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+
#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0
#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8
#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9
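
The three wrappers above replace per-version call sites with one name that picks the correct field layout at runtime. A self-contained userspace sketch of the same pattern, with simplified GENMASK/FIELD_* stand-ins and the DISPLAY_VER() dispatch reduced to a plain integer:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's REG_GENMASK/REG_FIELD_* helpers. */
#define GENMASK(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v)  (((v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, r)   (((r) & (mask)) >> __builtin_ctz(mask))

/* Two layouts of the same field: bits 15:12 before Xe3, bits 16:12 from Xe3 on. */
#define OLD_CLOCK_SELECT_MASK GENMASK(15, 12)
#define NEW_CLOCK_SELECT_MASK GENMASK(16, 12)

/* Version-dispatching wrappers, mirroring XELPDP_DDI_CLOCK_SELECT_*(display, ...). */
#define CLOCK_SELECT_MASK(ver)    ((ver) >= 30 ? NEW_CLOCK_SELECT_MASK : OLD_CLOCK_SELECT_MASK)
#define CLOCK_SELECT_PREP(ver, v) ((ver) >= 30 ? FIELD_PREP(NEW_CLOCK_SELECT_MASK, (v)) : \
                                                 FIELD_PREP(OLD_CLOCK_SELECT_MASK, (v)))
#define CLOCK_SELECT_GET(ver, r)  ((ver) >= 30 ? FIELD_GET(NEW_CLOCK_SELECT_MASK, (r)) : \
                                                 FIELD_GET(OLD_CLOCK_SELECT_MASK, (r)))

int main(void)
{
        uint32_t reg;

        /* The same call site works for both display generations. */
        reg = CLOCK_SELECT_PREP(30, 0x9);   /* Xe3+: 5-bit field */
        printf("xe3 field: 0x%x\n", CLOCK_SELECT_GET(30, reg));

        reg = CLOCK_SELECT_PREP(20, 0x8);   /* pre-Xe3: 4-bit field */
        printf("pre-xe3 field: 0x%x\n", CLOCK_SELECT_GET(20, reg));
        return 0;
}

This is why the callers in intel_cx0_phy.c above could drop their DISPLAY_VER() branches: the dispatch now lives in one place.
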
@@ -278,6 +285,9 @@
#define PHY_CX0_TX_CONTROL(tx, control) (0x400 + ((tx) - 1) * 0x200 + (control))
#define CONTROL2_DISABLE_SINGLE_TX REG_BIT(6)
+#define PHY_CMN1_CONTROL(tx, control) (0x800 + ((tx) - 1) * 0x200 + (control))
+#define CONTROL0_MAC_TRANSMIT_LFPS REG_BIT(1)
+
/* C20 Registers */
#define PHY_C20_WR_ADDRESS_L 0xC02
#define PHY_C20_WR_ADDRESS_H 0xC03
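
The new PHY_CMN1_CONTROL follows the same addressing scheme as PHY_CX0_TX_CONTROL above it: each TX instance owns a 0x200-byte register bank, with control indexing inside the bank. A quick standalone check of the addresses it produces (the macro body is copied verbatim; everything else is scaffolding):

#include <stdio.h>

/* Copied from the header hunk above: per-TX banks of 0x200 bytes from 0x800. */
#define PHY_CMN1_CONTROL(tx, control) (0x800 + ((tx) - 1) * 0x200 + (control))

int main(void)
{
        /* Control word 0 of TX1 and TX2. */
        printf("TX1 CONTROL0: 0x%x\n", PHY_CMN1_CONTROL(1, 0)); /* 0x800 */
        printf("TX2 CONTROL0: 0x%x\n", PHY_CMN1_CONTROL(2, 0)); /* 0xa00 */
        return 0;
}
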
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index f38c998935b9..0405396c7750 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -30,11 +30,13 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_consumer.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "icl_dsi.h"
+#include "intel_alpm.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
#include "intel_backlight.h"
@@ -48,6 +50,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -70,14 +73,17 @@
#include "intel_lspcon.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
+#include "intel_panel.h"
#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
+#include "intel_step.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -106,14 +112,14 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
return level;
}
-static bool has_buf_trans_select(struct drm_i915_private *i915)
+static bool has_buf_trans_select(struct intel_display *display)
{
- return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) < 10 && !display->platform.broxton;
}
-static bool has_iboost(struct drm_i915_private *i915)
+static bool has_iboost(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) == 9 && !display->platform.broxton;
}
/*
@@ -124,25 +130,25 @@ static bool has_iboost(struct drm_i915_private *i915)
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, i),
trans->entries[i].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, i),
trans->entries[i].hsw.trans2);
}
}
@@ -155,7 +161,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
u32 iboost_bit = 0;
int n_entries;
@@ -163,27 +169,25 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, 9),
trans->entries[level].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, 9),
trans->entries[level].hsw.trans2);
}
static i915_reg_t intel_ddi_buf_status_reg(struct intel_display *display, enum port port)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 14)
- return XELPDP_PORT_BUF_CTL1(i915, port);
+ return XELPDP_PORT_BUF_CTL1(display, port);
else
return DDI_BUF_CTL(port);
}
@@ -235,7 +239,7 @@ static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
port_name(port));
}
-static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
+static u32 hsw_pll_to_ddi_pll_sel(const struct intel_dpll *pll)
{
switch (pll->info->id) {
case DPLL_ID_WRPLL1:
@@ -259,7 +263,7 @@ static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
int clock = crtc_state->port_clock;
const enum intel_dpll_id id = pll->info->id;
@@ -346,7 +350,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -359,14 +362,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
if (dig_port->ddi_a_4_lanes)
intel_dp->DP |= DDI_A_4_LANES;
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (intel_dp_is_uhbr(crtc_state))
intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
else
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
}
- if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
+ if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -379,8 +382,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
}
}
-static int icl_calc_tbt_pll_link(struct intel_display *display,
- enum port port)
+static int icl_calc_tbt_pll_link(struct intel_display *display, enum port port)
{
u32 val = intel_de_read(display, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
@@ -414,15 +416,14 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -445,7 +446,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
@@ -468,7 +469,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
temp |= DP_MSA_MISC_COLOR_VSC_SDP;
- intel_de_write(dev_priv, TRANS_MSA_MISC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_MSA_MISC(display, cpu_transcoder),
temp);
}
@@ -507,8 +508,8 @@ static u32
intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
@@ -516,7 +517,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
temp |= TGL_TRANS_DDI_SELECT_PORT(port);
else
temp |= TRANS_DDI_SELECT_PORT(port);
@@ -578,7 +579,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_HDMI_SCRAMBLING;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
@@ -591,11 +592,11 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
master == INVALID_TRANSCODER);
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
@@ -604,7 +605,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
- if (IS_DISPLAY_VER(dev_priv, 8, 10) &&
+ if (IS_DISPLAY_VER(display, 8, 10) &&
crtc_state->master_transcoder != INVALID_TRANSCODER) {
u8 master_select =
bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
@@ -619,11 +620,10 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
u32 ctl2 = 0;
@@ -635,12 +635,12 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
PORT_SYNC_MODE_MASTER_SELECT(master_select);
}
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
ctl2);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
intel_ddi_transcoder_func_reg_val_get(encoder,
crtc_state));
}
@@ -654,8 +654,7 @@ void
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
@@ -663,7 +662,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
}
@@ -677,27 +676,26 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 11)
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
0);
- ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- if (IS_DISPLAY_VER(dev_priv, 8, 10))
+ if (IS_DISPLAY_VER(display, 8, 10))
ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
@@ -706,7 +704,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
if (intel_dp_mst_is_slave_trans(crtc_state))
@@ -725,17 +723,15 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool enable, u32 hdcp_mask)
{
struct intel_display *display = to_intel_display(intel_encoder);
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
wakeref = intel_display_power_get_if_enabled(display,
intel_encoder->power_domain);
- if (drm_WARN_ON(dev, !wakeref))
+ if (drm_WARN_ON(display->drm, !wakeref))
return -ENXIO;
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
hdcp_mask, enable ? hdcp_mask : 0);
intel_display_power_put(display, intel_encoder->power_domain, wakeref);
return ret;
@@ -744,7 +740,6 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct intel_display *display = to_intel_display(intel_connector);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
@@ -765,12 +760,12 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
- ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ ddi_mode = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) &
TRANS_DDI_MODE_SELECT_MASK;
if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI ||
@@ -804,7 +799,6 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
@@ -819,13 +813,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!wakeref)
return;
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ tmp = intel_de_read(display, DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP));
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A) {
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
@@ -846,7 +840,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
goto out;
}
- for_each_pipe(dev_priv, p) {
+ for_each_pipe(display, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
@@ -856,7 +850,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!trans_wakeref)
continue;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
port_mask = TGL_TRANS_DDI_PORT_MASK;
ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
} else {
@@ -864,8 +858,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
intel_display_power_put(display, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
@@ -883,12 +877,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && dp128b132b_pipe_mask) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
* If we don't have 8b/10b MST, but have more than one
@@ -901,12 +895,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* can assume it's SST.
*/
if (hweight8(dp128b132b_pipe_mask) > 1 ||
- intel_dp_mst_encoder_active_links(dig_port))
+ intel_dp_mst_active_streams(intel_dp))
mst_pipe_mask = dp128b132b_pipe_mask;
}
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask);
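
The MST inference above reduces to a popcount plus a software-state check: more than one transcoder in 128b/132b mode, or known active MST streams, means MST. A standalone sketch of that decision, with hweight8() replaced by the compiler builtin and the streams query reduced to an integer:

#include <stdbool.h>
#include <stdio.h>

/* hweight8() stand-in: population count of the low 8 bits. */
static int hweight8(unsigned char v)
{
        return __builtin_popcount(v);
}

/* Assumed simplification: >1 pipe in the mask, or active streams, implies MST. */
static bool infer_mst(unsigned char dp128b132b_pipe_mask, int active_streams)
{
        return hweight8(dp128b132b_pipe_mask) > 1 || active_streams > 0;
}

int main(void)
{
        printf("mask 0x3: %d\n", infer_mst(0x3, 0)); /* two pipes -> MST */
        printf("mask 0x1: %d\n", infer_mst(0x1, 0)); /* one pipe, no streams -> SST */
        return 0;
}
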
@@ -914,7 +908,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask);
@@ -922,12 +916,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*is_dp_mst = mst_pipe_mask;
out:
- if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
+ if (*pipe_mask && (display->platform.geminilake || display->platform.broxton)) {
+ tmp = intel_de_read(display, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
encoder->base.base.id, encoder->base.name, tmp);
}
@@ -1041,8 +1035,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
@@ -1050,53 +1043,53 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
else
val = TRANS_CLK_SEL_PORT(encoder->port);
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val;
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_DISABLED;
else
val = TRANS_CLK_SEL_DISABLED;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
-static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+static void _skl_ddi_set_iboost(struct intel_display *display,
enum port port, u8 iboost)
{
u32 tmp;
- tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
+ tmp = intel_de_read(display, DISPIO_CR_TX_BMU_CR0);
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
if (iboost)
tmp |= iboost << BALANCE_LEG_SHIFT(port);
else
tmp |= BALANCE_LEG_DISABLE(port);
- intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
+ intel_de_write(display, DISPIO_CR_TX_BMU_CR0, tmp);
}
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int level)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1109,7 +1102,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
iboost = trans->entries[level].hsw.i_boost;
@@ -1117,28 +1110,28 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
- drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
+ drm_err(display->drm, "Invalid I_boost value %u\n", iboost);
return;
}
- _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
+ _skl_ddi_set_iboost(display, encoder->port, iboost);
if (encoder->port == PORT_A && dig_port->max_lanes == 4)
- _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
+ _skl_ddi_set_iboost(display, PORT_E, iboost);
}
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries;
encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
+ if (drm_WARN_ON(display->drm, n_entries < 1))
n_entries = 1;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
@@ -1171,14 +1164,14 @@ static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_stat
static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
u32 val;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
@@ -1186,25 +1179,25 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
intel_dp->hobl_active = is_hobl_buf_trans(trans);
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
+ intel_de_rmw(display, ICL_PORT_CL_DW10(phy), val,
intel_dp->hobl_active ? val : 0);
}
/* Set PORT_TX_DW5 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
COEFF_POLARITY | CURSOR_PROGRAM |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW2_LN(ln, phy),
SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK,
SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) |
SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) |
@@ -1216,7 +1209,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK,
POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) |
POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) |
@@ -1227,7 +1220,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW7_LN(ln, phy),
N_SCALAR_MASK,
N_SCALAR(trans->entries[level].icl.dw7_n_scalar));
}
@@ -1236,7 +1229,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
int ln;
@@ -1246,12 +1239,12 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -1261,33 +1254,33 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln < 4; ln++) {
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
LOADGEN_SELECT,
icl_combo_phy_loadgen_select(crtc_state, ln));
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ intel_de_rmw(display, ICL_PORT_CL_DW5(phy),
0, SUS_CLOCK_CONFIG);
/* 4. Clear training enable to change swing values */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(encoder, crtc_state);
/* 6. Set training enable to trigger update */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1296,13 +1289,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
return;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
- intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
}
@@ -1312,13 +1305,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
}
@@ -1329,7 +1322,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1338,7 +1331,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1354,21 +1347,21 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port),
+ intel_de_rmw(display, MG_CLKHUB(ln, tc_port),
CFG_LOW_RATE_LKREN_EN,
crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
- intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
@@ -1378,9 +1371,9 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
- intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
}
}
@@ -1403,6 +1396,21 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
for (ln = 0; ln < 2; ln++) {
int level;
+ /* Wa_16011342517:adl-p */
+ if (display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)) {
+ if ((intel_encoder_is_hdmi(encoder) &&
+ crtc_state->port_clock == 594000) ||
+ (intel_encoder_is_dp(encoder) &&
+ crtc_state->port_clock == 162000)) {
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
+ LOADGEN_SHARING_PMD_DISABLE, 1);
+ } else {
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
+ LOADGEN_SHARING_PMD_DISABLE, 0);
+ }
+ }
+
intel_dkl_phy_write(display, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0);
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
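
The Wa_16011342517 block added above keys on two exact link rates. A tiny predicate sketch (standalone C; the encoder-type helpers reduced to booleans, port_clock in kHz as in the driver) makes the gating condition easy to verify:

#include <stdbool.h>
#include <stdio.h>

/* Assumed simplification: is_hdmi/is_dp stand in for intel_encoder_is_hdmi/_dp(). */
static bool needs_wa_16011342517(bool is_hdmi, bool is_dp, int port_clock)
{
        return (is_hdmi && port_clock == 594000) ||
               (is_dp && port_clock == 162000);
}

int main(void)
{
        printf("HDMI @594000: %d\n", needs_wa_16011342517(true, false, 594000)); /* 1: affected rate */
        printf("DP @270000:   %d\n", needs_wa_16011342517(false, true, 270000)); /* 0: unaffected rate */
        return 0;
}
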
@@ -1490,12 +1498,12 @@ int intel_ddi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int lane)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
int level, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return 0;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1504,7 +1512,7 @@ int intel_ddi_level(struct intel_encoder *encoder,
level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
lane);
- if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
+ if (drm_WARN_ON_ONCE(display->drm, level >= n_entries))
level = n_entries - 1;
return level;
@@ -1514,13 +1522,13 @@ static void
hsw_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
enum port port = encoder->port;
u32 signal_levels;
- if (has_iboost(dev_priv))
+ if (has_iboost(display))
skl_ddi_set_iboost(encoder, crtc_state, level);
/* HDMI ignores the rest */
@@ -1529,49 +1537,49 @@ hsw_set_signal_levels(struct intel_encoder *encoder,
signal_levels = DDI_BUF_TRANS_SELECT(level);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
}
-static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_enable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
+ intel_de_rmw(display, reg, clk_sel_mask, clk_sel);
/*
* "This step and the step before must be
* done with separate register writes."
*/
- intel_de_rmw(i915, reg, clk_off, 0);
+ intel_de_rmw(display, reg, clk_off, 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_disable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, 0, clk_off);
+ intel_de_rmw(display, reg, 0, clk_off);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+static bool _icl_ddi_is_clock_enabled(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- return !(intel_de_read(i915, reg) & clk_off);
+ return !(intel_de_read(display, reg) & clk_off);
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
_icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel_shift)
{
@@ -1579,20 +1587,20 @@ _icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
id = (intel_de_read(display, reg) & clk_sel_mask) >> clk_sel_shift;
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_enable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1600,23 +1608,23 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_disable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+ return _icl_ddi_is_clock_enabled(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1629,14 +1637,14 @@ static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1644,23 +1652,23 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1673,23 +1681,23 @@ static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* If we fail this, something went very wrong: first 2 PLLs should be
* used by first 2 phys and last 2 PLLs by last phys
*/
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
(pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) ||
(pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
return;
- _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_enable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1697,23 +1705,23 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_disable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+ return _icl_ddi_is_clock_enabled(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1733,20 +1741,20 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
if (phy >= PHY_C)
id += DPLL_ID_DG1_DPLL2;
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1754,23 +1762,23 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
+struct intel_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1783,39 +1791,39 @@ struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port.
* MG does not exist, but the programming is required to ungate DDIC and DDID."
*/
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
icl_ddi_combo_enable_clock(encoder, crtc_state);
}
static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
icl_ddi_combo_disable_clock(encoder);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
@@ -1826,59 +1834,59 @@ static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, DDI_CLK_SEL(port),
+ intel_de_write(display, DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
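The Type-C variants wrap the register update in display->dpll.lock because ICL_DPCLKA_CFGCR0 is shared by every port: the read-modify-write must not race another encoder toggling its own TC_CLK_OFF bit. A self-contained model of that locked RMW, with an assumed bit layout:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t dpll_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t dpclka_cfgcr0;

#define TC_CLK_OFF(tc)	(1u << (12 + (tc)))	/* assumed bit layout */

static void locked_rmw(uint32_t clear, uint32_t set)
{
	/* serialize against other encoders sharing this register */
	pthread_mutex_lock(&dpll_lock);
	dpclka_cfgcr0 = (dpclka_cfgcr0 & ~clear) | set;
	pthread_mutex_unlock(&dpll_lock);
}

int main(void)
{
	locked_rmw(TC_CLK_OFF(0), 0);	/* mirrors icl_ddi_tc_enable_clock */
	locked_rmw(0, TC_CLK_OFF(0));	/* mirrors icl_ddi_tc_disable_clock */
	printf("reg = 0x%08x\n", dpclka_cfgcr0);
	return 0;
}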
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
- tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0);
+ tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
}
-static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
@@ -1905,10 +1913,10 @@ static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encode
return NULL;
}
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
-static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder->base.dev);
enum intel_dpll_id id;
@@ -1928,56 +1936,56 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
static void skl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port),
DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void skl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
0, DPLL_CTRL2_DDI_CLK_OFF(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
/*
* FIXME Not sure if the override affects both
* the PLL selection and the CLK_OFF bit.
*/
- return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
+ return !(intel_de_read(display, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
}
-static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -1996,39 +2004,39 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
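skl_ddi_get_pll() runs the mapping in reverse: it recovers which DPLL feeds a port by extracting the per-port clock-select field out of DPLL_CTRL2. A hypothetical decode with an assumed field layout:

#include <stdint.h>
#include <stdio.h>

#define CLK_SEL_SHIFT(port)	((port) * 3 + 1)	/* assumed layout */
#define CLK_SEL_MASK(port)	(0x3u << CLK_SEL_SHIFT(port))

static int pll_id_for_port(uint32_t dpll_ctrl2, int port)
{
	return (int)((dpll_ctrl2 & CLK_SEL_MASK(port)) >> CLK_SEL_SHIFT(port));
}

int main(void)
{
	uint32_t reg = 0x2u << CLK_SEL_SHIFT(1);	/* port B on DPLL2 */

	printf("port B pll id = %d\n", pll_id_for_port(reg, 1));
	return 0;
}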
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+ intel_de_write(display, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
void hsw_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_de_write(display, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
+ return intel_de_read(display, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
}
-static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -2063,7 +2071,7 @@ static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
void intel_ddi_enable_clock(struct intel_encoder *encoder,
@@ -2081,7 +2089,7 @@ void intel_ddi_disable_clock(struct intel_encoder *encoder)
void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 port_mask;
bool ddi_clk_needed;
@@ -2101,7 +2109,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (drm_WARN_ON(&i915->drm, is_mst))
+ if (drm_WARN_ON(display->drm, is_mst))
return;
}
@@ -2116,11 +2124,11 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* Sanity check that we haven't incorrectly registered another
* encoder using any of the ports of this DSI encoder.
*/
- for_each_intel_encoder(&i915->drm, other_encoder) {
+ for_each_intel_encoder(display->drm, other_encoder) {
if (other_encoder == encoder)
continue;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
port_mask & BIT(other_encoder->port)))
return;
}
@@ -2135,7 +2143,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
!encoder->is_clock_enabled(encoder))
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n",
encoder->base.base.id, encoder->base.name);
@@ -2255,10 +2263,10 @@ tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state)
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_CTL(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_CTL(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_CTL(encoder->port);
@@ -2267,10 +2275,10 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_STATUS(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_STATUS(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_STATUS(encoder->port);
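dp_tp_ctl_reg() and dp_tp_status_reg() capture the TGL change where the DP_TP_* registers moved from per-port to per-transcoder indexing. A toy selector under that assumption — the offsets and strides below are illustrative, not the real register map:

#include <stdint.h>
#include <stdio.h>

static uint32_t dp_tp_ctl_offset(int display_ver, int port, int transcoder)
{
	if (display_ver >= 12)		/* TGL+: indexed by transcoder */
		return 0x60540 + 0x1000 * (uint32_t)transcoder;
	return 0x64040 + 0x100 * (uint32_t)port;	/* older: by port */
}

int main(void)
{
	printf("ver 9, port B:  0x%x\n", dp_tp_ctl_offset(9, 1, 0));
	printf("ver 12, trans B: 0x%x\n", dp_tp_ctl_offset(12, 1, 1));
	return 0;
}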
@@ -2445,14 +2453,14 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
static void intel_ddi_disable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (!crtc_state->fec_enable)
return;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_FEC_ENABLE, 0);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
}
static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
@@ -2474,11 +2482,11 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
* Splitter enable for eDP MSO is limited to certain pipes, on certain
* platforms.
*/
-static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+static u8 intel_ddi_splitter_pipe_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) > 20)
+ if (DISPLAY_VER(display) > 20)
return ~0;
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
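intel_ddi_splitter_pipe_mask() reduces to a platform-keyed bitmask of which pipes may drive the eDP MSO splitter. A compact sketch, with the PIPE_* numbering assumed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(x)	(1u << (x))

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

static uint8_t splitter_pipe_mask(int display_ver, bool is_alderlake_p)
{
	if (display_ver > 20)
		return 0xff;			/* any pipe */
	else if (is_alderlake_p)
		return BIT(PIPE_A) | BIT(PIPE_B);
	else
		return BIT(PIPE_A);
}

int main(void)
{
	printf("adl-p splitter pipes: 0x%x\n", splitter_pipe_mask(13, true));
	return 0;
}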
@@ -2487,28 +2495,28 @@ static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
- dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+ dss1 = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE;
if (!pipe_config->splitter.enable)
return;
- if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
+ if (drm_WARN_ON(display->drm, !(intel_ddi_splitter_pipe_mask(display) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
switch (dss1 & SPLITTER_CONFIGURATION_MASK) {
default:
- drm_WARN(&i915->drm, true,
+ drm_WARN(display->drm, true,
"Invalid splitter configuration, dss1=0x%08x\n", dss1);
fallthrough;
case SPLITTER_CONFIGURATION_2_SEGMENT:
@@ -2524,12 +2532,12 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1 = 0;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
if (crtc_state->splitter.enable) {
@@ -2541,7 +2549,7 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT;
}
- intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe),
+ intel_de_rmw(display, ICL_PIPE_DSS_CTL1(pipe),
SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK |
OVERLAP_PIXELS_MASK, dss1);
}
@@ -2549,27 +2557,27 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
static void
mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, 0, set_bits);
- if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, 0, set_bits);
+ if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) {
+ drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
}
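mtl_ddi_enable_d2d() arms the enable bit and then polls the matching state bit with a 100 us budget via wait_for_us(). A standalone model of that poll-until-set-or-timeout idiom; the helper below returns true on success, and the poll interval is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool wait_for_bit_set(const volatile uint32_t *reg, uint32_t bits,
			     unsigned int timeout_us)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 };	/* ~1 us */

	while (timeout_us--) {
		if (*reg & bits)
			return true;	/* hardware latched the state bit */
		nanosleep(&ts, NULL);
	}
	return false;			/* caller logs a timeout error */
}

int main(void)
{
	volatile uint32_t reg = 1u << 3;	/* pretend hw already latched */

	return wait_for_bit_set(&reg, 1u << 3, 100) ? 0 : 1;
}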
@@ -2599,13 +2607,13 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val;
val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, val);
}
@@ -2734,7 +2742,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int ret;
@@ -2771,14 +2778,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 4. Enable the port PLL.
*
* The PLL enabling itself was already done before this function by
- * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
+ * hsw_crtc_enable()->intel_enable_dpll(). We need only
* configure the PLL to port mapping here.
*/
intel_ddi_enable_clock(encoder, crtc_state);
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -2882,16 +2889,15 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- if (DISPLAY_VER(dev_priv) < 11)
- drm_WARN_ON(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 11)
+ drm_WARN_ON(display->drm,
is_mst && (port == PORT_A || port == PORT_E));
else
- drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
+ drm_WARN_ON(display->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2908,14 +2914,14 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
encoder->set_signal_levels(encoder, crtc_state);
@@ -2931,7 +2937,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_start_link_train(state, intel_dp, crtc_state);
- if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
+ if ((port != PORT_A || DISPLAY_VER(display) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp, crtc_state);
@@ -2979,12 +2985,11 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_enable_clock(encoder, crtc_state);
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
@@ -3022,10 +3027,9 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
@@ -3050,27 +3054,27 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
static void
mtl_ddi_disable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, clr_bits, 0);
- if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100))
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, clr_bits, 0);
+ if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100))
+ drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3089,10 +3093,9 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (DISPLAY_VER(display) >= 14)
intel_wait_ddi_buf_idle(display, port);
@@ -3100,7 +3103,7 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
mtl_ddi_disable_d2d(encoder);
if (intel_crtc_has_dp_encoder(crtc_state)) {
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE, 0);
}
@@ -3118,7 +3121,6 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
intel_wakeref_t wakeref;
@@ -3135,12 +3137,12 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
*/
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
0);
}
@@ -3160,7 +3162,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* Configure Transcoder Clock select to direct no clock to the
* transcoder"
*/
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
@@ -3176,8 +3178,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_clock(encoder);
/* De-select Thunderbolt */
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port),
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
@@ -3187,7 +3189,6 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
intel_wakeref_t wakeref;
@@ -3195,12 +3196,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_ddi_buf_disable(encoder, old_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
@@ -3220,7 +3221,6 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *pipe_crtc;
bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI);
@@ -3249,6 +3249,8 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
}
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3257,7 +3259,7 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_dsc_disable(old_pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
@@ -3359,17 +3361,19 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 9)
+ if (port == PORT_A && DISPLAY_VER(display) < 9)
intel_dp_stop_link_train(intel_dp, crtc_state);
drm_connector_update_privacy_screen(conn_state);
intel_edp_backlight_on(crtc_state, conn_state);
+ intel_panel_prepare(crtc_state, conn_state);
+
if (!intel_lspcon_active(dig_port) || intel_dp_has_hdmi_sink(&dig_port->dp))
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
@@ -3401,7 +3405,6 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
@@ -3410,11 +3413,11 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
/* e. Enable D2D Link for C10/C20 Phy */
@@ -3423,7 +3426,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
encoder->set_signal_levels(encoder, crtc_state);
/* Display WA #1143: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
/*
* For some reason these chicken bits have been
* stuffed into a transcoder register, even though
@@ -3433,7 +3436,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port);
u32 val;
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(display, reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3442,8 +3445,8 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
udelay(1);
@@ -3454,7 +3457,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
intel_ddi_power_up_lanes(encoder, crtc_state);
@@ -3475,7 +3478,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->ddi_a_4_lanes)
buf_ctl |= DDI_A_4_LANES;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
u32 port_buf = 0;
port_buf |= XELPDP_PORT_WIDTH(crtc_state->lane_count);
@@ -3483,15 +3486,15 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ } else if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
+ drm_WARN_ON(display->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -3522,8 +3525,7 @@ static void intel_ddi_enable(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, crtc_state);
- /* Enable/Disable DP2.0 SDP split config before transcoder */
- intel_audio_sdp_split_update(crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
/* 128b/132b SST */
if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
@@ -3567,9 +3569,11 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
+ intel_panel_unprepare(old_conn_state);
intel_psr_disable(intel_dp, old_crtc_state);
+ intel_alpm_disable(intel_dp);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
intel_dp_sink_disable_decompression(state,
@@ -3584,12 +3588,12 @@ static void intel_ddi_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = old_conn_state->connector;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
}
@@ -3653,18 +3657,18 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
- if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
+ if (DISPLAY_VER(display) >= 14 || !intel_encoder_is_tc(encoder))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(crtc_state))
- intel_update_active_dpll(state, pipe_crtc, encoder);
+ intel_dpll_update_active(state, pipe_crtc, encoder);
}
/*
@@ -3678,7 +3682,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_tc_port = intel_encoder_is_tc(encoder);
@@ -3697,7 +3701,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
* Type-C ports. Skip this step for TBT.
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dpio_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
}
@@ -3757,6 +3761,18 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_ddi_buf_enable(encoder, intel_dp->DP);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+
+ /*
+ * 6.k If AUX-Less ALPM is going to be enabled:
+ * i. Configure PORT_ALPM_CTL and PORT_ALPM_LFPS_CTL here
+ */
+ intel_alpm_port_configure(intel_dp, crtc_state);
+
+ /*
+ * ii. Enable MAC Transmits LFPS in the "PHY Common Control 0" PIPE
+ * register
+ */
+ intel_lnl_mac_transmit_lfps(encoder, crtc_state);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
@@ -3765,10 +3781,9 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 dp_tp_ctl;
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE);
@@ -3781,10 +3796,10 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
- if (IS_ALDERLAKE_P(dev_priv) &&
+ if (display->platform.alderlake_p &&
(intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port)))
adlp_tbt_to_dp_alt_switch_wa(encoder);
@@ -3796,11 +3811,11 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 temp;
- temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ temp = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
@@ -3821,17 +3836,17 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
break;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp);
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), temp);
}
static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
@@ -3841,28 +3856,26 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
* In this case there is a requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 12)
+ if (port == PORT_A && DISPLAY_VER(display) < 12)
return;
- if (intel_de_wait_for_set(dev_priv,
+ if (intel_de_wait_for_set(display,
dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_IDLE_DONE, 2))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DP idle patterns\n");
}
-static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+static bool intel_ddi_is_audio_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- struct intel_display *display = &dev_priv->display;
-
if (cpu_transcoder == TRANSCODER_EDP)
return false;
if (!intel_display_power_is_enabled(display, POWER_DOMAIN_AUDIO_MMIO))
return false;
- return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
+ return intel_de_read(display, HSW_AUD_PIN_ELD_CP_VLD) &
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
@@ -3892,34 +3905,34 @@ static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
}
-static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+static enum transcoder bdw_transcoder_master_readout(struct intel_display *display,
enum transcoder cpu_transcoder)
{
u32 master_select;
- if (DISPLAY_VER(dev_priv) >= 11) {
- u32 ctl2 = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) >= 11) {
+ u32 ctl2 = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder));
if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
return INVALID_TRANSCODER;
master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
} else {
- u32 ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ u32 ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
return INVALID_TRANSCODER;
@@ -3936,15 +3949,14 @@ static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *de
static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum transcoder cpu_transcoder;
crtc_state->master_transcoder =
- bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+ bdw_transcoder_master_readout(display, crtc_state->cpu_transcoder);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ for_each_cpu_transcoder_masked(display, cpu_transcoder, transcoders) {
enum intel_display_power_domain power_domain;
intel_wakeref_t trans_wakeref;
@@ -3955,14 +3967,14 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
if (!trans_wakeref)
continue;
- if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ if (bdw_transcoder_master_readout(display, cpu_transcoder) ==
crtc_state->cpu_transcoder)
crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
intel_display_power_put(display, power_domain, trans_wakeref);
}
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
crtc_state->master_transcoder != INVALID_TRANSCODER &&
crtc_state->sync_mode_slaves_mask);
}
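bdw_get_trans_port_sync_config() reads this transcoder's own master selection, then walks the other transcoders and collects every one whose master field points back at it. The same scan with plain integers standing in for the hardware state:

#include <stdint.h>
#include <stdio.h>

#define INVALID	(-1)

/* master[i] = which transcoder i syncs to, or INVALID */
static uint32_t slaves_of(const int *master, int n, int self)
{
	uint32_t mask = 0;

	for (int i = 0; i < n; i++)
		if (i != self && master[i] == self)
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	int master[4] = { INVALID, 0, 0, INVALID };	/* B and C follow A */

	printf("sync slaves of A: 0x%x\n", slaves_of(master, 4, 0));
	return 0;
}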
@@ -4085,11 +4097,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 ddi_func_ctl, ddi_mode, flags = 0;
- ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ddi_func_ctl = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if (ddi_func_ctl & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -4131,13 +4142,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
* If this is true, we know we're being called from mst stream
* encoder's ->get_config().
*/
- if (intel_dp_mst_encoder_active_links(dig_port))
+ if (intel_dp_mst_active_streams(intel_dp))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
@@ -4152,11 +4163,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
/* XXX: DSI transcoder paranoia */
- if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder)))
return;
intel_ddi_read_func_ctl(encoder, pipe_config);
@@ -4164,14 +4175,14 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_mso_get_config(encoder, pipe_config);
pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
+ intel_ddi_is_audio_enabled(display, cpu_transcoder);
if (encoder->type == INTEL_OUTPUT_EDP)
intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
ddi_dotclock_get(pipe_config);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_get_lane_lat_optim_mask(encoder);
@@ -4192,7 +4203,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
intel_psr_get_config(encoder, pipe_config);
@@ -4206,7 +4217,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
@@ -4222,7 +4233,7 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->intel_dpll,
&crtc_state->dpll_hw_state);
}
@@ -4276,7 +4287,7 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
-static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll)
+static bool icl_ddi_tc_pll_is_tbt(const struct intel_dpll *pll)
{
return pll->info->id == DPLL_ID_ICL_TBTPLL;
}
@@ -4285,10 +4296,10 @@ static enum icl_port_dpll_id
icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return ICL_PORT_DPLL_DEFAULT;
if (icl_ddi_tc_pll_is_tbt(pll))
@@ -4309,7 +4320,7 @@ intel_ddi_port_pll_type(struct intel_encoder *encoder,
static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id;
@@ -4332,10 +4343,10 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll))
+ if (icl_ddi_tc_pll_is_tbt(crtc_state->intel_dpll))
crtc_state->port_clock = icl_calc_tbt_pll_link(display, encoder->port);
else
- crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->intel_dpll,
&crtc_state->dpll_hw_state);
}
@@ -4382,11 +4393,11 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
bool fastset = true;
if (intel_encoder_is_tc(encoder)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
@@ -4421,12 +4432,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4441,13 +4452,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ if (display->platform.haswell && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->pch_pfit.force_thru =
pipe_config->pch_pfit.enabled ||
pipe_config->crc_enabled;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -4498,9 +4509,9 @@ static u8
intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
int tile_group_id)
{
+ struct intel_display *display = to_intel_display(ref_crtc_state);
struct drm_connector *connector;
const struct drm_connector_state *conn_state;
- struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
struct intel_atomic_state *state =
to_intel_atomic_state(ref_crtc_state->uapi.state);
u8 transcoders = 0;
@@ -4510,7 +4521,7 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4542,11 +4553,11 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
@@ -4618,7 +4629,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_connector *connector;
enum port port = dig_port->base.port;
@@ -4627,7 +4638,7 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
return -ENOMEM;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain;
else
dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -4643,15 +4654,14 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
}
if (dig_port->base.type == INTEL_OUTPUT_EDP) {
- struct drm_device *dev = dig_port->base.base.dev;
struct drm_privacy_screen *privacy_screen;
- privacy_screen = drm_privacy_screen_get(dev->dev, NULL);
+ privacy_screen = drm_privacy_screen_get(display->drm->dev, NULL);
if (!IS_ERR(privacy_screen)) {
drm_connector_attach_privacy_screen_provider(&connector->base,
privacy_screen);
} else if (PTR_ERR(privacy_screen) != -ENODEV) {
- drm_warn(dev, "Error getting privacy-screen\n");
+ drm_warn(display->drm, "Error getting privacy-screen\n");
}
}
@@ -4662,7 +4672,6 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
struct i2c_adapter *ddc = connector->base.ddc;
@@ -4675,7 +4684,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
if (connector->base.status != connector_status_connected)
return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
ctx);
if (ret)
return ret;
@@ -4692,7 +4701,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
@@ -4708,7 +4717,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- drm_err(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
connector->base.base.id, connector->base.name, ret);
return 0;
}
@@ -4733,11 +4742,11 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
static void intel_ddi_link_check(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/* TODO: Move checking the HDMI link state here as well. */
- drm_WARN_ON(&i915->drm, !dig_port->dp.attached_connector);
+ drm_WARN_ON(display->drm, !dig_port->dp.attached_connector);
intel_dp_link_check(encoder);
}
@@ -4800,26 +4809,26 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
static bool lpt_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, SDEISR) & bit;
+ return intel_de_read(display, SDEISR) & bit;
}
static bool hsw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, DEISR) & bit;
+ return intel_de_read(display, DEISR) & bit;
}
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+ return intel_de_read(display, GEN8_DE_PORT_ISR) & bit;
}
static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
@@ -4848,7 +4857,7 @@ static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
if (dig_port->base.port != PORT_A)
return false;
@@ -4859,7 +4868,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
* supported configuration
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
return true;
return false;
@@ -4868,15 +4877,15 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
static int
intel_ddi_max_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum port port = dig_port->base.port;
int max_lanes = 4;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
- if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
max_lanes = port == PORT_A ? 4 : 0;
else
/* Both A and E share 2 lanes */
@@ -4889,7 +4898,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
* so we use the proper lane count for our calculations.
*/
if (intel_ddi_a_force_4_lanes(dig_port)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Forcing DDI_A_4_LANES for port A\n");
dig_port->ddi_a_4_lanes = true;
max_lanes = 4;
@@ -4898,8 +4907,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
return max_lanes;
}
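intel_ddi_max_lanes() encodes the pre-ICL lane budget: DDI A and E share four lanes, and the DDI_A_4_LANES strap hands all of them to A (leaving E none); otherwise each gets two. A sketch of that split, with ports as plain chars and the version cutoff taken from the code above:

#include <stdbool.h>
#include <stdio.h>

static int max_lanes(char port, bool ddi_a_4_lanes, int display_ver)
{
	if (display_ver >= 11)
		return 4;		/* no A/E sharing on ICL+ */
	if (port == 'A')
		return ddi_a_4_lanes ? 4 : 2;
	if (port == 'E')
		return ddi_a_4_lanes ? 0 : 2;	/* A took the shared lanes */
	return 4;
}

int main(void)
{
	printf("strapped: A=%d E=%d\n",
	       max_lanes('A', true, 9), max_lanes('E', true, 9));
	printf("shared:   A=%d E=%d\n",
	       max_lanes('A', false, 9), max_lanes('E', false, 9));
	return 0;
}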
-static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin xelpd_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_D_XELPD)
return HPD_PORT_D + port - PORT_D_XELPD;
@@ -4909,8 +4917,7 @@ static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin dg1_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4918,8 +4925,7 @@ static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin tgl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_TC1 + port - PORT_TC1;
@@ -4927,11 +4933,10 @@ static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin rkl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return tgl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return tgl_hpd_pin(display, port);
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4939,8 +4944,7 @@ static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin icl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_C)
return HPD_PORT_TC1 + port - PORT_C;
@@ -4948,31 +4952,30 @@ static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin ehl_hpd_pin(struct intel_display *display, enum port port)
{
if (port == PORT_D)
return HPD_PORT_A;
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
+static enum hpd_pin skl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
+static bool intel_ddi_is_tc(struct intel_display *display, enum port port)
{
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return port >= PORT_TC1;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
return port >= PORT_C;
else
return false;
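The *_hpd_pin() helpers above and intel_ddi_is_tc() are pure arithmetic over the port enum. A self-contained sketch of the Tiger Lake-style mapping, assuming invented enum values (the real kernel enum layout may differ):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_TC1, PORT_TC2, PORT_TC3 };  /* invented */
enum hpd_pin { HPD_PORT_A, HPD_PORT_B, HPD_PORT_C,
	       HPD_PORT_TC1, HPD_PORT_TC2, HPD_PORT_TC3 };

/* Mirrors tgl_hpd_pin(): TC ports map onto the TC pin block, legacy ports 1:1. */
static enum hpd_pin tgl_hpd_pin_sketch(enum port port)
{
	if (port >= PORT_TC1)
		return HPD_PORT_TC1 + (port - PORT_TC1);
	return HPD_PORT_A + (port - PORT_A);
}

int main(void)
{
	printf("PORT_TC2 -> pin %d (HPD_PORT_TC2 == %d)\n",
	       tgl_hpd_pin_sketch(PORT_TC2), (int)HPD_PORT_TC2);
	return 0;
}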
@@ -5015,21 +5018,21 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
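port_tc_name()/tc_port_name() turn an enum offset into the ASCII digit used by the "DDI TC%c" encoder names built in intel_ddi_init() further down. A short demo, assuming an invented enum value:

#include <stdio.h>

enum port { PORT_TC1 = 6, PORT_TC2, PORT_TC3 };    /* values invented for the demo */

#define port_tc_name(port) ((port) - PORT_TC1 + '1')

int main(void)
{
	printf("DDI TC%c\n", port_tc_name(PORT_TC2));  /* prints "DDI TC2" */
	return 0;
}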
-static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
+static bool port_strap_detected(struct intel_display *display, enum port port)
{
/* straps not used on skl+ */
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return true;
switch (port) {
case PORT_A:
- return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ return intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
case PORT_B:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
case PORT_C:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
case PORT_D:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
case PORT_E:
return true; /* no strap for DDI-E */
default:
@@ -5043,18 +5046,18 @@ static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
return init_dp || intel_encoder_is_tc(encoder);
}
-static bool assert_has_icl_dsi(struct drm_i915_private *i915)
+static bool assert_has_icl_dsi(struct intel_display *display)
{
- return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) &&
- !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11,
+ return !drm_WARN(display->drm, !display->platform.alderlake_p &&
+ !display->platform.tigerlake && DISPLAY_VER(display) != 11,
"Platform does not support DSI\n");
}
-static bool port_in_use(struct drm_i915_private *i915, enum port port)
+static bool port_in_use(struct intel_display *display, enum port port)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
/* FIXME what about second port for dual link DSI? */
if (encoder->port == port)
return true;
@@ -5066,7 +5069,6 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port)
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp;
@@ -5078,8 +5080,8 @@ void intel_ddi_init(struct intel_display *display,
if (port == PORT_NONE)
return;
- if (!port_strap_detected(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!port_strap_detected(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c strap not detected\n", port_name(port));
return;
}
@@ -5087,15 +5089,15 @@ void intel_ddi_init(struct intel_display *display,
if (!assert_port_valid(display, port))
return;
- if (port_in_use(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (port_in_use(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c already claimed\n", port_name(port));
return;
}
if (intel_bios_encoder_supports_dsi(devdata)) {
/* BXT/GLK handled elsewhere, for now at least */
- if (!assert_has_icl_dsi(dev_priv))
+ if (!assert_has_icl_dsi(display))
return;
icl_dsi_init(display, devdata);
@@ -5111,7 +5113,7 @@ void intel_ddi_init(struct intel_display *display,
* outputs.
*/
if (intel_hti_uses_phy(display, phy)) {
- drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
+ drm_dbg_kms(display->drm, "PORT %c / PHY %c reserved by HTI\n",
port_name(port), phy_name(phy));
return;
}
@@ -5128,20 +5130,20 @@ void intel_ddi_init(struct intel_display *display,
*/
init_dp = true;
init_hdmi = false;
- drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ drm_dbg_kms(display->drm, "VBT says port %c has lspcon\n",
port_name(port));
}
if (!init_dp && !init_hdmi) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
return;
}
if (intel_phy_is_snps(display, phy) &&
- dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
- drm_dbg_kms(&dev_priv->drm,
+ display->snps.phy_failed_calibration & BIT(phy)) {
+ drm_dbg_kms(display->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
phy_name(phy));
}
@@ -5155,26 +5157,26 @@ void intel_ddi_init(struct intel_display *display,
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c",
port_name(port - PORT_D_XELPD + PORT_D),
phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %s%c/PHY %s%c",
port >= PORT_TC1 ? "TC" : "",
port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c%s/PHY %s%c",
port_name(port),
@@ -5182,7 +5184,7 @@ void intel_ddi_init(struct intel_display *display,
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
} else {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c", port_name(port), phy_name(phy));
}
@@ -5218,32 +5220,32 @@ void intel_ddi_init(struct intel_display *display,
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
encoder->get_config = mtl_ddi_get_config;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->enable_clock = intel_mpllb_enable;
encoder->disable_clock = intel_mpllb_disable;
encoder->get_config = dg2_ddi_get_config;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (display->platform.alderlake_s) {
encoder->enable_clock = adls_ddi_enable_clock;
encoder->disable_clock = adls_ddi_disable_clock;
encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
encoder->get_config = adls_ddi_get_config;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (display->platform.rocketlake) {
encoder->enable_clock = rkl_ddi_enable_clock;
encoder->disable_clock = rkl_ddi_disable_clock;
encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
encoder->get_config = rkl_ddi_get_config;
- } else if (IS_DG1(dev_priv)) {
+ } else if (display->platform.dg1) {
encoder->enable_clock = dg1_ddi_enable_clock;
encoder->disable_clock = dg1_ddi_disable_clock;
encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
encoder->get_config = dg1_ddi_get_config;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
@@ -5255,8 +5257,8 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (DISPLAY_VER(display) >= 11) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
@@ -5268,36 +5270,36 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
/* BXT/GLK have fixed PLL->port mapping */
encoder->get_config = bxt_ddi_get_config;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
encoder->enable_clock = skl_ddi_enable_clock;
encoder->disable_clock = skl_ddi_disable_clock;
encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
encoder->get_config = skl_ddi_get_config;
- } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ } else if (display->platform.broadwell || display->platform.haswell) {
encoder->enable_clock = hsw_ddi_enable_clock;
encoder->disable_clock = hsw_ddi_disable_clock;
encoder->is_clock_enabled = hsw_ddi_is_clock_enabled;
encoder->get_config = hsw_ddi_get_config;
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels;
} else {
encoder->set_signal_levels = hsw_set_signal_levels;
@@ -5305,29 +5307,29 @@ void intel_ddi_init(struct intel_display *display,
intel_ddi_buf_trans_init(encoder);
- if (DISPLAY_VER(dev_priv) >= 13)
- encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
- else if (IS_DG1(dev_priv))
- encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
- else if (IS_ROCKETLAKE(dev_priv))
- encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) >= 12)
- encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 11)
- encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
+ if (DISPLAY_VER(display) >= 13)
+ encoder->hpd_pin = xelpd_hpd_pin(display, port);
+ else if (display->platform.dg1)
+ encoder->hpd_pin = dg1_hpd_pin(display, port);
+ else if (display->platform.rocketlake)
+ encoder->hpd_pin = rkl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) >= 12)
+ encoder->hpd_pin = tgl_hpd_pin(display, port);
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ encoder->hpd_pin = ehl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 11)
+ encoder->hpd_pin = icl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
+ encoder->hpd_pin = skl_hpd_pin(display, port);
else
encoder->hpd_pin = intel_hpd_pin_default(port);
- ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ ddi_buf_ctl = intel_de_read(display, DDI_BUF_CTL(port));
dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) ||
ddi_buf_ctl & DDI_BUF_PORT_REVERSAL;
- dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
+ dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
@@ -5346,7 +5348,7 @@ void intel_ddi_init(struct intel_display *display,
if (!is_legacy && init_hdmi) {
is_legacy = !init_dp;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
port_name(port),
str_yes_no(init_dp),
@@ -5363,24 +5365,24 @@ void intel_ddi_init(struct intel_display *display,
goto err;
}
- drm_WARN_ON(&dev_priv->drm, port > PORT_I);
+ drm_WARN_ON(display->drm, port > PORT_I);
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(display, port);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_tc(encoder))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
dig_port->connected = bdw_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_HASWELL(dev_priv)) {
+ } else if (display->platform.haswell) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -5396,7 +5398,7 @@ void intel_ddi_init(struct intel_display *display,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(display);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 353eb04079e9..f6f511bb0431 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -16,9 +16,9 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dp;
+struct intel_dpll;
struct intel_dpll_hw_state;
struct intel_encoder;
-struct intel_shared_dpll;
enum pipe;
enum port;
enum transcoder;
@@ -40,7 +40,7 @@ void intel_ddi_enable_clock(struct intel_encoder *encoder,
void intel_ddi_disable_clock(struct intel_encoder *encoder);
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll);
+ struct intel_dpll *pll);
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_disable_clock(struct intel_encoder *encoder);
@@ -50,7 +50,7 @@ intel_ddi_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
-struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
+struct intel_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index b7399e9d11cc..9ecdcf6b73e4 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,7 +6,6 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
-#include "intel_display_conversion.h"
#include "intel_display_core.h"
#include "intel_dmc_wl.h"
#include "intel_dsb.h"
@@ -19,7 +18,7 @@ static inline struct intel_uncore *__to_uncore(struct intel_display *display)
}
static inline u32
-__intel_de_read(struct intel_display *display, i915_reg_t reg)
+intel_de_read(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -31,7 +30,6 @@ __intel_de_read(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)
static inline u8
intel_de_read8(struct intel_display *display, i915_reg_t reg)
@@ -66,7 +64,7 @@ intel_de_read64_2x32(struct intel_display *display,
}
static inline void
-__intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
+intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
{
intel_dmc_wl_get(display, reg);
@@ -74,10 +72,9 @@ __intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_posting_read(p,...) __intel_de_posting_read(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
{
intel_dmc_wl_get(display, reg);
@@ -85,7 +82,6 @@ __intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__)
static inline u32
__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
@@ -95,8 +91,7 @@ __intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
}
static inline u32
-__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
- u32 set)
+intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
{
u32 val;
@@ -108,15 +103,14 @@ __intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
return val;
}
-#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__)
static inline int
__intel_de_wait_for_register_nowl(struct intel_display *display,
i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+ u32 mask, u32 value, unsigned int timeout_ms)
{
return intel_wait_for_register(__to_uncore(display), reg, mask,
- value, timeout);
+ value, timeout_ms);
}
static inline int
@@ -131,14 +125,14 @@ __intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
static inline int
intel_de_wait(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+ u32 mask, u32 value, unsigned int timeout_ms)
{
int ret;
intel_dmc_wl_get(display, reg);
ret = __intel_de_wait_for_register_nowl(display, reg, mask, value,
- timeout);
+ timeout_ms);
intel_dmc_wl_put(display, reg);
@@ -147,14 +141,14 @@ intel_de_wait(struct intel_display *display, i915_reg_t reg,
static inline int
intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+ u32 mask, u32 value, unsigned int timeout_ms, u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask,
- value, timeout);
+ value, timeout_ms, out_value);
intel_dmc_wl_put(display, reg);
@@ -181,20 +175,18 @@ intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
}
static inline int
-__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, mask, timeout);
+ return intel_de_wait(display, reg, mask, mask, timeout_ms);
}
-#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, 0, timeout);
+ return intel_de_wait(display, reg, mask, 0, timeout_ms);
}
-#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__)
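Renaming timeout to timeout_ms across these helpers documents the unit without changing behavior: intel_de_wait() and friends still poll until (reg & mask) == value or the millisecond deadline expires, returning 0 on success and an error on timeout. A stand-alone model of that contract, with a simulated clock and register (names invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;
static unsigned int elapsed_ms;          /* simulated clock, 1 ms per poll */

static uint32_t read_reg(void)
{
	if (++elapsed_ms == 3)           /* bit comes up after 3 ms */
		fake_reg |= 0x1;
	return fake_reg;
}

/* 0 on success, -1 on timeout (the kernel helpers return -ETIMEDOUT). */
static int wait_for(uint32_t mask, uint32_t value, unsigned int timeout_ms)
{
	while (elapsed_ms < timeout_ms)
		if ((read_reg() & mask) == value)
			return 0;
	return -1;
}

int main(void)
{
	printf("ret=%d after %u ms\n", wait_for(0x1, 0x1, 10), elapsed_ms);
	return 0;
}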
/*
* Unlocked mmio-accessors, think carefully before using these.
@@ -205,7 +197,7 @@ __intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
* a more localised lock guarding all access to that bank of registers.
*/
static inline u32
-__intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
+intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -214,15 +206,13 @@ __intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
{
trace_i915_reg_rw(true, reg, val, sizeof(val), true);
intel_uncore_write_fw(__to_uncore(display), reg, val);
}
-#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
static inline u32
intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
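With the __to_intel_display() wrapper macros removed, each intel_de_*() accessor is now a plain function whose first argument must be struct intel_display *; call sites can no longer pass dev_priv or other pointer types. The read-modify-write helper keeps its usual semantics: return the old value, write (old & ~clear) | set. A minimal sketch of that semantic against a fake register (not the real API):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xffff0000;

/* Models intel_de_rmw(): returns the old value, stores (old & ~clear) | set. */
static uint32_t rmw(uint32_t clear, uint32_t set)
{
	uint32_t old = fake_reg;

	fake_reg = (old & ~clear) | set;
	return old;
}

int main(void)
{
	uint32_t old = rmw(0x00ff0000, 0x000000ff);

	printf("old=%08x new=%08x\n", old, fake_reg);  /* old=ffff0000 new=ff0000ff */
	return 0;
}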
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3b509c70fb58..7035c1fc9033 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -55,8 +55,8 @@
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
+#include "intel_alpm.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
@@ -66,13 +66,15 @@
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
-#include "intel_cursor.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -91,6 +93,7 @@
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
+#include "intel_flipq.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
@@ -103,9 +106,9 @@
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
-#include "intel_pcode.h"
#include "intel_pfit.h"
#include "intel_pipe_crc.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
@@ -138,46 +141,47 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
/* returns HPLL frequency in kHz */
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
+int vlv_get_hpll_vco(struct drm_device *drm)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
/* Obtain SKU information */
- hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+ hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
return vco_freq[hpll_freq] * 1000;
}
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock(struct drm_device *drm,
const char *name, u32 reg, int ref_freq)
{
u32 val;
int divider;
- val = vlv_cck_read(dev_priv, reg);
+ val = vlv_cck_read(drm, reg);
divider = val & CCK_FREQUENCY_VALUES;
- drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
+ drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
(divider << CCK_FREQUENCY_STATUS_SHIFT),
"%s change in progress\n", name);
return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock_hpll(struct drm_device *drm,
const char *name, u32 reg)
{
+ struct drm_i915_private *dev_priv = to_i915(drm);
int hpll;
- vlv_cck_get(dev_priv);
+ vlv_cck_get(drm);
if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
+ dev_priv->hpll_freq = vlv_get_hpll_vco(drm);
- hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
+ hpll = vlv_get_cck_clock(drm, name, reg, dev_priv->hpll_freq);
- vlv_cck_put(dev_priv);
+ vlv_cck_put(drm);
return hpll;
}
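vlv_get_cck_clock_hpll() now takes struct drm_device * but keeps its lazy-caching shape: the HPLL VCO fuse is read once, cached in dev_priv->hpll_freq, then reused for every CCK clock computation. That shape, modeled stand-alone (the VCO table is the one from vlv_get_hpll_vco() above; the fuse index is invented):

#include <stdio.h>

static int hpll_freq_khz;                       /* 0 = not read yet */

static int read_hpll_fuse_khz(void)
{
	static const int vco_mhz[] = { 800, 1600, 2000, 2400 };
	int fuse = 1;                           /* invented SKU fuse value */

	return vco_mhz[fuse] * 1000;            /* kHz, as in vlv_get_hpll_vco() */
}

static int get_hpll_khz(void)
{
	if (hpll_freq_khz == 0)                 /* read the fuse only once */
		hpll_freq_khz = read_hpll_fuse_khz();
	return hpll_freq_khz;
}

int main(void)
{
	printf("%d kHz, cached %d kHz\n", get_hpll_khz(), get_hpll_khz());
	return 0;
}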
@@ -189,7 +193,7 @@ void intel_update_czclk(struct intel_display *display)
if (!display->platform.valleyview && !display->platform.cherryview)
return;
- dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
+ dev_priv->czclk_freq = vlv_get_cck_clock_hpll(display->drm, "czclk",
CCK_CZ_CLOCK_CONTROL);
drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
@@ -663,7 +667,6 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -696,7 +699,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) &&
- intel_set_memory_cxsr(dev_priv, false))
+ intel_set_memory_cxsr(display, false))
intel_plane_initial_vblank_wait(crtc);
/*
@@ -1043,19 +1046,16 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- intel_psr_post_plane_update(state, crtc);
-
- intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
+ intel_frontbuffer_flip(display, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_fbc_post_update(state, crtc);
@@ -1080,6 +1080,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
+
+ intel_alpm_post_plane_update(state, crtc);
+
+ intel_psr_post_plane_update(state, crtc);
}
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
@@ -1168,13 +1172,15 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ intel_alpm_pre_plane_update(state, crtc);
+ intel_psr_pre_plane_update(state, crtc);
+
if (intel_crtc_vrr_disabling(state, crtc)) {
intel_vrr_disable(old_crtc_state);
intel_crtc_update_active_timings(old_crtc_state, false);
@@ -1185,8 +1191,6 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_drrs_deactivate(old_crtc_state);
- intel_psr_pre_plane_update(state, crtc);
-
if (hsw_ips_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1222,7 +1226,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1233,7 +1237,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
+ new_crtc_state->disable_cxsr && ilk_disable_cxsr(display))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1257,7 +1261,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*/
if (!intel_initial_watermarks(state, crtc))
if (new_crtc_state->update_wm_pre)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
}
/*
@@ -1282,7 +1286,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
unsigned int update_mask = new_crtc_state->update_planes;
@@ -1304,7 +1308,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
fb_bits |= plane->frontbuffer_bit;
}
- intel_frontbuffer_flip(dev_priv, fb_bits);
+ intel_frontbuffer_flip(display, fb_bits);
}
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
@@ -1323,7 +1327,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
if (intel_crtc_needs_modeset(new_crtc_state))
continue;
- new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
+ new_crtc_state->intel_dpll = old_crtc_state->intel_dpll;
new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
}
}
@@ -1511,7 +1515,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (drm_WARN_ON(display->drm, crtc->active))
@@ -1563,7 +1566,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_wait_for_pipe_scanline_moving(crtc);
/*
@@ -1657,19 +1660,18 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(display->drm, crtc->active))
return;
- for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i)
- intel_dmc_enable_pipe(display, pipe_crtc->pipe);
-
- intel_encoders_pre_pll_enable(state, crtc);
-
for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
- const struct intel_crtc_state *pipe_crtc_state =
+ const struct intel_crtc_state *new_pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
- if (pipe_crtc_state->shared_dpll)
- intel_enable_shared_dpll(pipe_crtc_state);
+ intel_dmc_enable_pipe(new_pipe_crtc_state);
}
+ intel_encoders_pre_pll_enable(state, crtc);
+
+ if (new_crtc_state->intel_dpll)
+ intel_dpll_enable(new_crtc_state);
+
intel_encoders_pre_enable(state, crtc);
for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
@@ -1779,8 +1781,6 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_set_pch_fifo_underrun_reporting(display, pipe, true);
-
- intel_disable_shared_dpll(old_crtc_state);
}
static void hsw_crtc_disable(struct intel_atomic_state *state,
@@ -1799,17 +1799,16 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
+ intel_dpll_disable(old_crtc_state);
+
+ intel_encoders_post_pll_disable(state, crtc);
+
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
const struct intel_crtc_state *old_pipe_crtc_state =
intel_atomic_get_old_crtc_state(state, pipe_crtc);
- intel_disable_shared_dpll(old_pipe_crtc_state);
+ intel_dmc_disable_pipe(old_pipe_crtc_state);
}
-
- intel_encoders_post_pll_disable(state, crtc);
-
- for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i)
- intel_dmc_disable_pipe(display, pipe_crtc->pipe);
}
/* Prefer intel_encoder_is_combo() */
@@ -1970,7 +1969,7 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
if (HAS_DDI(display) && crtc_state->has_audio)
set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
- if (crtc_state->shared_dpll)
+ if (crtc_state->intel_dpll)
set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
if (crtc_state->dsc.compression_enable)
@@ -2083,7 +2082,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (drm_WARN_ON(display->drm, crtc->active))
@@ -2107,7 +2105,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_color_modeset(new_crtc_state);
if (!intel_initial_watermarks(state, crtc))
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -2123,7 +2121,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
@@ -2147,9 +2144,9 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (display->platform.cherryview)
- chv_disable_pll(dev_priv, pipe);
+ chv_disable_pll(display, pipe);
else if (display->platform.valleyview)
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
else
i9xx_disable_pll(old_crtc_state);
}
@@ -2160,7 +2157,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
if (!display->funcs.wm->initial_watermarks)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
/* clock the pipe down to 640x480@60 to potentially save power */
if (display->platform.i830)
@@ -2343,7 +2340,6 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
intel_joiner_compute_pipe_src(crtc_state);
@@ -2362,7 +2358,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(i915)) {
+ intel_is_dual_link_lvds(display)) {
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
crtc->base.base.id, crtc->base.name);
@@ -2420,14 +2416,6 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static bool intel_crtc_needs_wa_14015401596(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
- IS_DISPLAY_VER(display, 13, 14);
-}
-
static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -2436,9 +2424,7 @@ static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
if (!HAS_DSB(display))
return 0;
- /* Wa_14015401596 */
- if (intel_crtc_needs_wa_14015401596(crtc_state))
- vblank_delay = max(vblank_delay, 1);
+ vblank_delay = max(vblank_delay, intel_psr_min_vblank_delay(crtc_state));
return vblank_delay;
}
@@ -2550,15 +2536,13 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
void intel_panel_sanitize_ssc(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
bool bios_lvds_use_ssc = intel_de_read(display,
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
@@ -2639,6 +2623,15 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
PIPE_LINK_N2(display, transcoder));
}
+static bool
+transcoder_has_vrr(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ return HAS_VRR(display) && !transcoder_is_dsi(cpu_transcoder);
+}
+
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -2703,6 +2696,15 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
+ /*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not required. Since support for these bits is
+ * going to be deprecated on upcoming platforms, avoid writing them on
+ * platforms that do not use the legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
@@ -2722,6 +2724,19 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
intel_de_write(display, TRANS_VTOTAL(display, pipe),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ if (DISPLAY_VER(display) >= 30) {
+ /*
+ * Address issues for resolutions with high refresh rate that
+ * have small Hblank, specifically where Hblank is smaller than
+ * one MTP. Simulations indicate this will address the
+ * jitter issues that currently cause BS to be immediately
+ * followed by BE, which DPRX devices are unable to handle.
+ * https://groups.vesa.org/wg/DP/document/20494
+ */
+ intel_de_write(display, DP_MIN_HBLANK_CTL(cpu_transcoder),
+ crtc_state->min_hblank);
+ }
}
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
@@ -2764,12 +2779,24 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
+ /*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not required. Since support for these bits is
+ * going to be deprecated on upcoming platforms, avoid writing them on
+ * platforms that do not use the legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
/*
* The double buffer latch point for TRANS_VTOTAL
* is the transcoder's undelayed vblank.
*/
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
@@ -2853,6 +2880,10 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vdisplay +
intel_de_read(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
+
+ if (DISPLAY_VER(display) >= 30)
+ pipe_config->min_hblank = intel_de_read(display,
+ DP_MIN_HBLANK_CTL(cpu_transcoder));
}
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
@@ -3835,7 +3866,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_display_power_domain_set *power_domain_set)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder;
enum port port;
u32 tmp;
@@ -3857,7 +3887,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
* registers/MIPI[BXT]. We can break out here early, since we
* need the same DSI PLL to be enabled for both DSI ports.
*/
- if (!bxt_dsi_pll_is_enabled(dev_priv))
+ if (!bxt_dsi_pll_is_enabled(display))
break;
/* XXX: this works for video mode only */
@@ -3920,7 +3950,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- if (HAS_VRR(display) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ if (transcoder_has_vrr(pipe_config))
intel_vrr_get_config(pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -4139,7 +4169,7 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
return 0;
linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
- cdclk_state->logical.cdclk);
+ intel_cdclk_logical(cdclk_state));
return min(linetime_wm, 0x1ff);
}
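The expression here, htotal * 1000 * 8 / cdclk with cdclk in kHz, reads as the line duration in 1/8 us units (at the logical cdclk), clamped to the 9-bit field (0x1ff). A worked example with invented numbers:

#include <stdio.h>

/* DIV_ROUND_CLOSEST for non-negative operands, matching the use above. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int htotal = 2200, cdclk_khz = 148500;   /* invented example values */
	int linetime_wm = DIV_ROUND_CLOSEST(htotal * 1000 * 8, cdclk_khz);

	/* 2200 px at 148.5 MHz ~= 14.815 us -> 119 in 1/8 us units */
	printf("linetime_wm = %d (= %d.%03d us)\n",
	       linetime_wm, linetime_wm / 8, linetime_wm % 8 * 125);
	return 0;
}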
@@ -4147,8 +4177,6 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
int linetime_wm;
@@ -4161,7 +4189,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
/* Display WA #1135: BXT:ALL GLK:ALL */
if ((display->platform.geminilake || display->platform.broxton) &&
- skl_watermark_ipc_enabled(dev_priv))
+ skl_watermark_ipc_enabled(display))
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -4207,7 +4235,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (intel_crtc_needs_modeset(crtc_state)) {
- ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
+ ret = intel_dpll_crtc_get_dpll(state, crtc);
if (ret)
return ret;
}
@@ -4300,6 +4328,22 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
return 0;
}
+int intel_display_min_pipe_bpp(void)
+{
+ return 6 * 3;
+}
+
+int intel_display_max_pipe_bpp(struct intel_display *display)
+{
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
+ return 10 * 3;
+ else if (DISPLAY_VER(display) >= 5)
+ return 12 * 3;
+ else
+ return 8 * 3;
+}
+
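Factoring these bounds into helpers makes the arithmetic explicit: the pipe minimum is 6 bpc x 3 channels = 18 bpp, and the maximum is 30 bpp on G4X/VLV/CHV, 36 bpp on display version 5+, and 24 bpp otherwise; compute_baseline_pipe_bpp() below starts from the maximum and clamps down per connector. A stand-alone restatement (platform flags become plain inputs here):

#include <stdbool.h>
#include <stdio.h>

static int max_pipe_bpp(bool g4x_vlv_chv, int display_ver)
{
	if (g4x_vlv_chv)
		return 10 * 3;          /* 30 bpp */
	else if (display_ver >= 5)
		return 12 * 3;          /* 36 bpp */
	else
		return 8 * 3;           /* 24 bpp */
}

int main(void)
{
	/* prints "min 18, ver12 max 36, CHV max 30" */
	printf("min %d, ver12 max %d, CHV max %d\n",
	       6 * 3, max_pipe_bpp(false, 12), max_pipe_bpp(true, 4));
	return 0;
}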
static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@@ -4309,17 +4353,9 @@ compute_baseline_pipe_bpp(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int bpp, i;
-
- if (display->platform.g4x || display->platform.valleyview ||
- display->platform.cherryview)
- bpp = 10*3;
- else if (DISPLAY_VER(display) >= 5)
- bpp = 12*3;
- else
- bpp = 8*3;
+ int i;
- crtc_state->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = intel_display_max_pipe_bpp(display);
/* Clamp display bpp to connector max bpp */
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
@@ -4483,7 +4519,7 @@ copy_joiner_crtc_state_modeset(struct intel_atomic_state *state,
/* preserve some things from the slave's original crtc state */
saved_state->uapi = secondary_crtc_state->uapi;
saved_state->scaler_state = secondary_crtc_state->scaler_state;
- saved_state->shared_dpll = secondary_crtc_state->shared_dpll;
+ saved_state->intel_dpll = secondary_crtc_state->intel_dpll;
saved_state->crc_enabled = secondary_crtc_state->crc_enabled;
intel_crtc_free_hw_state(secondary_crtc_state);
@@ -4546,7 +4582,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
saved_state->uapi = crtc_state->uapi;
saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
- saved_state->shared_dpll = crtc_state->shared_dpll;
+ saved_state->intel_dpll = crtc_state->intel_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
sizeof(saved_state->icl_port_dplls));
@@ -5206,6 +5242,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
+ PIPE_CONF_CHECK_I(min_hblank);
+
if (HAS_DOUBLE_BUFFERED_M_N(display)) {
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -5298,7 +5336,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(double_wide);
if (display->dpll.mgr)
- PIPE_CONF_CHECK_P(shared_dpll);
+ PIPE_CONF_CHECK_P(intel_dpll);
/* FIXME convert everything over the dpll_mgr */
if (display->dpll.mgr || HAS_GMCH(display))
@@ -5387,8 +5425,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.vmin);
PIPE_CONF_CHECK_I(vrr.vmax);
PIPE_CONF_CHECK_I(vrr.flipline);
- PIPE_CONF_CHECK_I(vrr.pipeline_full);
- PIPE_CONF_CHECK_I(vrr.guardband);
PIPE_CONF_CHECK_I(vrr.vsync_start);
PIPE_CONF_CHECK_I(vrr.vsync_end);
PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
@@ -5396,6 +5432,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(cmrr.enable);
}
+ if (!fastset || intel_vrr_always_use_vrr_tg(display)) {
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+ PIPE_CONF_CHECK_I(vrr.guardband);
+ }
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
@@ -5447,7 +5488,7 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, crtc);
+ ret = intel_plane_add_affected(state, crtc);
if (ret)
return ret;
@@ -6007,22 +6048,16 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!plane->async_flip)
continue;
- if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->modifier)) {
+ if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->format->format,
+ new_plane_state->hw.fb->modifier)) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
+ "[PLANE:%d:%s] pixel format %p4cc / modifier 0x%llx does not support async flip\n",
plane->base.base.id, plane->base.name,
+ &new_plane_state->hw.fb->format->format,
new_plane_state->hw.fb->modifier);
return -EINVAL;
}
- if (intel_format_info_is_yuv_semiplanar(new_plane_state->hw.fb->format,
- new_plane_state->hw.fb->modifier)) {
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] Planar formats do not support async flips\n",
- plane->base.base.id, plane->base.name);
- return -EINVAL;
- }
-
/*
* We turn the first async flip request into a sync flip
* so that we can reconfigure the plane (eg. change modifier).
@@ -6169,7 +6204,7 @@ static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, crtc);
+ ret = intel_plane_add_affected(state, crtc);
if (ret)
return ret;
}
@@ -6411,7 +6446,7 @@ int intel_atomic_check(struct drm_device *dev,
any_ms = true;
- intel_release_shared_dplls(state, crtc);
+ intel_dpll_release(state, crtc);
}
if (any_ms && !check_digital_port_conflicts(state)) {
@@ -6421,7 +6456,7 @@ int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = intel_atomic_check_planes(state);
+ ret = intel_plane_atomic_check(state);
if (ret)
goto fail;
@@ -6429,7 +6464,7 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_bw_atomic_check(state);
+ ret = intel_bw_atomic_check(state, any_ms);
if (ret)
goto fail;
@@ -6533,7 +6568,6 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
{
struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
* Update pipe size and adjust fitter if needed: the reason for this is
@@ -6549,7 +6583,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
if (DISPLAY_VER(display) >= 9) {
if (new_crtc_state->pch_pfit.enabled)
skl_pfit_enable(new_crtc_state);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
if (new_crtc_state->pch_pfit.enabled)
ilk_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
@@ -6586,7 +6620,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = intel_crtc_needs_modeset(new_crtc_state);
- drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq);
/*
* During modesets pipe configuration was programmed as the
@@ -6614,18 +6648,24 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
- drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq);
/*
* Disable the scaler(s) after the plane(s) so that we don't
* get a catastrophic underrun even if the two operations
* end up happening in two different frames.
*/
- if (DISPLAY_VER(display) >= 9 &&
- !intel_crtc_needs_modeset(new_crtc_state))
+ if (DISPLAY_VER(display) >= 9 && !modeset)
skl_detach_scalers(NULL, new_crtc_state);
+ if (!modeset &&
+ intel_crtc_needs_color_update(new_crtc_state) &&
+ !intel_color_uses_dsb(new_crtc_state) &&
+ HAS_DOUBLE_BUFFERED_LUT(display))
+ intel_color_load_luts(new_crtc_state);
+
if (intel_crtc_vrr_enabling(state, crtc))
intel_vrr_enable(new_crtc_state);
}
@@ -6650,6 +6690,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(pipe_crtc_state, false);
}
+ intel_psr_notify_pipe_change(state, crtc, true);
+
display->funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
@@ -6697,10 +6739,10 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state) &&
- !new_crtc_state->use_dsb)
+ !new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
intel_color_commit_noarm(NULL, new_crtc_state);
- if (!new_crtc_state->use_dsb)
+ if (!new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
intel_crtc_planes_update_noarm(NULL, state, crtc);
}
@@ -6712,16 +6754,23 @@ static void intel_update_crtc(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (new_crtc_state->use_dsb) {
+ if (new_crtc_state->use_flipq) {
+ intel_flipq_enable(new_crtc_state);
+
+ intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->flipq_event);
+
+ intel_flipq_add(crtc, INTEL_FLIPQ_PLANE_1, 0, INTEL_DSB_0,
+ new_crtc_state->dsb_commit);
+ } else if (new_crtc_state->use_dsb) {
intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event);
- intel_dsb_commit(new_crtc_state->dsb_commit, false);
+ intel_dsb_commit(new_crtc_state->dsb_commit);
} else {
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(state, crtc);
if (new_crtc_state->dsb_commit)
- intel_dsb_commit(new_crtc_state->dsb_commit, false);
+ intel_dsb_commit(new_crtc_state->dsb_commit);
commit_pipe_pre_planes(state, crtc);
@@ -6769,6 +6818,8 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
intel_crtc_joined_pipe_mask(old_crtc_state))
intel_crtc_disable_pipe_crc(pipe_crtc);
+ intel_psr_notify_pipe_change(state, crtc, false);
+
display->funcs.display->crtc_disable(state, crtc);
for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
@@ -7041,7 +7092,8 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
- int ret, i;
+ long ret;
+ int i;
for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
if (new_plane_state->fence) {
@@ -7148,7 +7200,17 @@ static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
return;
/* FIXME deal with everything */
+ new_crtc_state->use_flipq =
+ intel_flipq_supported(display) &&
+ !new_crtc_state->do_async_flip &&
+ !new_crtc_state->vrr.enable &&
+ !new_crtc_state->has_psr &&
+ !intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state) &&
+ !intel_crtc_needs_color_update(new_crtc_state);
+
new_crtc_state->use_dsb =
+ !new_crtc_state->use_flipq &&
!new_crtc_state->do_async_flip &&
(DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) &&
!intel_crtc_needs_modeset(new_crtc_state) &&
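The prepare step now chooses among three commit mechanisms with a strict precedence: flip queue when supported and the update is a plain plane update, else DSB, else ordinary MMIO (the tail of the use_dsb condition is cut off by the diff context above). The precedence alone, modeled stand-alone with illustrative predicates:

#include <stdbool.h>
#include <stdio.h>

enum commit_path { PATH_MMIO, PATH_DSB, PATH_FLIPQ };

static enum commit_path pick_path(bool flipq_ok, bool dsb_ok)
{
	if (flipq_ok)
		return PATH_FLIPQ;      /* use_flipq wins */
	if (dsb_ok)
		return PATH_DSB;        /* use_dsb requires !use_flipq */
	return PATH_MMIO;               /* fallback: direct register writes */
}

int main(void)
{
	printf("%d %d %d\n", pick_path(true, true),
	       pick_path(false, true), pick_path(false, false));   /* 2 1 0 */
	return 0;
}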
@@ -7164,7 +7226,9 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank)
+ if (!new_crtc_state->use_flipq &&
+ !new_crtc_state->use_dsb &&
+ !new_crtc_state->dsb_color)
return;
/*
@@ -7173,14 +7237,20 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
* Double that for pipe stuff and other overhead.
*/
new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
- new_crtc_state->use_dsb ? 1024 : 16);
+ new_crtc_state->use_dsb ||
+ new_crtc_state->use_flipq ? 1024 : 16);
if (!new_crtc_state->dsb_commit) {
+ new_crtc_state->use_flipq = false;
new_crtc_state->use_dsb = false;
intel_color_cleanup_commit(new_crtc_state);
return;
}
- if (new_crtc_state->use_dsb) {
+ if (new_crtc_state->use_flipq || new_crtc_state->use_dsb) {
+ /* Wa_18034343758 */
+ if (new_crtc_state->use_flipq)
+ intel_flipq_wait_dmc_halt(new_crtc_state->dsb_commit, crtc);
+
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state->dsb_commit,
new_crtc_state);
@@ -7195,7 +7265,8 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
state, crtc);
- intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
+ if (new_crtc_state->use_dsb)
+ intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_arm(new_crtc_state->dsb_commit,
@@ -7211,19 +7282,27 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
skl_detach_scalers(new_crtc_state->dsb_commit,
new_crtc_state);
- if (!new_crtc_state->dsb_color_vblank) {
- intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
-
- intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
- intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
- intel_vrr_check_push_sent(new_crtc_state->dsb_commit, new_crtc_state);
- intel_dsb_interrupt(new_crtc_state->dsb_commit);
- }
+ /* Wa_18034343758 */
+ if (new_crtc_state->use_flipq)
+ intel_flipq_unhalt_dmc(new_crtc_state->dsb_commit, crtc);
}
- if (new_crtc_state->dsb_color_vblank)
+ if (intel_color_uses_chained_dsb(new_crtc_state))
intel_dsb_chain(state, new_crtc_state->dsb_commit,
- new_crtc_state->dsb_color_vblank, true);
+ new_crtc_state->dsb_color, true);
+ else if (intel_color_uses_gosub_dsb(new_crtc_state))
+ intel_dsb_gosub(new_crtc_state->dsb_commit,
+ new_crtc_state->dsb_color);
+
+ if (new_crtc_state->use_dsb && !intel_color_uses_chained_dsb(new_crtc_state)) {
+ intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+
+ intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
+ intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
+ intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
+ new_crtc_state);
+ intel_dsb_interrupt(new_crtc_state->dsb_commit);
+ }
intel_dsb_finish(new_crtc_state->dsb_commit);
}
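Note: with the changes above, the color LUT programming can ride on the commit DSB in two ways, chained as a second DSB that the commit DSB hands off to, or inlined via a gosub call, and the vblank-wait/VRR-push tail is emitted only on the plain, unchained DSB path. A condensed sketch of that branch structure (all names here are stubs, not i915 symbols):

/* Sketch of the tail-emission rule above; struct buf and emit() are stubs. */
struct buf { int len; };
static void emit(struct buf *b, const char *op) { (void)b; (void)op; }

static void finish_commit(struct buf *commit, struct buf *color,
                          int use_dsb, int chained, int gosub)
{
        if (chained)
                emit(commit, "chain to color DSB");   /* second DSB takes over */
        else if (gosub)
                emit(commit, "gosub into color DSB"); /* subroutine-style call */

        /* vblank wait + VRR push only on the plain, unchained DSB path */
        if (use_dsb && !chained) {
                emit(commit, "wait 1 vblank");
                emit(commit, "VRR push + check");
                emit(commit, "interrupt");
        }
        (void)color;
}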
@@ -7231,7 +7310,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
@@ -7346,6 +7425,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
display->funcs.display->commit_modeset_enables(state);
+ /* FIXME probably need to sequence this properly */
intel_program_dpkgc_latency(state);
intel_wait_for_vblank_workers(state);
@@ -7369,6 +7449,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (!state->base.legacy_cursor_update && !new_crtc_state->use_dsb)
intel_vrr_check_push_sent(NULL, new_crtc_state);
+
+ if (new_crtc_state->use_flipq)
+ intel_flipq_disable(new_crtc_state);
}
/*
@@ -7412,7 +7495,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*
* FIXME get rid of this funny new->old swapping
*/
- old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
+ old_crtc_state->dsb_color = fetch_and_zero(&new_crtc_state->dsb_color);
old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit);
}
@@ -7445,7 +7528,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* toggling overhead at and above 60 FPS.
*/
intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -7505,7 +7588,7 @@ static int intel_atomic_swap_state(struct intel_atomic_state *state)
intel_atomic_swap_global_state(state);
- intel_shared_dpll_swap_state(state);
+ intel_dpll_swap_state(state);
intel_atomic_track_fbs(state);
@@ -7517,10 +7600,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
{
struct intel_display *display = to_intel_display(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ state->wakeref = intel_display_rpm_get(display);
/*
* The intel_legacy_cursor_update() fast path takes care
@@ -7554,7 +7636,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_dbg_atomic(display->drm,
"Preparing state failed with %i\n", ret);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -7564,7 +7646,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_atomic_helper_unprepare_planes(dev, &state->base);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -7626,15 +7708,13 @@ static bool ilk_has_edp_a(struct intel_display *display)
static bool intel_ddi_crt_present(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9)
return false;
if (display->platform.haswell_ult || display->platform.broadwell_ult)
return false;
- if (HAS_PCH_LPT_H(dev_priv) &&
+ if (HAS_PCH_LPT_H(display) &&
intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
@@ -7656,7 +7736,6 @@ bool assert_port_valid(struct intel_display *display, enum port port)
void intel_setup_outputs(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
@@ -7672,8 +7751,8 @@ void intel_setup_outputs(struct intel_display *display)
intel_bios_for_each_encoder(display, intel_ddi_init);
if (display->platform.geminilake || display->platform.broxton)
- vlv_dsi_init(dev_priv);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ vlv_dsi_init(display);
+ } else if (HAS_PCH_SPLIT(display)) {
int found;
/*
@@ -7681,7 +7760,7 @@ void intel_setup_outputs(struct intel_display *display)
* to prevent the registration of both eDP and LVDS and the
* incorrect sharing of the PPS.
*/
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
@@ -7756,15 +7835,15 @@ void intel_setup_outputs(struct intel_display *display)
g4x_hdmi_init(display, CHV_HDMID, PORT_D);
}
- vlv_dsi_init(dev_priv);
+ vlv_dsi_init(display);
} else if (display->platform.pineview) {
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
} else if (IS_DISPLAY_VER(display, 3, 4)) {
bool found = false;
if (display->platform.mobile)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
@@ -7806,10 +7885,10 @@ void intel_setup_outputs(struct intel_display *display)
intel_tv_init(display);
} else if (DISPLAY_VER(display) == 2) {
if (display->platform.i85x)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
- intel_dvo_init(dev_priv);
+ intel_dvo_init(display);
}
for_each_intel_encoder(display->drm, encoder) {
@@ -7819,7 +7898,7 @@ void intel_setup_outputs(struct intel_display *display)
intel_encoder_possible_clones(encoder);
}
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
drm_helper_move_panel_connectors_to_head(display->drm);
}
@@ -8041,13 +8120,11 @@ static const struct intel_display_funcs i9xx_display_funcs = {
*/
void intel_init_display_hooks(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9) {
display->funcs.display = &skl_display_funcs;
} else if (HAS_DDI(display)) {
display->funcs.display = &ddi_display_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
display->funcs.display = &pch_split_display_funcs;
} else if (display->platform.cherryview ||
display->platform.valleyview) {
@@ -8083,6 +8160,9 @@ retry:
goto out;
}
+ if (!crtc_state->hw.active)
+ crtc_state->inherited = false;
+
if (crtc_state->hw.active) {
struct intel_encoder *encoder;
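Note: throughout this file the patch swaps direct runtime-PM calls on the i915 private for display-local wrappers, so display code no longer needs a drm_i915_private pointer. A plausible shape for such a wrapper, assuming the real ones simply forward to the i915 runtime-PM API (the my_* names are hypothetical; the actual definitions live in intel_display_rpm):

/* Sketch only: forwarders that hide the i915 private from display code. */
struct ref_tracker *my_display_rpm_get(struct intel_display *display)
{
        return intel_runtime_pm_get(&to_i915(display->drm)->runtime_pm);
}

void my_display_rpm_put(struct intel_display *display,
                        struct ref_tracker *wakeref)
{
        intel_runtime_pm_put(&to_i915(display->drm)->runtime_pm, wakeref);
}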
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 3b54a62c290a..37e2ab301a80 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -30,38 +30,21 @@
#include "i915_reg_defs.h"
#include "intel_display_limits.h"
-enum drm_scaling_filter;
-struct dpll;
struct drm_atomic_state;
-struct drm_connector;
struct drm_device;
struct drm_display_mode;
struct drm_encoder;
-struct drm_file;
-struct drm_format_info;
-struct drm_framebuffer;
-struct drm_i915_private;
-struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
-struct drm_plane;
-struct drm_plane_state;
-struct i915_address_space;
-struct i915_gtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_display;
-struct intel_dp;
struct intel_encoder;
-struct intel_initial_plane_config;
struct intel_link_m_n;
struct intel_plane;
struct intel_plane_state;
struct intel_power_domain_mask;
-struct pci_dev;
-struct work_struct;
-
#define pipe_name(p) ((p) + 'A')
@@ -452,10 +435,10 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct intel_display *display, enum pipe pipe);
void i830_disable_pipe(struct intel_display *display, enum pipe pipe);
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+int vlv_get_hpll_vco(struct drm_device *drm);
+int vlv_get_cck_clock(struct drm_device *drm,
const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock_hpll(struct drm_device *drm,
const char *name, u32 reg);
bool intel_has_pending_fb_unpin(struct intel_display *display);
void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -524,6 +507,9 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_display_min_pipe_bpp(void);
+int intel_display_max_pipe_bpp(struct intel_display *display);
+
/* modesetting */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 pipe_mask);
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
index 0578b68404da..4d565935e2cc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.c
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -2,10 +2,11 @@
/* Copyright © 2024 Intel Corporation */
#include "i915_drv.h"
+#include "intel_display_conversion.h"
struct intel_display *__i915_to_display(struct drm_i915_private *i915)
{
- return &i915->display;
+ return i915->display;
}
struct intel_display *__drm_to_display(struct drm_device *drm)
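Note: __i915_to_display() now returns i915->display rather than &i915->display, i.e. the display state moves from an embedded member to a separately allocated object (see the kzalloc() added to intel_display_device_probe() further down). A toy illustration of that ownership change, with stand-in types:

/* Stand-in types only; illustrates embedded member vs. owned pointer. */
#include <stdlib.h>

struct display { int ver; };

struct i915_old { struct display display; };    /* &i915->display */
struct i915_new { struct display *display; };   /* i915->display */

static struct display *to_display_old(struct i915_old *i915)
{
        return &i915->display;          /* address of embedded struct */
}

static struct display *to_display_new(struct i915_new *i915)
{
        return i915->display;           /* allocated in probe, freed in remove */
}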
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index eeb7ae3eaea8..8c226406c5cd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -21,17 +21,15 @@
#include "intel_display_limits.h"
#include "intel_display_params.h"
#include "intel_display_power.h"
+#include "intel_dmc_wl.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
-#include "intel_dmc_wl.h"
+#include "intel_pch.h"
#include "intel_wm_types.h"
-struct task_struct;
-
-struct drm_i915_private;
struct drm_property;
struct drm_property_blob;
struct i915_audio_component;
@@ -44,7 +42,7 @@ struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dmc;
-struct intel_dpll_funcs;
+struct intel_dpll_global_funcs;
struct intel_dpll_mgr;
struct intel_fbdev;
struct intel_fdi_funcs;
@@ -52,6 +50,7 @@ struct intel_hotplug_funcs;
struct intel_initial_plane_config;
struct intel_opregion;
struct intel_overlay;
+struct task_struct;
/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8
@@ -80,7 +79,7 @@ struct intel_display_funcs {
/* functions used for watermark calcs for display. */
struct intel_wm_funcs {
/* update_wm is for legacy wm management */
- void (*update_wm)(struct drm_i915_private *dev_priv);
+ void (*update_wm)(struct intel_display *display);
int (*compute_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*initial_watermarks)(struct intel_atomic_state *state,
@@ -90,8 +89,8 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
- void (*get_hw_state)(struct drm_i915_private *i915);
- void (*sanitize)(struct drm_i915_private *i915);
+ void (*get_hw_state)(struct intel_display *display);
+ void (*sanitize)(struct intel_display *display);
};
struct intel_audio_state {
@@ -123,11 +122,11 @@ struct intel_audio {
* intel_{prepare,enable,disable}_shared_dpll. Must be global rather than per
* dpll, because on some platforms plls share registers.
*/
-struct intel_dpll {
+struct intel_dpll_global {
struct mutex lock;
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ int num_dpll;
+ struct intel_dpll dplls[I915_NUM_PLLS];
const struct intel_dpll_mgr *mgr;
struct {
@@ -160,6 +159,7 @@ struct intel_hotplug {
struct {
unsigned long last_jiffies;
int count;
+ int blocked_count;
enum {
HPD_ENABLED = 0,
HPD_DISABLED = 1,
@@ -170,8 +170,8 @@ struct intel_hotplug {
u32 retry_bits;
struct delayed_work reenable_work;
- u32 long_port_mask;
- u32 short_port_mask;
+ u32 long_hpd_pin_mask;
+ u32 short_hpd_pin_mask;
struct work_struct dig_port_work;
struct work_struct poll_init_work;
@@ -179,7 +179,7 @@ struct intel_hotplug {
/*
* Queuing of hotplug_work, reenable_work and poll_init_work is
- * enabled. Protected by drm_i915_private::irq_lock.
+ * enabled. Protected by intel_display::irq::lock.
*/
bool detection_work_enabled;
@@ -288,6 +288,9 @@ struct intel_display {
/* Platform (and subplatform, if any) identification */
struct intel_display_platforms platform;
+ /* Intel PCH: where the south display engine lives */
+ enum intel_pch pch_type;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -297,7 +300,7 @@ struct intel_display {
const struct intel_cdclk_funcs *cdclk;
/* Display pll funcs */
- const struct intel_dpll_funcs *dpll;
+ const struct intel_dpll_global_funcs *dpll;
/* irq display functions */
const struct intel_hotplug_funcs *hotplug;
@@ -425,7 +428,7 @@ struct intel_display {
* reused when sending messages to the GSC CS.
* This is only populated post-Meteorlake.
*/
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct intel_hdcp_gsc_context *gsc_context;
/* Mutex to protect the above hdcp related values. */
struct mutex hdcp_mutex;
} hdcp;
@@ -453,6 +456,9 @@ struct intel_display {
} ips;
struct {
+ /* protects the irq masks */
+ spinlock_t lock;
+
/*
* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -465,15 +471,21 @@ struct intel_display {
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
- int vblank_wa_num_pipes;
+ int vblank_enable_count;
- struct work_struct vblank_dc_work;
+ struct work_struct vblank_notify_work;
u32 de_irq_mask[I915_MAX_PIPES];
u32 pipestat_irq_mask[I915_MAX_PIPES];
} irq;
struct {
+ /* protected by wm.wm_mutex */
+ u16 linetime[I915_MAX_PIPES];
+ bool disable[I915_MAX_PIPES];
+ } pkgc;
+
+ struct {
wait_queue_head_t waitqueue;
/* mutex to protect pmdemand programming sequence */
@@ -533,6 +545,11 @@ struct intel_display {
} sagv;
struct {
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex lock;
+ } sbi;
+
+ struct {
/*
* DG2: Mask of PHYs that were not calibrated by the firmware
* and should not be used.
@@ -559,12 +576,15 @@ struct intel_display {
/* hipri wq for commit cleanups */
struct workqueue_struct *cleanup;
+
+ /* unordered workqueue for all display unordered work */
+ struct workqueue_struct *unordered;
} wq;
/* Grouping using named structs. Keep sorted. */
struct drm_dp_tunnel_mgr *dp_tunnel_mgr;
struct intel_audio audio;
- struct intel_dpll dpll;
+ struct intel_dpll_global dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
struct intel_hotplug hotplug;
@@ -574,6 +594,8 @@ struct intel_display {
struct intel_vbt_data vbt;
struct intel_dmc_wl wl;
struct intel_wm wm;
+
+ struct work_struct psr_dc5_dc6_wa_work;
};
#endif /* __INTEL_DISPLAY_CORE_H__ */
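Note: the interrupt masks now live under the dedicated display->irq.lock spinlock declared above (previously drm_i915_private::irq_lock), and most of the later hunks in this series are the matching lockdep and spin_lock conversions. The resulting pattern, sketched against the fields added in this header (my_update_pipestat_mask() is a stand-in name):

/* Sketch of the new locking rule using the fields declared above. */
static void my_update_pipestat_mask(struct intel_display *display,
                                    enum pipe pipe, u32 set, u32 clear)
{
        spin_lock_irq(&display->irq.lock);      /* was i915->irq_lock */
        display->irq.pipestat_irq_mask[pipe] |= set;
        display->irq.pipestat_irq_mask[pipe] &= ~clear;
        spin_unlock_irq(&display->irq.lock);
}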
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index fdedf65bee53..ce3f9810c42d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -4,15 +4,16 @@
*/
#include <linux/debugfs.h>
+#include <linux/string_choices.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
-#include "i915_drv.h"
-#include "i915_irq.h"
#include "i915_reg.h"
#include "i9xx_wm_regs.h"
#include "intel_alpm.h"
@@ -24,6 +25,8 @@
#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -37,6 +40,7 @@
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
@@ -54,6 +58,8 @@ static int intel_display_caps(struct seq_file *m, void *data)
struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
+ drm_printf(&p, "PCH type: %d\n", INTEL_PCH_TYPE(display));
+
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
intel_display_params_dump(&display->params, display->drm->driver->name, &p);
@@ -81,7 +87,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
bool sr_enabled = false;
@@ -89,7 +94,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (DISPLAY_VER(display) >= 9)
/* no global SR status; inspect per-plane WM */;
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
sr_enabled = intel_de_read(display, WM1_LP_ILK) & WM_LP_ENABLE;
else if (display->platform.i965gm || display->platform.g4x ||
display->platform.i945g || display->platform.i945gm)
@@ -554,6 +559,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
seq_printf(m, "\tpipe src=" DRM_RECT_FMT ", dither=%s, bpp=%d\n",
DRM_RECT_ARG(&crtc_state->pipe_src),
str_yes_no(crtc_state->dither), crtc_state->pipe_bpp);
+ seq_printf(m, "\tport_clock=%d, lane_count=%d\n",
+ crtc_state->port_clock, crtc_state->lane_count);
intel_scaler_info(m, crtc);
@@ -580,13 +587,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
drm_modeset_lock_all(display->drm);
@@ -605,18 +611,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_modeset_unlock_all(display->drm);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_display_capabilities(struct seq_file *m, void *unused)
-{
- struct intel_display *display = node_to_intel_display(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- intel_display_device_info_print(DISPLAY_INFO(display),
- DISPLAY_RUNTIME_INFO(display), &p);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -625,7 +620,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
drm_modeset_lock_all(display->drm);
@@ -634,7 +629,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
display->dpll.ref_clks.nssc,
display->dpll.ref_clks.ssc);
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
drm_printf(&p, "DPLL%i: %s, id: %i\n", pll->index,
pll->info->name, pll->info->id);
drm_printf(&p, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
@@ -690,14 +685,11 @@ static bool
intel_lpsp_power_well_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- intel_wakeref_t wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- is_enabled = intel_display_power_well_is_enabled(display,
- power_well_id);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ is_enabled = intel_display_power_well_is_enabled(display,
+ power_well_id);
return is_enabled;
}
@@ -820,7 +812,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
- {"i915_display_capabilities", i915_display_capabilities, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
@@ -829,7 +820,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
void intel_display_debugfs_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
@@ -844,10 +834,10 @@ void intel_display_debugfs_register(struct intel_display *display)
intel_dmc_debugfs_register(display);
intel_dp_test_debugfs_register(display);
intel_fbc_debugfs_register(display);
- intel_hpd_debugfs_register(i915);
+ intel_hpd_debugfs_register(display);
intel_opregion_debugfs_register(display);
intel_psr_debugfs_register(display);
- intel_wm_debugfs_register(i915);
+ intel_wm_debugfs_register(display);
intel_display_debugfs_params(display);
}
@@ -984,7 +974,7 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
return ret;
drm_dbg(display->drm, "Got %s for DSC Enable\n",
- (dsc_enable) ? "true" : "false");
+ str_true_false(dsc_enable));
intel_dp->force_dsc_en = dsc_enable;
*offp += len;
@@ -1195,7 +1185,7 @@ static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
return ret;
drm_dbg(display->drm, "Got %s for DSC Fractional BPP Enable\n",
- (dsc_fractional_bpp_enable) ? "true" : "false");
+ str_true_false(dsc_fractional_bpp_enable));
intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
*offp += len;
@@ -1337,6 +1327,7 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
intel_psr_connector_debugfs_add(connector);
intel_alpm_lobf_debugfs_add(connector);
intel_dp_link_training_debugfs_add(connector);
+ intel_link_bw_connector_debugfs_add(connector);
if (DISPLAY_VER(display) >= 11 &&
((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst.dp) ||
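Note: intel_lpsp_power_well_enabled() above adopts a scoped guard, with_intel_display_rpm(), instead of explicit get/put calls. Such guards are conventionally built on a for-loop so the put runs when the block exits; a generic sketch of the idiom, not the i915 definition (my_rpm_* are hypothetical):

/* Generic shape of a scoped RPM guard macro; assumption, not i915's macro. */
#define with_my_rpm(disp) \
        for (struct ref_tracker *__wf = my_rpm_get(disp); __wf; \
             my_rpm_put((disp), __wf), __wf = NULL)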
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 738ae522c8f4..089cffabbad5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -3,11 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/intel/pciids.h>
-#include <drm/drm_color_mgmt.h>
#include <linux/pci.h>
-#include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+#include <drm/intel/pciids.h>
+
#include "i915_reg.h"
#include "intel_cx0_phy_regs.h"
#include "intel_de.h"
@@ -16,6 +18,7 @@
#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_step.h"
@@ -1477,6 +1480,7 @@ static const struct {
{ 14, 1, &xe2_hpd_display },
{ 20, 0, &xe2_lpd_display },
{ 30, 0, &xe2_lpd_display },
+ { 30, 2, &xe2_lpd_display },
};
static const struct intel_display_device_info *
@@ -1619,13 +1623,17 @@ static void display_platforms_or(struct intel_display_platforms *dst,
struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
{
- struct intel_display *display = to_intel_display(pdev);
+ struct intel_display *display;
const struct intel_display_device_info *info;
struct intel_display_ip_ver ip_ver = {};
const struct platform_desc *desc;
const struct subplatform_desc *subdesc;
enum intel_step step;
+ display = kzalloc(sizeof(*display), GFP_KERNEL);
+ if (!display)
+ return ERR_PTR(-ENOMEM);
+
/* Add drm device backpointer as early as possible. */
display->drm = pci_get_drvdata(pdev);
@@ -1706,12 +1714,15 @@ no_display:
void intel_display_device_remove(struct intel_display *display)
{
+ if (!display)
+ return;
+
intel_display_params_free(&display->params);
+ kfree(display);
}
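Note: intel_display_device_probe() now allocates the display struct itself and returns ERR_PTR(-ENOMEM) on failure, while intel_display_device_remove() tolerates a NULL pointer before freeing, the usual alloc/free symmetry for a probe/remove pair. In condensed form (my_* names are placeholders):

/* Condensed shape of the probe/remove pairing above (sketch). */
struct intel_display *my_probe(struct pci_dev *pdev)
{
        struct intel_display *display = kzalloc(sizeof(*display), GFP_KERNEL);

        if (!display)
                return ERR_PTR(-ENOMEM);
        display->drm = pci_get_drvdata(pdev);   /* backpointer set early */
        return display;
}

void my_remove(struct intel_display *display)
{
        if (!display)           /* probe may have failed */
                return;
        kfree(display);
}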
static void __intel_display_device_info_runtime_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(display);
enum pipe pipe;
@@ -1775,7 +1786,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
goto display_fused_off;
}
- if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(display)) {
u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(display, SFUSE_STRAP);
@@ -1790,7 +1801,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
*/
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
- (HAS_PCH_CPT(i915) &&
+ (HAS_PCH_CPT(display) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
drm_info(display->drm,
"Display fused off, disabling\n");
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 7a3bb77c7af7..4308822f0415 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -143,9 +143,11 @@ struct intel_display_platforms {
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
+#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13)
#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
@@ -155,10 +157,11 @@ struct intel_display_platforms {
#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_BUFFERED_LUT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
-#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
#define HAS_DSC_3ENGINES(__display) (DISPLAY_VERx100(__display) == 1401 && HAS_DSC(__display))
@@ -167,9 +170,11 @@ struct intel_display_platforms {
#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
-#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
+#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_FDI(__display) (IS_DISPLAY_VER((__display), 5, 8) && !HAS_GMCH(__display))
+#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
@@ -178,6 +183,7 @@ struct intel_display_platforms {
#define HAS_MBUS_JOINING(__display) ((__display)->platform.alderlake_p || DISPLAY_VER(__display) >= 14)
#define HAS_MSO(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_OVERLAY(__display) (DISPLAY_INFO(__display)->has_overlay)
+#define HAS_PIPEDMC(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_PSR(__display) (DISPLAY_INFO(__display)->has_psr)
#define HAS_PSR_HW_TRACKING(__display) (DISPLAY_INFO(__display)->has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(__display) (DISPLAY_VER(__display) >= 12)
@@ -186,14 +192,10 @@ struct intel_display_platforms {
#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
BIT(trans)) != 0)
#define HAS_UNCOMPRESSED_JOINER(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_ULTRAJOINER(__display) ((DISPLAY_VER(__display) >= 20 || \
- ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
- HAS_DSC(__display))
+#define HAS_ULTRAJOINER(__display) (((__display)->platform.dgfx && \
+ DISPLAY_VER(__display) == 14) && HAS_DSC(__display))
#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
-#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
-#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
#define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv)
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 31740a677dd8..8586ba102605 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -27,6 +27,7 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
@@ -43,6 +44,7 @@
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
+#include "intel_flipq.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -54,6 +56,7 @@
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
@@ -82,19 +85,12 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
void intel_display_driver_init_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_cdclk_state *cdclk_state;
-
if (!HAS_DISPLAY(display))
return;
- cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
-
- intel_update_cdclk(display);
- intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
- cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
+ intel_cdclk_read_hw(display);
- intel_display_wa_apply(i915);
+ intel_display_wa_apply(display);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
@@ -181,7 +177,8 @@ static void intel_plane_possible_crtcs_init(struct intel_display *display)
void intel_display_driver_early_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_pch_detect(display);
if (!HAS_DISPLAY(display))
return;
@@ -193,12 +190,12 @@ void intel_display_driver_early_probe(struct intel_display *display)
mutex_init(&display->pps.mutex);
mutex_init(&display->hdcp.hdcp_mutex);
- intel_display_irq_init(i915);
+ intel_display_irq_init(display);
intel_dkl_phy_init(display);
intel_color_init_hooks(display);
intel_init_cdclk_hooks(display);
intel_audio_hooks_init(display);
- intel_dpll_init_clock_hook(i915);
+ intel_dpll_init_clock_hook(display);
intel_init_display_hooks(display);
intel_fdi_init_hook(display);
intel_dmc_wl_init(display);
@@ -226,6 +223,8 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (ret)
goto cleanup_bios;
+ intel_psr_dc5_dc6_wa_init(display);
+
/* FIXME: completely on the wrong abstraction layer */
ret = intel_power_domains_init(display);
if (ret < 0)
@@ -238,34 +237,60 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (!HAS_DISPLAY(display))
return 0;
- intel_dmc_init(display);
+ display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
+ if (!display->hotplug.dp_wq) {
+ ret = -ENOMEM;
+ goto cleanup_vga_client_pw_domain_dmc;
+ }
display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ if (!display->wq.modeset) {
+ ret = -ENOMEM;
+ goto cleanup_wq_dp;
+ }
+
display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ if (!display->wq.flip) {
+ ret = -ENOMEM;
+ goto cleanup_wq_modeset;
+ }
+
display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
+ if (!display->wq.cleanup) {
+ ret = -ENOMEM;
+ goto cleanup_wq_flip;
+ }
+
+ display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
+ if (!display->wq.unordered) {
+ ret = -ENOMEM;
+ goto cleanup_wq_cleanup;
+ }
+
+ intel_dmc_init(display);
intel_mode_config_init(display);
ret = intel_cdclk_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
ret = intel_color_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
- ret = intel_dbuf_init(i915);
+ ret = intel_dbuf_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
- ret = intel_bw_init(i915);
+ ret = intel_bw_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
ret = intel_pmdemand_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
intel_init_quirks(display);
@@ -273,6 +298,16 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
return 0;
+cleanup_wq_unordered:
+ destroy_workqueue(display->wq.unordered);
+cleanup_wq_cleanup:
+ destroy_workqueue(display->wq.cleanup);
+cleanup_wq_flip:
+ destroy_workqueue(display->wq.flip);
+cleanup_wq_modeset:
+ destroy_workqueue(display->wq.modeset);
+cleanup_wq_dp:
+ destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
intel_dmc_fini(display);
intel_power_domains_driver_remove(display);
@@ -315,11 +350,9 @@ static void set_display_access(struct intel_display *display,
*/
void intel_display_driver_enable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
set_display_access(display, true, NULL);
- intel_hpd_enable_detection_work(i915);
+ intel_hpd_enable_detection_work(display);
}
/**
@@ -341,9 +374,7 @@ void intel_display_driver_enable_user_access(struct intel_display *display)
*/
void intel_display_driver_disable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_hpd_disable_detection_work(i915);
+ intel_hpd_disable_detection_work(display);
set_display_access(display, false, current);
}
@@ -422,14 +453,13 @@ bool intel_display_driver_check_access(struct intel_display *display)
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
int ret;
if (!HAS_DISPLAY(display))
return 0;
- intel_wm_init(i915);
+ intel_wm_init(display);
intel_panel_sanitize_ssc(display);
@@ -448,7 +478,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
}
intel_plane_possible_crtcs_init(display);
- intel_shared_dpll_init(display);
+ intel_dpll_init(display);
intel_fdi_pll_freq_update(display);
intel_update_czclk(display);
@@ -460,8 +490,6 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_hti_init(display);
- /* Just disable it once at startup */
- intel_vga_disable(display);
intel_setup_outputs(display);
ret = intel_dp_tunnel_mgr_init(display);
@@ -471,7 +499,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_display_driver_disable_user_access(display);
drm_modeset_lock_all(display->drm);
- intel_modeset_setup_hw_state(i915, display->drm->mode_config.acquire_ctx);
+ intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(display);
drm_modeset_unlock_all(display->drm);
@@ -483,7 +511,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(display))
- ilk_wm_sanitize(i915);
+ ilk_wm_sanitize(display);
return 0;
@@ -498,7 +526,6 @@ err_mode_config:
/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!HAS_DISPLAY(display))
@@ -511,6 +538,8 @@ int intel_display_driver_probe(struct intel_display *display)
*/
intel_hdcp_component_init(display);
+ intel_flipq_init(display);
+
/*
* Force all active planes to recompute their states. So that on
* mode_setcrtc after probe, all the intel_plane_state variables
@@ -524,16 +553,15 @@ int intel_display_driver_probe(struct intel_display *display)
intel_overlay_setup(display);
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(i915);
+ intel_hpd_init(display);
- skl_watermark_ipc_init(i915);
+ skl_watermark_ipc_init(display);
return 0;
}
void intel_display_driver_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
"i915 display info:");
@@ -558,9 +586,9 @@ void intel_display_driver_register(struct intel_display *display)
* fbdev->async_cookie.
*/
drm_kms_helper_poll_init(display->drm);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
- intel_fbdev_setup(i915);
+ intel_fbdev_setup(display);
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
@@ -577,6 +605,7 @@ void intel_display_driver_remove(struct intel_display *display)
flush_workqueue(display->wq.flip);
flush_workqueue(display->wq.modeset);
flush_workqueue(display->wq.cleanup);
+ flush_workqueue(display->wq.unordered);
/*
* MST topology needs to be suspended so we don't have any calls to
@@ -589,8 +618,6 @@ void intel_display_driver_remove(struct intel_display *display)
/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!HAS_DISPLAY(display))
return;
@@ -600,12 +627,12 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(i915);
+ intel_hpd_poll_fini(display);
intel_unregister_dsm_handler();
/* flush any delayed tasks or pending work */
- flush_workqueue(i915->unordered_wq);
+ flush_workqueue(display->wq.unordered);
intel_hdcp_component_fini(display);
@@ -617,9 +644,11 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
intel_gmbus_teardown(display);
+ destroy_workqueue(display->hotplug.dp_wq);
destroy_workqueue(display->wq.flip);
destroy_workqueue(display->wq.modeset);
destroy_workqueue(display->wq.cleanup);
+ destroy_workqueue(display->wq.unordered);
intel_fbc_cleanup(display);
}
@@ -695,13 +724,11 @@ __intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
- intel_modeset_setup_hw_state(i915, ctx);
- intel_vga_redisable(display);
+ intel_modeset_setup_hw_state(display, ctx);
if (!state)
return 0;
@@ -733,7 +760,6 @@ __intel_display_driver_resume(struct intel_display *display,
void intel_display_driver_resume(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_atomic_state *state = display->restore.modeset_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -761,7 +787,7 @@ void intel_display_driver_resume(struct intel_display *display)
if (!ret)
ret = __intel_display_driver_resume(display, state, &ctx);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
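Note: probe now allocates five workqueues up front and unwinds them in reverse through the goto ladder added above; every later failure label falls through the destroy_workqueue() calls for everything allocated so far. The same pattern in miniature, with two queues instead of five:

/* Miniature of the unwind ladder above: two queues instead of five. */
static int my_alloc_wqs(struct intel_display *display)
{
        display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
        if (!display->wq.modeset)
                return -ENOMEM;

        display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
        if (!display->wq.unordered)
                goto err_modeset;

        return 0;

err_modeset:
        destroy_workqueue(display->wq.modeset);
        return -ENOMEM;
}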
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index aa23bb817805..fb25ec8adae3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -5,17 +5,19 @@
#include <drm/drm_vblank.h>
-#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
-#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
+#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
@@ -24,6 +26,7 @@
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
+#include "intel_plane.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
@@ -115,9 +118,8 @@ static void intel_pipe_fault_irq_handler(struct intel_display *display,
}
static void
-intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_handle_vblank(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
drm_crtc_handle_vblank(&crtc->base);
@@ -125,59 +127,59 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
/**
* ilk_update_display_irq - update DEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
new_val = dev_priv->irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->irq_mask &&
- !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
+ !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
intel_de_write(display, DEIMR, dev_priv->irq_mask);
intel_de_posting_read(display, DEIMR);
}
}
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_enable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, bits);
+ ilk_update_display_irq(display, bits, bits);
}
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_disable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, 0);
+ ilk_update_display_irq(display, bits, 0);
}
/**
* bdw_update_port_irq - update DE port interrupt
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void bdw_update_port_irq(struct drm_i915_private *dev_priv,
+void bdw_update_port_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
u32 old_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
@@ -194,93 +196,92 @@ void bdw_update_port_irq(struct drm_i915_private *dev_priv,
/**
* bdw_update_pipe_irq - update DE pipe interrupt
- * @dev_priv: driver private
+ * @display: display device
* @pipe: pipe whose interrupt to update
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+static void bdw_update_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = dev_priv->display.irq.de_irq_mask[pipe];
+ new_val = display->irq.de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
- dev_priv->display.irq.de_irq_mask[pipe] = new_val;
+ if (new_val != display->irq.de_irq_mask[pipe]) {
+ display->irq.de_irq_mask[pipe] = new_val;
intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
-void bdw_enable_pipe_irq(struct drm_i915_private *i915,
+void bdw_enable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, bits);
+ bdw_update_pipe_irq(display, pipe, bits, bits);
}
-void bdw_disable_pipe_irq(struct drm_i915_private *i915,
+void bdw_disable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, 0);
+ bdw_update_pipe_irq(display, pipe, bits, 0);
}
/**
* ibx_display_interrupt_update - update SDEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 sdeimr = intel_de_read(display, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
intel_de_write(display, SDEIMR, sdeimr);
intel_de_posting_read(display, SDEIMR);
}
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, bits);
+ ibx_display_interrupt_update(display, bits, bits);
}
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, 0);
+ ibx_display_interrupt_update(display, bits, 0);
}
u32 i915_pipestat_enable_mask(struct intel_display *display,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 status_mask = display->irq.pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if (DISPLAY_VER(display) < 5)
goto out;
@@ -318,48 +319,48 @@ out:
return enable_mask;
}
-void i915_enable_pipestat(struct drm_i915_private *dev_priv,
+void i915_enable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
+ display->irq.pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
intel_de_posting_read(display, reg);
}
-void i915_disable_pipestat(struct drm_i915_private *dev_priv,
+void i915_disable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
+ display->irq.pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
@@ -368,49 +369,41 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_I85X(i915))
+ if (display->platform.i85x)
return true;
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
return true;
- return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
+ return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile;
}
-/**
- * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev_priv: i915 device private
- */
-void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
+/* enable ASLE pipestat for OpRegion */
+static void i915_enable_asle_pipestat(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
if (!intel_opregion_asle_present(display))
return;
if (!i915_has_legacy_blc_interrupt(display))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (DISPLAY_VER(dev_priv) >= 4)
- i915_enable_pipestat(dev_priv, PIPE_A,
+ i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
+ if (DISPLAY_VER(display) >= 4)
+ i915_enable_pipestat(display, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
@@ -427,7 +420,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* don't trust that one either.
*/
if (pipe_crc->skipped <= 0 ||
- (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
@@ -440,20 +433,19 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
#else
static inline void
-display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4) {}
#endif
-static void flip_done_handler(struct drm_i915_private *i915,
+static void flip_done_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
- spin_lock(&i915->drm.event_lock);
+ spin_lock(&display->drm->event_lock);
if (crtc->flip_done_event) {
trace_intel_crtc_flip_done(crtc);
@@ -461,25 +453,21 @@ static void flip_done_handler(struct drm_i915_private *i915,
crtc->flip_done_event = NULL;
}
- spin_unlock(&i915->drm.event_lock);
+ spin_unlock(&display->drm->event_lock);
}
-static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void hsw_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
0, 0, 0, 0);
}
-static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void ivb_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
@@ -487,58 +475,55 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}
-static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 res1, res2;
- if (DISPLAY_VER(dev_priv) >= 3)
- res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 3)
+ res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe));
else
res1 = 0;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe));
else
res2 = 0;
- display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
+ display_pipe_crc_irq_handler(display, pipe,
+ intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)),
res1, res2);
}
-static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+static void i9xx_pipestat_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
intel_de_write(display,
- PIPESTAT(dev_priv, pipe),
+ PIPESTAT(display, pipe),
PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
+ display->irq.pipestat_irq_mask[pipe] = 0;
}
}
-void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+void i9xx_pipestat_irq_ack(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->display.irq.vlv_display_irqs_enabled) {
- spin_unlock(&dev_priv->irq_lock);
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled) {
+ spin_unlock(&display->irq.lock);
return;
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
i915_reg_t reg;
u32 status_mask, enable_mask, iir_bit = 0;
@@ -566,12 +551,12 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
+ status_mask |= display->irq.pipestat_irq_mask[pipe];
if (!status_mask)
continue;
- reg = PIPESTAT(dev_priv, pipe);
+ reg = PIPESTAT(display, pipe);
pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
@@ -589,25 +574,24 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
intel_de_write(display, reg, enable_mask);
}
}
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&display->irq.lock);
}
-void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i915_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -617,22 +601,21 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(display);
}
-void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i965_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -645,21 +628,20 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void valleyview_pipestat_irq_handler(struct intel_display *display,
u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -669,18 +651,17 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void ibx_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+ drm_dbg(display->drm, "PCH audio power change on port %d\n",
port_name(port));
}
@@ -691,26 +672,26 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
- drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
+ drm_dbg(display->drm, "PCH HDCP audio interrupt\n");
if (pch_iir & SDE_AUDIO_TRANS_MASK)
- drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder audio interrupt\n");
if (pch_iir & SDE_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
if (pch_iir & SDE_FDI_MASK) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
- drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n");
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
@@ -753,14 +734,13 @@ static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
{}
};
-static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
+static void ivb_err_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 err_int = intel_de_read(display, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (err_int & ERR_INT_INVALID_GTT_PTE)
drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");
@@ -768,17 +748,17 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
if (err_int & ERR_INT_INVALID_PTE_DATA)
drm_err_ratelimited(display->drm, "Invalid PTE data\n");
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
- if (IS_IVYBRIDGE(dev_priv))
- ivb_pipe_crc_irq_handler(dev_priv, pipe);
+ if (display->platform.ivybridge)
+ ivb_pipe_crc_irq_handler(display, pipe);
else
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
}
fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
@@ -790,34 +770,32 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
intel_de_write(display, GEN7_ERR_INT, err_int);
}
-static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
+static void cpt_serr_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 serr_int = intel_de_read(display, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(display, pipe);
intel_de_write(display, SERR_INT, serr_int);
}
-static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void cpt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+ drm_dbg(display->drm, "PCH audio power change on port %c\n",
port_name(port));
}
@@ -828,20 +806,20 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
+ drm_dbg(display->drm, "Audio CP request interrupt\n");
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
+ drm_dbg(display->drm, "Audio CP change interrupt\n");
if (pch_iir & SDE_FDI_MASK_CPT) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
- cpt_serr_int_handler(dev_priv);
+ cpt_serr_int_handler(display);
}
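
Both PCH audio handlers recover a port number with the same ffs() idiom: isolate the field with the mask, shift it down, and take the 1-based index of the lowest set bit. A standalone illustration (the mask and shift values here are invented; the real SDE_AUDIO_POWER_* definitions live in the register headers):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define AUDIO_POWER_SHIFT 29			/* illustrative values only */
#define AUDIO_POWER_MASK  (7u << AUDIO_POWER_SHIFT)

int main(void)
{
	unsigned int pch_iir = 1u << 30;	/* pretend one power bit fired */

	/* Same shape as the handler: mask, shift, ffs(). */
	int port = ffs((pch_iir & AUDIO_POWER_MASK) >> AUDIO_POWER_SHIFT);

	printf("audio power change on port index %d\n", port);
	return 0;
}
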
static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
@@ -894,14 +872,13 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display)
}
}
-void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
intel_dp_aux_irq_handler(display);
@@ -910,58 +887,57 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
intel_opregion_asle_intr(display);
if (de_iir & DE_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (de_iir & DE_GTT_FAULT)
ilk_gtt_fault_irq_handler(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
}
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
u32 pch_iir = intel_de_read(display, SDEIIR);
- if (HAS_PCH_CPT(dev_priv))
- cpt_irq_handler(dev_priv, pch_iir);
+ if (HAS_PCH_CPT(display))
+ cpt_irq_handler(display, pch_iir);
else
- ibx_irq_handler(dev_priv, pch_iir);
+ ibx_irq_handler(display, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
- if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
- gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
+ if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT)
+ ilk_display_rps_irq_handler(display);
}
-void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev_priv);
+ ivb_err_int_handler(display);
if (de_iir & DE_EDP_PSR_INT_HSW) {
struct intel_encoder *encoder;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 psr_iir;
@@ -977,35 +953,35 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
}
/* check event from PCH */
- if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
+ if (!HAS_PCH_NOP(display) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = intel_de_read(display, SDEIIR);
- cpt_irq_handler(dev_priv, pch_iir);
+ cpt_irq_handler(display, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
}
-static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
u32 mask;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return 0;
- else if (DISPLAY_VER(dev_priv) >= 14)
+ else if (DISPLAY_VER(display) >= 14)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB;
- else if (DISPLAY_VER(dev_priv) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1015,7 +991,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC2 |
TGL_DE_PORT_AUX_USBC3 |
TGL_DE_PORT_AUX_USBC4;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1027,12 +1003,12 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC6;
mask = GEN8_AUX_CHANNEL_A;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (DISPLAY_VER(dev_priv) == 11) {
+ if (DISPLAY_VER(display) == 11) {
mask |= ICL_AUX_CHANNEL_F;
mask |= ICL_AUX_CHANNEL_E;
}
@@ -1040,11 +1016,17 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
return mask;
}
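
gen8_de_port_aux_mask() is a descending DISPLAY_VER ladder: the newest check has to come first, otherwise a newer platform would fall into an older branch and return the wrong AUX bits. The same ordering rule, reduced to a runnable toy (mask values invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t aux_mask(int ver)
{
	if (ver >= 20)
		return 0;	/* no DE-port AUX bits on ver 20+ */
	else if (ver >= 14)
		return 0x3;	/* DDIA | DDIB (illustrative values) */
	else if (ver >= 12)
		return 0xff;
	return 0x1;		/* channel A only */
}

int main(void)
{
	for (int v = 11; v <= 21; v += 2)
		printf("ver %2d -> mask 0x%02x\n", v, aux_mask(v));
	return 0;
}
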
-static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (DISPLAY_VER(display) >= 14)
+ if (DISPLAY_VER(display) >= 20)
+ return MTL_PLANE_ATS_FAULT |
+ GEN9_PIPE_CURSOR_FAULT |
+ GEN11_PIPE_PLANE5_FAULT |
+ GEN9_PIPE_PLANE4_FAULT |
+ GEN9_PIPE_PLANE3_FAULT |
+ GEN9_PIPE_PLANE2_FAULT |
+ GEN9_PIPE_PLANE1_FAULT;
+ else if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
GEN12_PIPEDMC_FAULT |
@@ -1195,15 +1177,14 @@ gen8_pipe_fault_handlers(struct intel_display *display)
return bdw_pipe_fault_handlers;
}
-static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
+static void intel_pmdemand_irq_handler(struct intel_display *display)
{
- wake_up_all(&dev_priv->display.pmdemand.waitqueue);
+ wake_up_all(&display->pmdemand.waitqueue);
}
static void
-gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &dev_priv->display;
bool found = false;
if (HAS_DBUF_OVERLAP_DETECTION(display)) {
@@ -1213,20 +1194,20 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (iir & (XELPDP_PMDEMAND_RSP |
XELPDP_PMDEMAND_RSPTOUT_ERR)) {
if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Error waiting for Punit PM Demand Response\n");
- intel_pmdemand_irq_handler(dev_priv);
+ intel_pmdemand_irq_handler(display);
found = true;
}
if (iir & XELPDP_RM_TIMEOUT) {
u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
- drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
+ drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
found = true;
}
} else if (iir & GEN8_DE_MISC_GSE) {
@@ -1239,12 +1220,12 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 psr_iir;
i915_reg_t iir_reg;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- iir_reg = TRANS_PSR_IIR(dev_priv,
- intel_dp->psr.transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ iir_reg = TRANS_PSR_IIR(display,
+ intel_dp->psr.transcoder);
else
iir_reg = EDP_PSR_IIR;
@@ -1256,19 +1237,18 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_psr_irq_handler(intel_dp, psr_iir);
/* prior GEN12 only have one EDP PSR */
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
break;
}
}
if (!found)
- drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
+ drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
-static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
+static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
u32 te_trigger)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe = INVALID_PIPE;
enum transcoder dsi_trans;
enum port port;
@@ -1278,7 +1258,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* Incase of dual link, TE comes from DSI_1
* this is to check if dual link is enabled
*/
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -1294,12 +1274,12 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
- drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
+ drm_err(display->drm, "DSI trancoder not configured in command mode\n");
return;
}
/* Get PIPE for handling VBLANK event */
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -1311,28 +1291,27 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
pipe = PIPE_C;
break;
default:
- drm_err(&dev_priv->drm, "Invalid PIPE\n");
+ drm_err(display->drm, "Invalid PIPE\n");
return;
}
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}
-static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return GEN9_PIPE_PLANE1_FLIP_DONE;
else
return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
-static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
+static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
- struct intel_display *display = &i915->display;
u32 pica_ier = 0;
*pica_iir = 0;
@@ -1346,7 +1325,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
* their flags both in the PICA and SDE IIR.
*/
if (*pch_iir & SDE_PICAINTERRUPT) {
- drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
+ drm_WARN_ON(display->drm, INTEL_PCH_TYPE(display) < PCH_MTL);
pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
@@ -1359,32 +1338,31 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}
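
gen8_read_and_ack_pch_irqs() brackets the PICA ack: it zeroes PICAINTERRUPT_IER via intel_de_rmw() so nothing new fires mid-sequence, reads and clears the IIR, then restores the saved enables. A self-contained sketch of that mask/ack/unmask bracket with plain variables standing in for the registers:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for PICAINTERRUPT_IER/IIR; real access goes through
 * intel_de_rmw()/intel_de_read() as in the hunk above. */
static uint32_t pica_ier = 0xff, pica_iir = 0x04;

static uint32_t rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t old = *reg;

	*reg = (old & ~clear) | set;
	return old;
}

static uint32_t read_and_ack_pica(void)
{
	uint32_t saved_ier = rmw(&pica_ier, ~0u, 0);	/* mask all sources */
	uint32_t iir = pica_iir;

	pica_iir = 0;			/* ack while masked */
	pica_ier = saved_ier;		/* restore enables */
	return iir;
}

int main(void)
{
	printf("pica_iir was 0x%02x, ier back to 0x%02x\n",
	       read_and_ack_pica(), pica_ier);
	return 0;
}
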
-void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
- struct intel_display *display = &dev_priv->display;
u32 iir;
enum pipe pipe;
- drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
+ drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = intel_de_read(display, GEN8_DE_MISC_IIR);
if (iir) {
intel_de_write(display, GEN8_DE_MISC_IIR, iir);
- gen8_de_misc_irq_handler(dev_priv, iir);
+ gen8_de_misc_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE MISC)!\n");
}
}
- if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
iir = intel_de_read(display, GEN11_DE_HPD_IIR);
if (iir) {
intel_de_write(display, GEN11_DE_HPD_IIR, iir);
- gen11_hpd_irq_handler(dev_priv, iir);
+ gen11_hpd_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied, (DE HPD)!\n");
}
}
@@ -1396,52 +1374,52 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_de_write(display, GEN8_DE_PORT_IIR, iir);
- if (iir & gen8_de_port_aux_mask(dev_priv)) {
+ if (iir & gen8_de_port_aux_mask(display)) {
intel_dp_aux_irq_handler(display);
found = true;
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
+ bxt_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
}
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
(iir & BXT_DE_PORT_GMBUS)) {
intel_gmbus_irq_handler(display);
found = true;
}
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
if (te_trigger) {
- gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
+ gen11_dsi_te_interrupt_handler(display, te_trigger);
found = true;
}
}
if (!found)
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"Unexpected DE Port interrupt\n");
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PORT)!\n");
}
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
@@ -1449,44 +1427,48 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- drm_err_ratelimited(&dev_priv->drm,
- "The master control interrupt lied (DE PIPE)!\n");
+ drm_err_ratelimited(display->drm,
+ "The master control interrupt lied (DE PIPE %c)!\n",
+ pipe_name(pipe));
continue;
}
intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
- if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
- flip_done_handler(dev_priv, pipe);
+ if (iir & gen8_de_pipe_flip_done_mask(display))
+ flip_done_handler(display, pipe);
- if (HAS_DSB(dev_priv)) {
+ if (HAS_DSB(display)) {
if (iir & GEN12_DSB_INT(INTEL_DSB_0))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);
if (iir & GEN12_DSB_INT(INTEL_DSB_1))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);
if (iir & GEN12_DSB_INT(INTEL_DSB_2))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
}
+ if (HAS_PIPEDMC(display) && iir & GEN12_PIPEDMC_INTERRUPT)
+ intel_pipedmc_irq_handler(display, pipe);
+
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
- fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
+ fault_errors = iir & gen8_de_pipe_fault_mask(display);
if (fault_errors)
intel_pipe_fault_irq_handler(display,
gen8_pipe_fault_handlers(display),
pipe, fault_errors);
}
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
+ if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display) &&
master_ctl & GEN8_DE_PCH_IRQ) {
u32 pica_iir;
@@ -1495,31 +1477,30 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
+ gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
if (iir) {
if (pica_iir)
- xelpdp_pica_irq_handler(dev_priv, pica_iir);
+ xelpdp_pica_irq_handler(display, pica_iir);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- spt_irq_handler(dev_priv, iir);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_handler(display, iir);
+ else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
+ spt_irq_handler(display, iir);
else
- cpt_irq_handler(dev_priv, iir);
+ cpt_irq_handler(display, iir);
} else {
/*
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"The master control interrupt lied (SDE)!\n");
}
}
}
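
gen8_de_irq_handler() repeats one pattern per source: check the master-control bit, read the IIR, complain if the master bit "lied" (was set with an empty IIR), otherwise write the value back to ack before dispatching on individual bits. A minimal runnable model of that ack-then-handle loop (bit values invented):

#include <stdint.h>
#include <stdio.h>

#define DE_MISC_IRQ (1u << 0)
#define MISC_GSE    (1u << 27)

static uint32_t fake_iir = MISC_GSE;

static uint32_t read_iir(void)    { return fake_iir; }
static void write_iir(uint32_t v) { fake_iir &= ~v; }	/* write-1-to-clear */

static void handle_de_misc(uint32_t master_ctl)
{
	uint32_t iir;

	if (!(master_ctl & DE_MISC_IRQ))
		return;

	iir = read_iir();
	if (!iir) {
		fprintf(stderr, "master control interrupt lied (DE MISC)\n");
		return;
	}

	write_iir(iir);		/* ack before handling, as in the driver */
	if (iir & MISC_GSE)
		printf("GSE event\n");
}

int main(void)
{
	handle_de_misc(DE_MISC_IRQ);
	return 0;
}
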
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
- struct intel_display *display = &i915->display;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -1532,20 +1513,17 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
return iir;
}
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
- struct intel_display *display = &i915->display;
-
if (iir & GEN11_GU_MISC_GSE)
intel_opregion_asle_intr(display);
}
-void gen11_display_irq_handler(struct drm_i915_private *i915)
+void gen11_display_irq_handler(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 disp_ctl;
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_block(display);
/*
* GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
* for the display related bits.
@@ -1553,16 +1531,15 @@ void gen11_display_irq_handler(struct drm_i915_private *i915)
disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- gen8_de_irq_handler(i915, disp_ctl);
+ gen8_de_irq_handler(display, disp_ctl);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_unblock(display);
}
-static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
/*
* Vblank/CRC interrupts fail to wake the device up from C2+.
@@ -1570,117 +1547,116 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
* the problem. There is a small power cost so we do this
* only when vblank/CRC interrupts are actually enabled.
*/
- if (i915->display.irq.vblank_enabled++ == 0)
+ if (display->irq.vblank_enabled++ == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
- if (--i915->display.irq.vblank_enabled == 0)
+ if (--display->irq.vblank_enabled == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
- spin_lock_irq(&i915->drm.vblank_time_lock);
+ spin_lock_irq(&display->drm->vblank_time_lock);
if (enable)
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
else
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
- spin_unlock_irq(&i915->drm.vblank_time_lock);
+ spin_unlock_irq(&display->drm->vblank_time_lock);
}
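
The i915gm C-state workaround is refcounted: only the 0->1 and 1->0 transitions of vblank_enabled touch SCPD0, so nested vblank users cost nothing. The transition logic on its own:

#include <stdio.h>

/* In the driver this counter is display->irq.vblank_enabled, guarded
 * by vblank_time_lock; a bare int is enough to show the shape. */
static int vblank_enabled;

static void cstate_wa_enable(void)
{
	if (vblank_enabled++ == 0)
		puts("0->1: disable render clock gating (SCPD0)");
}

static void cstate_wa_disable(void)
{
	if (--vblank_enabled == 0)
		puts("1->0: re-enable render clock gating (SCPD0)");
}

int main(void)
{
	cstate_wa_enable();	/* programs the workaround */
	cstate_wa_enable();	/* nested user: no register write */
	cstate_wa_disable();
	cstate_wa_disable();	/* last user: undoes the workaround */
	return 0;
}
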
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
return i8xx_enable_vblank(crtc);
}
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
i8xx_disable_vblank(crtc);
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
}
int i965_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i965_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int ilk_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_enable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even though there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(crtc);
return 0;
@@ -1688,15 +1664,15 @@ int ilk_enable_vblank(struct drm_crtc *crtc)
void ilk_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_disable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
@@ -1722,44 +1698,36 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
return true;
}
-static void intel_display_vblank_dc_work(struct work_struct *work)
+static void intel_display_vblank_notify_work(struct work_struct *work)
{
struct intel_display *display =
- container_of(work, typeof(*display), irq.vblank_dc_work);
- int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
+ container_of(work, typeof(*display), irq.vblank_notify_work);
+ int vblank_enable_count = READ_ONCE(display->irq.vblank_enable_count);
- /*
- * NOTE: intel_display_power_set_target_dc_state is used only by PSR
- * code for DC3CO handling. DC3CO target state is currently disabled in
- * PSR code. If DC3CO is taken into use we need take that into account
- * here as well.
- */
- intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
- DC_STATE_EN_UPTO_DC6);
+ intel_psr_notify_vblank_enable_disable(display, vblank_enable_count);
}
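
The renamed worker uses the standard container_of() step to get from the embedded work_struct back to struct intel_display. The same pointer arithmetic, spelled out in portable C (the kernel macro is equivalent up to extra type checking):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	int pending;
};

struct display {
	int vblank_enable_count;
	struct work_struct vblank_notify_work;
};

static void notify_work_fn(struct work_struct *work)
{
	struct display *display =
		container_of(work, struct display, vblank_notify_work);

	printf("vblank enables: %d\n", display->vblank_enable_count);
}

int main(void)
{
	struct display d = { .vblank_enable_count = 2 };

	notify_work_fn(&d.vblank_notify_work);
	return 0;
}
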
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, true))
return 0;
- if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ if (crtc->vblank_psr_notify && display->irq.vblank_enable_count++ == 0)
+ schedule_work(&display->irq.vblank_notify_work);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even if there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated, so check only for PSR.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(&crtc->base);
return 0;
@@ -1769,19 +1737,18 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, false))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
- if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ if (crtc->vblank_psr_notify && --display->irq.vblank_enable_count == 0)
+ schedule_work(&display->irq.vblank_notify_work);
}
static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
@@ -1892,11 +1859,11 @@ void vlv_display_error_irq_handler(struct intel_display *display,
vlv_page_table_error_irq_handler(display, dpinvgtt);
}
-static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
@@ -1904,31 +1871,60 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(to_intel_uncore(display->drm),
VLV_ERROR_REGS);
- i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
+ i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
- i9xx_pipestat_irq_reset(dev_priv);
+ i9xx_pipestat_irq_reset(display);
intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
-void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_reset(struct intel_display *display)
{
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- _vlv_display_irq_reset(dev_priv);
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_reset(display);
+ spin_unlock_irq(&display->irq.lock);
}
-void i9xx_display_irq_reset(struct drm_i915_private *i915)
+void i9xx_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (I915_HAS_HOTPLUG(i915)) {
- i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0);
+ if (HAS_HOTPLUG(display)) {
+ i915_hotplug_interrupt_update(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
}
- i9xx_pipestat_irq_reset(i915);
+ i9xx_pipestat_irq_reset(display);
+}
+
+void i915_display_irq_postinstall(struct intel_display *display)
+{
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
+}
+
+void i965_display_irq_postinstall(struct intel_display *display)
+{
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
}
static u32 vlv_error_mask(void)
@@ -1937,17 +1933,14 @@ static u32 vlv_error_mask(void)
return VLV_ERROR_PAGE_TABLE;
}
-void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT,
DPINVGTT_STATUS_MASK_CHV |
DPINVGTT_EN_MASK_CHV);
@@ -1961,9 +1954,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(display, pipe)
+ i915_enable_pipestat(display, pipe, pipestat_mask);
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -1972,53 +1965,76 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
I915_LPE_PIPE_B_INTERRUPT |
I915_MASTER_ERROR_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}
-void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_postinstall(struct intel_display *display)
+{
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_postinstall(display);
+ spin_unlock_irq(&display->irq.lock);
+}
+
+void ibx_display_irq_reset(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (HAS_PCH_NOP(i915))
+ return;
+
+ gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
+
+ if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
+ intel_de_write(display, SERR_INT, 0xffffffff);
+}
+
+void gen8_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
+
+ if (HAS_PCH_SPLIT(i915))
+ ibx_display_irq_reset(display);
}
-void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
+void gen11_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2026,10 +2042,10 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
continue;
intel_de_write(display,
- TRANS_PSR_IMR(dev_priv, trans),
+ TRANS_PSR_IMR(display, trans),
0xffffffff);
intel_de_write(display,
- TRANS_PSR_IIR(dev_priv, trans),
+ TRANS_PSR_IIR(display, trans),
0xffffffff);
}
} else {
@@ -2037,7 +2053,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
}
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
@@ -2045,55 +2061,55 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
else
intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_post_enable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
+ display->irq.de_irq_mask[pipe],
+ ~display->irq.de_irq_mask[pipe] | extra_ier);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_pre_disable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -2110,17 +2126,16 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
* to avoid races with the irq handler, assuming we have MSI. Shared legacy
* interrupts could still race.
*/
-static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ibx_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 mask;
- if (HAS_PCH_NOP(dev_priv))
+ if (HAS_PCH_NOP(display))
return;
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
- else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
+ else if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
else
mask = SDE_GMBUS_CPT;
@@ -2128,40 +2143,50 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_enable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
- dev_priv->display.irq.vlv_display_irqs_enabled = true;
+ if (display->irq.vlv_display_irqs_enabled)
+ goto out;
+
+ display->irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
- _vlv_display_irq_reset(dev_priv);
- vlv_display_irq_postinstall(dev_priv);
+ _vlv_display_irq_reset(display);
+ _vlv_display_irq_postinstall(display);
}
+
+out:
+ spin_unlock_irq(&display->irq.lock);
}
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_disable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
- dev_priv->display.irq.vlv_display_irqs_enabled = false;
+ if (!display->irq.vlv_display_irqs_enabled)
+ goto out;
+
+ display->irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- _vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(display);
+out:
+ spin_unlock_irq(&display->irq.lock);
}
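
valleyview_{enable,disable}_display_irqs() used to assert that the caller held the IRQ lock; each now takes display->irq.lock itself and leaves through a single "goto out" unlock site, so every early exit stays paired with the unlock. The control-flow shape in isolation:

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled;

static void enable_display_irqs(void)
{
	/* spin_lock_irq(&display->irq.lock); */
	if (irqs_enabled)
		goto out;	/* already enabled: nothing to do */

	irqs_enabled = true;
	puts("reset + postinstall under the lock");
out:
	;	/* spin_unlock_irq(&display->irq.lock); single exit */
}

int main(void)
{
	enable_display_irqs();
	enable_display_irqs();	/* second call takes the goto path */
	return 0;
}
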
-void ilk_de_irq_postinstall(struct drm_i915_private *i915)
+void ilk_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
u32 display_mask, extra_mask;
- if (DISPLAY_VER(i915) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
@@ -2182,59 +2207,57 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
DE_DP_A_HOTPLUG);
}
- if (IS_HASWELL(i915)) {
+ if (display->platform.haswell) {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
- if (IS_IRONLAKE_M(i915))
+ if (display->platform.ironlake && display->platform.mobile)
extra_mask |= DE_PCU_EVENT;
i915->irq_mask = ~display_mask;
- ibx_irq_postinstall(i915);
+ ibx_irq_postinstall(display);
intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
display_mask | extra_mask);
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915);
-static void icp_irq_postinstall(struct drm_i915_private *i915);
+static void mtp_irq_postinstall(struct intel_display *display);
+static void icp_irq_postinstall(struct intel_display *display);
-void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen8_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+ u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
- u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
+ u32 de_port_masked = gen8_de_port_aux_mask(display);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VER(dev_priv) >= 14)
- mtp_irq_postinstall(dev_priv);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
- else if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev_priv);
+ if (DISPLAY_VER(display) >= 14)
+ mtp_irq_postinstall(display);
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_postinstall(display);
+ else if (HAS_PCH_SPLIT(display))
+ ibx_irq_postinstall(display);
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
de_misc_masked |= GEN8_DE_MISC_GSE;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_masked |= BXT_DE_PORT_GMBUS;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum port port;
if (intel_bios_is_dsi_present(display, &port))
@@ -2244,25 +2267,29 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_DBUF_OVERLAP_DETECTION(display))
de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
- if (HAS_DSB(dev_priv))
+ if (HAS_DSB(display))
de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
GEN12_DSB_INT(INTEL_DSB_1) |
GEN12_DSB_INT(INTEL_DSB_2);
+ /* TODO: figure out PIPEDMC interrupts for pre-LNL */
+ if (DISPLAY_VER(display) >= 20)
+ de_pipe_masked |= GEN12_PIPEDMC_INTERRUPT;
+
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
de_port_enables = de_port_masked;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
- else if (IS_BROADWELL(dev_priv))
+ else if (display->platform.broadwell)
de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2270,19 +2297,19 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
continue;
intel_display_irq_regs_assert_irr_is_zero(display,
- TRANS_PSR_IIR(dev_priv, trans));
+ TRANS_PSR_IIR(display, trans));
}
} else {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
}
- for_each_pipe(dev_priv, pipe) {
- dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
+ for_each_pipe(display, pipe) {
+ display->irq.de_irq_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
+ display->irq.de_irq_mask[pipe],
de_pipe_enables);
}
@@ -2291,7 +2318,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
de_misc_masked);
- if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
+ if (IS_DISPLAY_VER(display, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
@@ -2301,9 +2328,8 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915)
+static void mtp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
@@ -2315,43 +2341,68 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
-static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+static void icp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 mask = SDE_GMBUS_ICP;
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen11_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void dg1_de_irq_postinstall(struct drm_i915_private *i915)
+void dg1_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(i915);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void intel_display_irq_init(struct drm_i915_private *i915)
+void intel_display_irq_init(struct intel_display *display)
{
- i915->drm.vblank_disable_immediate = true;
+ spin_lock_init(&display->irq.lock);
+
+ display->drm->vblank_disable_immediate = true;
+
+ intel_hotplug_irq_init(display);
- intel_hotplug_irq_init(i915);
+ INIT_WORK(&display->irq.vblank_notify_work,
+ intel_display_vblank_notify_work);
+}
+
+struct intel_display_irq_snapshot {
+ u32 derrmr;
+};
+
+struct intel_display_irq_snapshot *
+intel_display_irq_snapshot_capture(struct intel_display *display)
+{
+ struct intel_display_irq_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
+ if (!snapshot)
+ return NULL;
+
+ if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
+ snapshot->derrmr = intel_de_read(display, DERRMR);
+
+ return snapshot;
+}
+
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ if (!snapshot)
+ return;
- INIT_WORK(&i915->display.irq.vblank_dc_work,
- intel_display_vblank_dc_work);
+ drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
}
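For context, a minimal sketch of how the two new snapshot helpers above would pair up in an error-capture path; everything here other than intel_display_irq_snapshot_capture(), intel_display_irq_snapshot_print() and the stock drm_info_printer()/kfree() helpers is hypothetical:

	/* Hypothetical caller; assumes <drm/drm_print.h> and <linux/slab.h>. */
	static void example_dump_display_irq_state(struct intel_display *display)
	{
		struct drm_printer p = drm_info_printer(display->drm->dev);
		struct intel_display_irq_snapshot *snapshot;

		/* capture uses GFP_ATOMIC above, so this works from atomic context */
		snapshot = intel_display_irq_snapshot_capture(display);

		/* _print() tolerates a NULL snapshot, so no error check is needed */
		intel_display_irq_snapshot_print(snapshot, &p);

		kfree(snapshot);
	}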
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index d9867cd0a220..c66db3851da4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -12,28 +12,29 @@
enum pipe;
struct drm_crtc;
-struct drm_i915_private;
+struct drm_printer;
struct intel_display;
+struct intel_display_irq_snapshot;
-void valleyview_enable_display_irqs(struct drm_i915_private *i915);
-void valleyview_disable_display_irqs(struct drm_i915_private *i915);
+void valleyview_enable_display_irqs(struct intel_display *display);
+void valleyview_disable_display_irqs(struct intel_display *display);
-void ilk_update_display_irq(struct drm_i915_private *i915,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits);
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits);
+void ilk_enable_display_irq(struct intel_display *display, u32 bits);
+void ilk_disable_display_irq(struct intel_display *display, u32 bits);
-void bdw_update_port_irq(struct drm_i915_private *i915, u32 interrupt_mask, u32 enabled_irq_mask);
-void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
-void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask);
+void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
+void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
-void ibx_display_interrupt_update(struct drm_i915_private *i915,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits);
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits);
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits);
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask);
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask);
+void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask);
+void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask);
int i8xx_enable_vblank(struct drm_crtc *crtc);
int i915gm_enable_vblank(struct drm_crtc *crtc);
@@ -46,41 +47,46 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void ivb_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void ilk_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl);
-void gen11_display_irq_handler(struct drm_i915_private *i915);
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir);
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir);
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl);
+void gen11_display_irq_handler(struct intel_display *display);
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl);
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir);
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl);
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir);
-void i9xx_display_irq_reset(struct drm_i915_private *i915);
-void vlv_display_irq_reset(struct drm_i915_private *i915);
-void gen8_display_irq_reset(struct drm_i915_private *i915);
-void gen11_display_irq_reset(struct drm_i915_private *i915);
+void i9xx_display_irq_reset(struct intel_display *display);
+void ibx_display_irq_reset(struct intel_display *display);
+void vlv_display_irq_reset(struct intel_display *display);
+void gen8_display_irq_reset(struct intel_display *display);
+void gen11_display_irq_reset(struct intel_display *display);
-void vlv_display_irq_postinstall(struct drm_i915_private *i915);
-void ilk_de_irq_postinstall(struct drm_i915_private *i915);
-void gen8_de_irq_postinstall(struct drm_i915_private *i915);
-void gen11_de_irq_postinstall(struct drm_i915_private *i915);
-void dg1_de_irq_postinstall(struct drm_i915_private *i915);
+void i915_display_irq_postinstall(struct intel_display *display);
+void i965_display_irq_postinstall(struct intel_display *display);
+void vlv_display_irq_postinstall(struct intel_display *display);
+void ilk_de_irq_postinstall(struct intel_display *display);
+void gen8_de_irq_postinstall(struct intel_display *display);
+void gen11_de_irq_postinstall(struct intel_display *display);
+void dg1_de_irq_postinstall(struct intel_display *display);
u32 i915_pipestat_enable_mask(struct intel_display *display, enum pipe pipe);
-void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_enable_asle_pipestat(struct drm_i915_private *i915);
+void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
+void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
-void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
+void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]);
void vlv_display_error_irq_ack(struct intel_display *display, u32 *eir, u32 *dpinvgtt);
void vlv_display_error_irq_handler(struct intel_display *display, u32 eir, u32 dpinvgtt);
-void intel_display_irq_init(struct drm_i915_private *i915);
+void intel_display_irq_init(struct intel_display *display);
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable);
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable);
+
+struct intel_display_irq_snapshot *intel_display_irq_snapshot_capture(struct intel_display *display);
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot, struct drm_printer *p);
#endif /* __INTEL_DISPLAY_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index c4f1ab43fc0c..75316247ee8a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -62,6 +62,9 @@ intel_display_param_named_unsafe(enable_dpt, bool, 0400,
intel_display_param_named_unsafe(enable_dsb, bool, 0400,
"Enable display state buffer (DSB) (default: true)");
+intel_display_param_named_unsafe(enable_flipq, bool, 0400,
+ "Enable DMC flip queue (default: false)");
+
intel_display_param_named_unsafe(enable_sagv, bool, 0400,
"Enable system agent voltage/frequency scaling (SAGV) (default: true)");
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 5317138e6044..784e6bae8615 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -31,6 +31,7 @@ struct drm_printer;
param(int, enable_dc, -1, 0400) \
param(bool, enable_dpt, true, 0400) \
param(bool, enable_dsb, true, 0600) \
+ param(bool, enable_flipq, false, 0600) \
param(bool, enable_sagv, true, 0600) \
param(int, disable_power_well, -1, 0400) \
param(bool, enable_ips, true, 0600) \
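The new enable_flipq parameter defaults to off in both files. A driver-side guard would look roughly like the sketch below; display->params.enable_flipq follows the accessor pattern the param() table above generates, and intel_flipq_init() is a purely hypothetical consumer:

	/* Hypothetical opt-in check; the real code presumably also gates on platform support. */
	if (display->params.enable_flipq)
		intel_flipq_init(display);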
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index f7171e6932dc..273054c22325 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -5,6 +5,8 @@
#include <linux/string_helpers.h>
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -16,6 +18,8 @@
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
@@ -204,7 +208,7 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
struct i915_power_well *power_well;
bool is_enabled;
- if (pm_runtime_suspended(display->drm->dev))
+ if (intel_display_rpm_suspended(display))
return false;
is_enabled = true;
@@ -322,6 +326,35 @@ unlock:
mutex_unlock(&power_domains->lock);
}
+/**
+ * intel_display_power_get_current_dc_state - Get the current DC state.
+ * @display: display device
+ *
+ * Returns DC_STATE_DISABLE while the "DC off" power well is enabled,
+ * otherwise the target_dc_state that the "DC off" power well will
+ * program once it gets disabled.
+ */
+u32 intel_display_power_get_current_dc_state(struct intel_display *display)
+{
+ struct i915_power_well *power_well;
+ struct i915_power_domains *power_domains = &display->power.domains;
+ u32 current_dc_state = DC_STATE_DISABLE;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
+
+ if (drm_WARN_ON(display->drm, !power_well))
+ goto unlock;
+
+ current_dc_state = intel_power_well_is_enabled(display, power_well) ?
+ DC_STATE_DISABLE : power_domains->target_dc_state;
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+
+ return current_dc_state;
+}
+
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
@@ -455,7 +488,6 @@ static bool
intel_display_power_grab_async_put_ref(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -473,8 +505,8 @@ intel_display_power_grab_async_put_ref(struct intel_display *display,
goto out_verify;
cancel_async_put_work(power_domains, false);
- intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
- fetch_and_zero(&power_domains->async_put_wakeref));
+ intel_display_rpm_put_raw(display,
+ fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
verify_async_put_domains_state(power_domains);
@@ -512,9 +544,10 @@ __intel_display_power_get_domain(struct intel_display *display,
intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ struct ref_tracker *wakeref;
+
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(display, domain);
@@ -539,12 +572,11 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get_if_in_use(display);
if (!wakeref)
return NULL;
@@ -560,7 +592,7 @@ intel_display_power_get_if_enabled(struct intel_display *display,
mutex_unlock(&power_domains->lock);
if (!is_enabled) {
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
wakeref = NULL;
}
@@ -623,12 +655,10 @@ release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_display *display = container_of(power_domains,
struct intel_display,
power.domains);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get_noresume(rpm);
+ wakeref = intel_display_rpm_get_noresume(display);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
@@ -636,7 +666,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
__intel_display_power_put_domain(display, domain);
}
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static void
@@ -644,11 +674,10 @@ intel_display_power_put_async_work(struct work_struct *work)
{
struct intel_display *display = container_of(work, struct intel_display,
power.domains.async_put_work.work);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
- intel_wakeref_t old_work_wakeref = NULL;
+ struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL;
+
+ new_work_wakeref = intel_display_rpm_get_raw(display);
mutex_lock(&power_domains->lock);
@@ -688,9 +717,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (old_work_wakeref)
- intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+ intel_display_rpm_put_raw(display, old_work_wakeref);
if (new_work_wakeref)
- intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+ intel_display_rpm_put_raw(display, new_work_wakeref);
}
/**
@@ -711,10 +740,10 @@ void __intel_display_power_put_async(struct intel_display *display,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+ struct ref_tracker *work_wakeref;
+
+ work_wakeref = intel_display_rpm_get_raw(display);
delay_ms = delay_ms >= 0 ? delay_ms : 100;
@@ -746,9 +775,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(rpm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
@@ -765,7 +794,6 @@ out_verify:
*/
void intel_display_power_flush_work(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -786,7 +814,7 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
}
/**
@@ -824,10 +852,8 @@ void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
#else
/**
@@ -846,10 +872,8 @@ void intel_display_power_put(struct intel_display *display,
void intel_display_power_put_unchecked(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_display_rpm_put_unchecked(display);
}
#endif
@@ -1233,10 +1257,8 @@ static u32 hsw_read_dcomp(struct intel_display *display)
static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.haswell) {
- if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
+ if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
} else {
intel_de_write(display, D_COMP_BDW, val);
@@ -1373,26 +1395,24 @@ static void hsw_restore_lcpll(struct intel_display *display)
*/
static void hsw_enable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
drm_dbg_kms(display->drm, "Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev_priv))
+ if (HAS_PCH_LPT_LP(display))
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
hsw_disable_lcpll(display, true, true);
}
static void hsw_disable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
drm_dbg_kms(display->drm, "Disabling package C8+\n");
hsw_restore_lcpll(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/* Many display registers don't survive PC8+ */
#ifdef I915 /* FIXME */
@@ -1423,14 +1443,13 @@ static void intel_pch_reset_handshake(struct intel_display *display,
static void skl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
@@ -1586,9 +1605,7 @@ static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
static void tgl_bw_buddy_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- enum intel_dram_type type = dev_priv->dram_info.type;
- u8 num_channels = dev_priv->dram_info.num_channels;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
const struct buddy_page_mask *table;
unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
int config, i;
@@ -1605,8 +1622,8 @@ static void tgl_bw_buddy_init(struct intel_display *display)
table = tgl_buddy_page_masks;
for (config = 0; table[config].page_mask != 0; config++)
- if (table[config].num_channels == num_channels &&
- table[config].type == type)
+ if (table[config].num_channels == dram_info->num_channels &&
+ table[config].type == dram_info->type)
break;
if (table[config].page_mask == 0) {
@@ -1632,20 +1649,19 @@ static void tgl_bw_buddy_init(struct intel_display *display)
static void icl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
- INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP &&
+ INTEL_PCH_TYPE(display) < PCH_DG1)
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
@@ -1866,12 +1882,11 @@ static void vlv_cmnlane_wa(struct intel_display *display)
static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
- vlv_punit_get(dev_priv);
- ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
- vlv_punit_put(dev_priv);
+ vlv_punit_get(display->drm);
+ ret = (vlv_punit_read(display->drm, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ vlv_punit_put(display->drm);
return ret;
}
@@ -1916,7 +1931,6 @@ static void intel_power_domains_verify_state(struct intel_display *display);
*/
void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
power_domains->initializing = true;
@@ -1940,9 +1954,9 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
assert_isp_power_gated(display);
} else if (display->platform.broadwell || display->platform.haswell) {
hsw_assert_cdclk(display);
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
} else if (display->platform.ivybridge) {
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
}
/*
@@ -1979,7 +1993,6 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
*/
void intel_power_domains_driver_remove(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
@@ -1993,7 +2006,7 @@ void intel_power_domains_driver_remove(struct intel_display *display)
intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
@@ -2238,8 +2251,6 @@ static void intel_power_domains_verify_state(struct intel_display *display)
void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
intel_power_domains_suspend(display, s2idle);
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
@@ -2250,14 +2261,12 @@ void intel_display_power_suspend_late(struct intel_display *display, bool s2idle
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
display->platform.broxton) {
gen9_sanitize_dc_state(display);
@@ -2267,8 +2276,8 @@ void intel_display_power_resume_early(struct intel_display *display)
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
intel_power_domains_resume(display);
}
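The runtime-PM conversions in this file are all instances of one pattern: calls taking &i915->runtime_pm become intel_display_rpm_*() calls taking the display, and the wakeref becomes a struct ref_tracker *. In sketch form, using only names that appear in the hunks above:

	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);	/* was intel_runtime_pm_get(&i915->runtime_pm) */
	/* ... touch display hardware ... */
	intel_display_rpm_put(display, wakeref);	/* was intel_runtime_pm_put(...) */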
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 1b53d67f9b60..f8813b0e16df 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -183,6 +183,7 @@ void intel_display_power_suspend(struct intel_display *display);
void intel_display_power_resume(struct intel_display *display);
void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state);
+u32 intel_display_power_get_current_dc_state(struct intel_display *display);
bool intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
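A caller-side sketch of the new getter declared above, using only drm_dbg_kms() beyond what this patch adds; per the implementation, DC_STATE_DISABLE here means the "DC off" power well is currently enabled:

	u32 dc_state = intel_display_power_get_current_dc_state(display);

	drm_dbg_kms(display->drm, "current DC state: 0x%x\n", dc_state);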
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index e80e1fd611ca..77268802b55e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -5,12 +5,12 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_display_core.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "vlv_sideband_reg.h"
+#include "vlv_iosf_sb_reg.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
@@ -1696,6 +1696,7 @@ I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off,
XE3LPD_PW_C_POWER_DOMAINS,
XE3LPD_PW_D_POWER_DOMAINS,
POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
POWER_DOMAIN_INIT);
static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 8ec87ffd87d2..48cac225a809 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -13,6 +13,8 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -24,12 +26,25 @@
#include "intel_hotplug.h"
#include "intel_pcode.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
+#include "vlv_iosf_sb_reg.h"
#include "vlv_sideband.h"
-#include "vlv_sideband_reg.h"
+
+/*
+ * PG0 is HW controlled, so doesn't have a corresponding power well control knob
+ *
+ * {ICL,SKL}_DISP_PW1_IDX..{ICL,SKL}_DISP_PW4_IDX -> PG1..PG4
+ */
+static enum skl_power_gate pw_idx_to_pg(struct intel_display *display, int pw_idx)
+{
+ int pw1_idx = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_PW_1 : SKL_PW_CTL_IDX_PW_1;
+
+ return pw_idx - pw1_idx + SKL_PG1;
+}
struct i915_power_well_regs {
i915_reg_t bios;
@@ -186,22 +201,18 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
static void hsw_power_well_post_enable(struct intel_display *display,
u8 irq_pipe_mask, bool has_vga)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (has_vga)
intel_vga_reset_io_mem(display);
if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_post_enable(display, irq_pipe_mask);
}
static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
}
#define ICL_AUX_PW_TO_PHY(pw_idx) \
@@ -309,8 +320,8 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- bool disabled;
u32 reqs;
+ int ret;
/*
* Bspec doesn't require waiting for PWs to get disabled, but still do
@@ -321,12 +332,18 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(intel_de_read(display, regs->driver) &
- HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1);
- if (disabled)
+ reqs = hsw_power_well_requesters(display, regs, pw_idx);
+
+ ret = intel_de_wait_for_clear(display, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ reqs ? 0 : 1);
+ if (!ret)
return;
+ /* Refresh requesters in case they popped up during the wait. */
+ if (!reqs)
+ reqs = hsw_power_well_requesters(display, regs, pw_idx);
+
drm_dbg_kms(display->drm,
"%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
intel_power_well_name(power_well),
@@ -351,8 +368,7 @@ static void hsw_power_well_enable(struct intel_display *display,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ pg = pw_idx_to_pg(display, pw_idx);
/* Wa_16013190616:adlp */
if (display->platform.alderlake_p && pg == SKL_PG1)
@@ -376,8 +392,8 @@ static void hsw_power_well_enable(struct intel_display *display,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ pg = pw_idx_to_pg(display, pw_idx);
+
gen9_wait_for_power_well_fuses(display, pg);
}
@@ -487,8 +503,7 @@ static void icl_tc_cold_exit(struct intel_display *display)
int ret, tries = 0;
while (1) {
- ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
- 250, 1);
+ ret = intel_pcode_write(display->drm, ICL_PCODE_EXIT_TCCOLD, 0);
if (ret != -EAGAIN || ++tries == 3)
break;
msleep(1);
@@ -752,8 +767,9 @@ void gen9_sanitize_dc_state(struct intel_display *display)
void gen9_set_dc_state(struct intel_display *display, u32 state)
{
struct i915_power_domains *power_domains = &display->power.domains;
- u32 val;
+ bool dc6_was_enabled, enable_dc6;
u32 mask;
+ u32 val;
if (!HAS_DISPLAY(display))
return;
@@ -762,6 +778,9 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
state & ~power_domains->allowed_dc_mask))
state &= power_domains->allowed_dc_mask;
+ if (!power_domains->initializing)
+ intel_psr_notify_dc5_dc6(display);
+
val = intel_de_read(display, DC_STATE_EN);
mask = gen9_dc_mask(display);
drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
@@ -772,11 +791,19 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
power_domains->dc_state, val & mask);
+ enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
+ dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
+ if (!dc6_was_enabled && enable_dc6)
+ intel_dmc_update_dc6_allowed_count(display, true);
+
val &= ~mask;
val |= state;
gen9_write_dc_state(display, val);
+ if (!enable_dc6 && dc6_was_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
power_domains->dc_state = val & mask;
}
@@ -799,7 +826,6 @@ static void tgl_disable_dc3co(struct intel_display *display)
static void assert_can_enable_dc5(struct intel_display *display)
{
- struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
enum i915_power_well_id high_pg;
/* Power wells at this level and above must be disabled for DC5 entry */
@@ -816,9 +842,10 @@ static void assert_can_enable_dc5(struct intel_display *display)
(intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
- assert_dmc_loaded(display);
+ assert_display_rpm_held(display);
+
+ assert_main_dmc_loaded(display);
}
void gen9_enable_dc5(struct intel_display *display)
@@ -849,7 +876,7 @@ static void assert_can_enable_dc6(struct intel_display *display)
DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n");
- assert_dmc_loaded(display);
+ assert_main_dmc_loaded(display);
}
void skl_enable_dc6(struct intel_display *display)
@@ -1091,7 +1118,6 @@ static void i830_pipes_power_well_sync_hw(struct intel_display *display,
static void vlv_set_power_well(struct intel_display *display,
struct i915_power_well *power_well, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
u32 mask;
u32 state;
@@ -1101,29 +1127,29 @@ static void vlv_set_power_well(struct intel_display *display,
state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
PUNIT_PWRGT_PWR_GATE(pw_idx);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+ ((vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask) == state)
if (COND)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL);
ctrl &= ~mask;
ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+ vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+ vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));
#undef COND
out:
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void vlv_power_well_enable(struct intel_display *display,
@@ -1141,7 +1167,6 @@ static void vlv_power_well_disable(struct intel_display *display,
static bool vlv_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
bool enabled = false;
u32 mask;
@@ -1151,9 +1176,9 @@ static bool vlv_power_well_enabled(struct intel_display *display,
mask = PUNIT_PWRGT_MASK(pw_idx);
ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ state = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask;
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1167,10 +1192,10 @@ static bool vlv_power_well_enabled(struct intel_display *display,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL) & mask;
drm_WARN_ON(display->drm, ctrl != state);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
return enabled;
}
@@ -1201,7 +1226,6 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
static void vlv_display_power_well_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1225,9 +1249,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
vlv_init_display_clock_gating(display);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(display);
/*
* During driver initialization/resume we can avoid restoring the
@@ -1236,8 +1258,8 @@ static void vlv_display_power_well_init(struct intel_display *display)
if (display->power.domains.initializing)
return;
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
/* Re-enable the ADPA, if we have one */
for_each_intel_encoder(display->drm, encoder) {
@@ -1245,7 +1267,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
intel_crt_reset(&encoder->base);
}
- intel_vga_redisable_power_on(display);
+ intel_vga_disable(display);
intel_pps_unlock_regs_wa(display);
}
@@ -1254,9 +1276,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(display);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -1265,7 +1285,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
/* Prevent us from re-enabling polling on accident in late suspend */
if (!display->drm->dev->power.is_suspended)
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
}
static void vlv_display_power_well_enable(struct intel_display *display,
@@ -1431,7 +1451,6 @@ static void assert_chv_phy_status(struct intel_display *display)
static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
u32 tmp;
@@ -1455,30 +1474,30 @@ static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
drm_err(display->drm, "Display PHY %d is not power up\n",
phy);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW28);
tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
* too, so maybe it saves some power even though
* CL2 doesn't exist?
*/
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW30);
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW30, tmp);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(display, DISPLAY_PHY_CONTROL,
@@ -1529,7 +1548,6 @@ static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, val, expected, actual;
/*
@@ -1547,9 +1565,9 @@ static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_ph
else
reg = CHV_CMN_DW6_CH1;
- vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, phy, reg);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ val = vlv_dpio_read(display->drm, phy, reg);
+ vlv_dpio_put(display->drm);
/*
* This assumes !override is only used when the port is disabled.
@@ -1659,14 +1677,13 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
bool enabled;
u32 state, ctrl;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+ state = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1679,10 +1696,10 @@ static bool chv_pipe_power_well_enabled(struct intel_display *display,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
drm_WARN_ON(display->drm, ctrl << 16 != state);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
return enabled;
}
@@ -1691,36 +1708,35 @@ static void chv_set_pipe_power_well(struct intel_display *display,
struct i915_power_well *power_well,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+ ((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
if (COND)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+ vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM));
#undef COND
out:
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void chv_pipe_power_well_sync_hw(struct intel_display *display,
@@ -1766,7 +1782,7 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
* Spec states that we should timeout the request after 200us
* but the function below will timeout after 500us
*/
- ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ ret = intel_pcode_read(display->drm, TGL_PCODE_TCCOLD, &low_val, &high_val);
if (ret == 0) {
if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
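The sideband conversions in this file are equally mechanical: the vlv_punit_*()/vlv_dpio_*() helpers now key off display->drm instead of dev_priv, and the snb_pcode_*() calls become intel_pcode_*() wrappers. A read under the punit lock, with the names exactly as used above:

	u32 val;

	vlv_punit_get(display->drm);
	val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS);
	vlv_punit_put(display->drm);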
diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h
new file mode 100644
index 000000000000..7bd09d981cd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_regs.h
@@ -0,0 +1,2932 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_REGS_H__
+#define __INTEL_DISPLAY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
+#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
+#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
+
+#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
+#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1 << 1)
+#define DPIO_CMNRST (1 << 0)
+
+#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
+#define MIPIO_RST_CTRL (1 << 2)
+
+#define _BXT_PHY_CTL_DDI_A 0x64C00
+#define _BXT_PHY_CTL_DDI_B 0x64C10
+#define _BXT_PHY_CTL_DDI_C 0x64C20
+#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
+#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
+#define BXT_PHY_LANE_ENABLED (1 << 8)
+#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
+ _BXT_PHY_CTL_DDI_B)
+
+#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define _PHY_CTL_FAMILY_EDP 0x64C80
+#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
+#define COMMON_RESET_DIS (1 << 31)
+#define BXT_PHY_CTL_FAMILY(phy) \
+ _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
+ _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
+
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1 _MMIO(0x4F074)
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK 0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT 23
+#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
+
+#define ILK_GTT_FAULT _MMIO(0x44040) /* ilk/snb */
+#define GTT_FAULT_INVALID_GTT_PTE (1 << 7)
+#define GTT_FAULT_INVALID_PTE_DATA (1 << 6)
+#define GTT_FAULT_CURSOR_B_FAULT (1 << 5)
+#define GTT_FAULT_CURSOR_A_FAULT (1 << 4)
+#define GTT_FAULT_SPRITE_B_FAULT (1 << 3)
+#define GTT_FAULT_SPRITE_A_FAULT (1 << 2)
+#define GTT_FAULT_PRIMARY_B_FAULT (1 << 1)
+#define GTT_FAULT_PRIMARY_A_FAULT (1 << 0)
+
+#define DERRMR _MMIO(0x44050)
+/* Note that HBLANK events are reserved on bdw+ */
+#define DERRMR_PIPEA_SCANLINE (1 << 0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
+#define DERRMR_PIPEA_VBLANK (1 << 3)
+#define DERRMR_PIPEA_HBLANK (1 << 5)
+#define DERRMR_PIPEB_SCANLINE (1 << 8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
+#define DERRMR_PIPEB_VBLANK (1 << 11)
+#define DERRMR_PIPEB_HBLANK (1 << 13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define DERRMR_PIPEC_SCANLINE (1 << 14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
+#define DERRMR_PIPEC_VBLANK (1 << 21)
+#define DERRMR_PIPEC_HBLANK (1 << 22)
+
+#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
+ VLV_IER, \
+ VLV_IIR)
+
+#define VLV_EIR _MMIO(VLV_DISPLAY_BASE + 0x20b0)
+#define VLV_EMR _MMIO(VLV_DISPLAY_BASE + 0x20b4)
+#define VLV_ESR _MMIO(VLV_DISPLAY_BASE + 0x20b8)
+#define VLV_ERROR_GUNIT_TLB_DATA (1 << 6)
+#define VLV_ERROR_GUNIT_TLB_PTE (1 << 5)
+#define VLV_ERROR_PAGE_TABLE (1 << 4)
+#define VLV_ERROR_CLAIM (1 << 0)
+
+#define VLV_ERROR_REGS I915_ERROR_REGS(VLV_EMR, VLV_EIR)
+
+#define _MBUS_ABOX0_CTL 0x45038
+#define _MBUS_ABOX1_CTL 0x45048
+#define _MBUS_ABOX2_CTL 0x4504C
+#define MBUS_ABOX_CTL(x) \
+ _MMIO(_PICK_EVEN_2RANGES(x, 2, \
+ _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
+ _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
+
+#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
+#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
+#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
+#define MBUS_ABOX_B_CREDIT(x) ((x) << 16)
+#define MBUS_ABOX_BT_CREDIT_POOL2_MASK (0x1F << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL2(x) ((x) << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
+#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
+
+#define IPS_CTL _MMIO(0x43408)
+#define IPS_ENABLE REG_BIT(31)
+#define IPS_FALSE_COLOR REG_BIT(4)
+
+/*
+ * Clock control & power management
+ */
+#define _DPLL_A 0x6014
+#define _DPLL_B 0x6018
+#define _CHV_DPLL_C 0x6030
+#define DPLL(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
+
+#define VGA0 _MMIO(0x6000)
+#define VGA1 _MMIO(0x6004)
+#define VGA_PD _MMIO(0x6010)
+#define VGA0_PD_P2_DIV_4 (1 << 7)
+#define VGA0_PD_P1_DIV_2 (1 << 5)
+#define VGA0_PD_P1_SHIFT 0
+#define VGA0_PD_P1_MASK (0x1f << 0)
+#define VGA1_PD_P2_DIV_4 (1 << 15)
+#define VGA1_PD_P1_DIV_2 (1 << 13)
+#define VGA1_PD_P1_SHIFT 8
+#define VGA1_PD_P1_MASK (0x1f << 8)
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_SDVO_HIGH_SPEED (1 << 30)
+#define DPLL_DVO_2X_MODE (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_LOCK_VLV (1 << 15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
+#define DPLL_SSC_REF_CLK_CHV (1 << 13)
+#define DPLL_PORTC_READY_MASK (0xf << 4)
+#define DPLL_PORTB_READY_MASK (0xf)
+
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+
+/* Additional CHV pll/phy registers */
+#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
+#define DPLL_PORTD_READY_MASK (0xf)
+#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
+#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
+#define PHY_LDO_DELAY_0NS 0x0
+#define PHY_LDO_DELAY_200NS 0x1
+#define PHY_LDO_DELAY_600NS 0x2
+#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
+#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
+#define PHY_CH_SU_PSR 0x1
+#define PHY_CH_DEEP_PSR 0x7
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
+#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
+#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
+#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
+
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
+/* i830, required in DVO non-gang */
+#define PLL_P2_DIVIDE_BY_4 (1 << 23)
+#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/* Ironlake */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
+# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
+
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+#define _DPLL_A_MD 0x601c
+#define _DPLL_B_MD 0x6020
+#define _CHV_DPLL_C_MD 0x603c
+#define DPLL_MD(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
+
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+
+#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
+
+#define _FPA0 0x6040
+#define _FPA1 0x6044
+#define _FPB0 0x6048
+#define _FPB1 0x604c
+#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
+#define FP_M2_DIV_SHIFT 0
+
+#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
+#define FW_CSPWRDWNEN (1 << 15)
+
+#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
+
+#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
+#define CDCLK_FREQ_SHIFT 4
+#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
+#define CZCLK_FREQ_MASK 0xf
+
+#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
+#define PFI_CREDIT_63 (9 << 28) /* chv only */
+#define PFI_CREDIT_31 (8 << 28) /* chv only */
+#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
+#define PFI_CREDIT_RESEND (1 << 27)
+#define VGA_FAST_MODE_DISABLE (1 << 14)
+
+#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
+
+#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
+
+/*
+ * Overlay regs
+ */
+#define OVADD _MMIO(0x30000)
+#define DOVSTA _MMIO(0x30008)
+#define OC_BUF (0x3 << 20)
+#define OGAMC5 _MMIO(0x30010)
+#define OGAMC4 _MMIO(0x30014)
+#define OGAMC3 _MMIO(0x30018)
+#define OGAMC2 _MMIO(0x3001c)
+#define OGAMC1 _MMIO(0x30020)
+#define OGAMC0 _MMIO(0x30024)
+
+#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
+#define BXT_GMBUS_GATING_DIS (1 << 14)
+#define DG2_DPFC_GATING_DIS REG_BIT(31)
+
+#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
+#define DPCE_GATING_DIS REG_BIT(17)
+
+#define _CLKGATE_DIS_PSL_A 0x46520
+#define _CLKGATE_DIS_PSL_B 0x46524
+#define _CLKGATE_DIS_PSL_C 0x46528
+#define DUPS1_GATING_DIS (1 << 15)
+#define DUPS2_GATING_DIS (1 << 19)
+#define DUPS3_GATING_DIS (1 << 23)
+#define CURSOR_GATING_DIS REG_BIT(28)
+#define DPF_GATING_DIS (1 << 10)
+#define DPF_RAM_GATING_DIS (1 << 9)
+#define DPFR_GATING_DIS (1 << 8)
+
+#define CLKGATE_DIS_PSL(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+
+#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
+#define _CLKGATE_DIS_PSL_EXT_B 0x46550
+#define PIPEDMC_GATING_DIS REG_BIT(12)
+
+#define CLKGATE_DIS_PSL_EXT(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
+
+/*
+ * Display engine regs
+ */
+/* Pipe/transcoder A timing regs */
+#define _TRANS_HTOTAL_A 0x60000
+#define _TRANS_HTOTAL_B 0x61000
+#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
+#define HTOTAL_MASK REG_GENMASK(31, 16)
+#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
+#define HACTIVE_MASK REG_GENMASK(15, 0)
+#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
+
+#define _TRANS_HBLANK_A 0x60004
+#define _TRANS_HBLANK_B 0x61004
+#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
+#define HBLANK_END_MASK REG_GENMASK(31, 16)
+#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
+#define HBLANK_START_MASK REG_GENMASK(15, 0)
+#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
+
+#define _TRANS_HSYNC_A 0x60008
+#define _TRANS_HSYNC_B 0x61008
+#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
+#define HSYNC_END_MASK REG_GENMASK(31, 16)
+#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
+#define HSYNC_START_MASK REG_GENMASK(15, 0)
+#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
+
+#define _TRANS_VTOTAL_A 0x6000c
+#define _TRANS_VTOTAL_B 0x6100c
+#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
+#define VTOTAL_MASK REG_GENMASK(31, 16)
+#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
+#define VACTIVE_MASK REG_GENMASK(15, 0)
+#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
+
+#define _TRANS_VBLANK_A 0x60010
+#define _TRANS_VBLANK_B 0x61010
+#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
+#define VBLANK_END_MASK REG_GENMASK(31, 16)
+#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
+#define VBLANK_START_MASK REG_GENMASK(15, 0)
+#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
+
+#define _TRANS_VSYNC_A 0x60014
+#define _TRANS_VSYNC_B 0x61014
+#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
+#define VSYNC_END_MASK REG_GENMASK(31, 16)
+#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
+#define VSYNC_START_MASK REG_GENMASK(15, 0)
+#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
+
+#define _PIPEASRC 0x6001c
+#define _PIPEBSRC 0x6101c
+#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
+#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
+#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
+#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
+#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
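+
+/*
+ * Illustrative sketch (not from Bspec): these timing fields are
+ * programmed as "value - 1" pairs packed with REG_FIELD_PREP(), e.g.
+ * the horizontal timings and pipe source size for a mode:
+ *
+ *	OUTREG(TRANS_HTOTAL(dev_priv, trans),
+ *	       HACTIVE(hdisplay - 1) | HTOTAL(htotal - 1));
+ *	OUTREG(PIPESRC(dev_priv, pipe),
+ *	       PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
+ *
+ * OUTREG() is a placeholder MMIO write accessor.
+ */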
+
+#define _BCLRPAT_A 0x60020
+#define _BCLRPAT_B 0x61020
+#define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
+
+#define _TRANS_VSYNCSHIFT_A 0x60028
+#define _TRANS_VSYNCSHIFT_B 0x61028
+#define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
+
+#define _TRANS_MULT_A 0x6002c
+#define _TRANS_MULT_B 0x6102c
+#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
+#define PORTB_HOTPLUG_INT_EN (1 << 29)
+#define PORTC_HOTPLUG_INT_EN (1 << 28)
+#define PORTD_HOTPLUG_INT_EN (1 << 27)
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
+ PORTC_HOTPLUG_INT_EN | \
+ PORTD_HOTPLUG_INT_EN | \
+ SDVOC_HOTPLUG_INT_EN | \
+ SDVOB_HOTPLUG_INT_EN | \
+ CRT_HOTPLUG_INT_EN)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+
+#define PORT_HOTPLUG_STAT(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
+/* HDMI/DP bits are g4x+ */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
+#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
+#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
+#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
+#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17)
+/* CRT/TV common between gen3+ */
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
+#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
+#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
+#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
+
+/* SDVO is different across gen3/4 */
+#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
+#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+/*
+ * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
+#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
+#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
+#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_G4X | \
+ SDVOC_HOTPLUG_INT_STATUS_G4X | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_I915 | \
+ SDVOC_HOTPLUG_INT_STATUS_I915 | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
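+
+/*
+ * Illustrative sketch (not from Bspec): each port has a two-bit status
+ * field, so mask first and then compare to tell long from short pulses:
+ *
+ *	u32 stat = INREG(PORT_HOTPLUG_STAT(dev_priv));
+ *	bool long_pulse = (stat & PORTB_HOTPLUG_INT_STATUS) ==
+ *			  PORTB_HOTPLUG_INT_LONG_PULSE;
+ *
+ * The status bits are sticky and are typically cleared by writing the
+ * read value back; INREG() is a placeholder MMIO read accessor.
+ */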
+
+/*
+ * SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI.
+ */
+#define _GEN3_SDVOB 0x61140
+#define _GEN3_SDVOC 0x61160
+#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
+#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
+#define GEN4_HDMIB GEN3_SDVOB
+#define GEN4_HDMIC GEN3_SDVOC
+#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
+#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
+#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
+#define PCH_SDVOB _MMIO(0xe1140)
+#define PCH_HDMIB PCH_SDVOB
+#define PCH_HDMIC _MMIO(0xe1150)
+#define PCH_HDMID _MMIO(0xe1160)
+
+#define PORT_DFT_I9XX _MMIO(0x61150)
+#define DC_BALANCE_RESET (1 << 25)
+#define PORT_DFT2_G4X(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
+#define DC_BALANCE_RESET_VLV (1 << 31)
+#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
+#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
+#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
+#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
+
+/* Gen 3 SDVO bits: */
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_SEL_SHIFT 30
+#define SDVO_PIPE_SEL_MASK (1 << 30)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+/*
+ * 915G/GM SDVO pixel multiplier.
+ * Programmed value is multiplier - 1, up to 5x.
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
+#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
+#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+ SDVO_INTERRUPT_ENABLE)
+#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
+#define SDVO_COLOR_FORMAT_MASK (7 << 26)
+#define SDVO_ENCODING_SDVO (0 << 10)
+#define SDVO_ENCODING_HDMI (2 << 10)
+#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
+#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
+#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
+#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
+#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
+#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_SHIFT_CPT 29
+#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+
+/* CHV SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_SHIFT_CHV 24
+#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA _MMIO(0x61178)
+/*
+ * Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each
+ * byte of the infoframe structure specified by CEA-861.
+ */
+#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_ASYNC_DATA_SIZE 36
+#define VIDEO_DIP_GMP_DATA_SIZE 36
+#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_PPS_DATA_SIZE 132
+#define VIDEO_DIP_CTL _MMIO(0x61170)
+/* Pre HSW: */
+#define VIDEO_DIP_ENABLE (1 << 31)
+#define VIDEO_DIP_PORT(port) ((port) << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
+#define VIDEO_DIP_ENABLE_AVI (1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
+#define VIDEO_DIP_ENABLE_SPD (8 << 21)
+#define VIDEO_DIP_SELECT_AVI (0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
+#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
+#define VIDEO_DIP_FREQ_ONCE (0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
+#define VDIP_ENABLE_PPS (1 << 24)
+#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
+#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
+#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+/* ADL and later: */
+#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
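+
+/*
+ * Illustrative sketch (not from Bspec): on pre-HSW hardware an infoframe
+ * is selected in VIDEO_DIP_CTL and then pushed through VIDEO_DIP_DATA one
+ * dword at a time, e.g. for an AVI infoframe:
+ *
+ *	OUTREG(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | VIDEO_DIP_SELECT_AVI |
+ *	       VIDEO_DIP_FREQ_VSYNC | VIDEO_DIP_PORT(port));
+ *	for (i = 0; i < len; i += 4)
+ *		OUTREG(VIDEO_DIP_DATA, *data++);
+ *
+ * A real enable sequence must also preserve the enable bits of the other
+ * packet types; OUTREG() is a placeholder MMIO write accessor.
+ */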
+
+#define PCH_GTC_CTL _MMIO(0xe7000)
+#define PCH_GTC_ENABLE (1 << 31)
+
+/* Display Port */
+#define DP_A _MMIO(0x64000) /* eDP */
+#define DP_B _MMIO(0x64100)
+#define DP_C _MMIO(0x64200)
+#define DP_D _MMIO(0x64300)
+#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
+#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
+#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
+#define DP_PORT_EN REG_BIT(31)
+#define DP_PIPE_SEL_MASK REG_GENMASK(30, 30)
+#define DP_PIPE_SEL(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK, (pipe))
+#define DP_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29)
+#define DP_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_IVB, (pipe))
+#define DP_PIPE_SEL_SHIFT_CHV 16
+#define DP_PIPE_SEL_MASK_CHV REG_GENMASK(17, 16)
+#define DP_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_CHV, (pipe))
+#define DP_LINK_TRAIN_MASK REG_GENMASK(29, 28)
+#define DP_LINK_TRAIN_PAT_1 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 0)
+#define DP_LINK_TRAIN_PAT_2 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)
+#define DP_LINK_TRAIN_PAT_IDLE REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 2)
+#define DP_LINK_TRAIN_OFF REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 3)
+#define DP_LINK_TRAIN_MASK_CPT REG_GENMASK(10, 8)
+#define DP_LINK_TRAIN_PAT_1_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 0)
+#define DP_LINK_TRAIN_PAT_2_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 1)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 2)
+#define DP_LINK_TRAIN_OFF_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 3)
+#define DP_VOLTAGE_MASK REG_GENMASK(27, 25)
+#define DP_VOLTAGE_0_4 REG_FIELD_PREP(DP_VOLTAGE_MASK, 0)
+#define DP_VOLTAGE_0_6 REG_FIELD_PREP(DP_VOLTAGE_MASK, 1)
+#define DP_VOLTAGE_0_8 REG_FIELD_PREP(DP_VOLTAGE_MASK, 2)
+#define DP_VOLTAGE_1_2 REG_FIELD_PREP(DP_VOLTAGE_MASK, 3)
+#define DP_PRE_EMPHASIS_MASK REG_GENMASK(24, 22)
+#define DP_PRE_EMPHASIS_0 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 0)
+#define DP_PRE_EMPHASIS_3_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 1)
+#define DP_PRE_EMPHASIS_6 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 2)
+#define DP_PRE_EMPHASIS_9_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 3)
+#define DP_PORT_WIDTH_MASK REG_GENMASK(21, 19)
+#define DP_PORT_WIDTH(width) REG_FIELD_PREP(DP_PORT_WIDTH_MASK, (width) - 1)
+#define DP_ENHANCED_FRAMING REG_BIT(18)
+#define EDP_PLL_FREQ_MASK REG_GENMASK(17, 16)
+#define EDP_PLL_FREQ_270MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 0)
+#define EDP_PLL_FREQ_162MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 1)
+#define DP_PORT_REVERSAL REG_BIT(15)
+#define EDP_PLL_ENABLE REG_BIT(14)
+#define DP_CLOCK_OUTPUT_ENABLE REG_BIT(13)
+#define DP_SCRAMBLING_DISABLE REG_BIT(12)
+#define DP_SCRAMBLING_DISABLE_ILK REG_BIT(7)
+#define DP_COLOR_RANGE_16_235 REG_BIT(8)
+#define DP_AUDIO_OUTPUT_ENABLE REG_BIT(6)
+#define DP_SYNC_VS_HIGH REG_BIT(4)
+#define DP_SYNC_HS_HIGH REG_BIT(3)
+#define DP_DETECTED REG_BIT(2)
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = (dot clock * bytes per pixel) / (ls_clk * # of lanes)
+ *
+ * ls_clk (we assume) is the DP link symbol clock (162 or 270 MHz, i.e.
+ * a 1.62 or 2.7 Gbps link).
+ *
+ * The GMCH value is used internally.
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+#define _PIPEA_DATA_M_G4X 0x70050
+#define _PIPEB_DATA_M_G4X 0x71050
+#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
+/* Transfer unit size for display port, minus 1; default is 0x3f (TU size 64) */
+#define TU_SIZE_MASK REG_GENMASK(30, 25)
+#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */
+#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0)
+#define DATA_LINK_N_MAX (0x800000)
+
+#define _PIPEA_DATA_N_G4X 0x70054
+#define _PIPEB_DATA_N_G4X 0x71054
+#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
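+
+/*
+ * Illustrative sketch (not from Bspec) of the data M/N computation
+ * described above, with bpp in bits and the ratio reduced to fit the
+ * 24-bit fields:
+ *
+ *	u64 m = (u64)pixel_clock * bpp;
+ *	u64 n = (u64)ls_clk * nlanes * 8;
+ *	while (m > DATA_LINK_M_N_MASK || n > DATA_LINK_M_N_MASK) {
+ *		m >>= 1;
+ *		n >>= 1;
+ *	}
+ *	OUTREG(PIPE_DATA_M_G4X(pipe), TU_SIZE(64) | m);
+ *	OUTREG(PIPE_DATA_N_G4X(pipe), n);
+ *
+ * The "* 8" in the denominator compensates for bpp being given in bits
+ * rather than bytes; a real implementation would round rather than
+ * truncate while reducing.
+ */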
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+#define _PIPEA_LINK_M_G4X 0x70060
+#define _PIPEB_LINK_M_G4X 0x71060
+#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+
+#define _PIPEA_LINK_N_G4X 0x70064
+#define _PIPEB_LINK_N_G4X 0x71064
+#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
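+
+/*
+ * Illustrative sketch: the link M/N pair is the plain pixel_clock /
+ * ls_clk ratio, reduced to fit the 24-bit fields just like data M/N:
+ *
+ *	OUTREG(PIPE_LINK_M_G4X(pipe), pixel_clock);
+ *	OUTREG(PIPE_LINK_N_G4X(pipe), ls_clk);
+ *
+ * assuming both values already fit DATA_LINK_M_N_MASK.
+ */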
+
+/* Pipe A */
+#define _PIPEADSL 0x70000
+#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
+#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
+#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
+
+#define _TRANSACONF 0x70008
+#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
+#define TRANSCONF_ENABLE REG_BIT(31)
+#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
+#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
+#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
+#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
+#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
+#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
+#define TRANSCONF_FORCE_BORDER REG_BIT(25)
+#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
+#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 0)
+#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 1)
+#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
+#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
+#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
+#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
+#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
+/*
+ * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
+ * DBL=power saving pixel doubling, PF-ID* requires panel fitter
+ */
+#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
+#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
+#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
+#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
+#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
+#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
+#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
+#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
+#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
+#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
+#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
+#define TRANSCONF_WGC_ENABLE REG_BIT(15) /* vlv/chv only */
+#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
+#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
+#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
+#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
+#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
+#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
+#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
+#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
+#define TRANSCONF_DITHER_EN REG_BIT(4)
+#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
+#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
+#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
+#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
+#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
+
+#define _PIPEASTAT 0x70024
+#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
+#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
+#define PIPE_CRC_DONE_ENABLE (1UL << 28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
+#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_HBLANK_INT_STATUS (1UL << 0)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
+#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
+#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
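+
+/*
+ * Illustrative sketch (not from Bspec): PIPESTAT keeps enable bits in
+ * the high word and sticky status bits in the low word; status is
+ * cleared by writing it back. E.g. enabling and acking vblank:
+ *
+ *	u32 val = INREG(PIPESTAT(dev_priv, pipe));
+ *	val |= PIPE_VBLANK_INTERRUPT_ENABLE;
+ *	OUTREG(PIPESTAT(dev_priv, pipe), val | PIPE_VBLANK_INTERRUPT_STATUS);
+ *
+ * INREG()/OUTREG() are placeholder MMIO accessors.
+ */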
+
+#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
+#define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
+#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
+
+#define _PIPE_MISC_A 0x70030
+#define _PIPE_MISC_B 0x71030
+#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
+#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
+#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
+#define PIPE_MISC_PSR_MASK_PRIMARY_FLIP REG_BIT(23) /* bdw */
+#define PIPE_MISC_PSR_MASK_SPRITE_ENABLE REG_BIT(22) /* bdw */
+#define PIPE_MISC_PSR_MASK_PIPE_REG_WRITE REG_BIT(21) /* skl+ */
+#define PIPE_MISC_PSR_MASK_CURSOR_MOVE REG_BIT(21) /* bdw */
+#define PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT REG_BIT(20)
+#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
+#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
+/*
+ * For Display < 13, bits 5-7 of PIPE_MISC represent DITHER BPC with
+ * valid values of 6, 8 and 10 BPC.
+ * On ADLP+, bits 5-7 represent PORT OUTPUT BPC with valid values of
+ * 6, 8, 10 and 12 BPC.
+ */
+#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
+#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
+#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
+#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
+#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
+#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
+#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
+#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
+#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
+#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
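+
+/*
+ * Illustrative sketch: selecting 10 bpc output via the field above:
+ *
+ *	val &= ~PIPE_MISC_BPC_MASK;
+ *	val |= PIPE_MISC_BPC_10;
+ *
+ * On adlp+ the same field carries the port output bpc, where 12 bpc is
+ * additionally available as PIPE_MISC_BPC_12_ADLP.
+ */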
+
+#define _PIPE_MISC2_A 0x7002C
+#define _PIPE_MISC2_B 0x7102C
+#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
+#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
+
+#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
+#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
+#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
+#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
+#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
+#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
+#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
+#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
+#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
+#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
+#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
+#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
+#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
+#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
+#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
+#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
+#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
+#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
+#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
+#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
+#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
+#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
+#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
+#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
+#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
+#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
+#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
+#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
+#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
+
+#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
+#define CBR_PND_DEADLINE_DISABLE (1 << 31)
+#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
+
+#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
+#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
+
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ * do {
+ *	high1 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *		PIPE_FRAME_HIGH_SHIFT;
+ *	low1 = (INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ *	       PIPE_FRAME_LOW_SHIFT;
+ *	high2 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *		PIPE_FRAME_HIGH_SHIFT;
+ * } while (high1 != high2);
+ * frame = (high1 << 8) | low1;
+ */
+#define _PIPEAFRAMEHIGH 0x70040
+#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+
+#define _PIPEAFRAMEPIXEL 0x70044
+#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
+
+/* GM45+ just has to be different */
+#define _PIPEA_FRMCOUNT_G4X 0x70040
+#define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
+
+#define _PIPEA_FLIPCOUNT_G4X 0x70044
+#define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
+
+/* CHV pipe B blender */
+#define _CHV_BLEND_A 0x60a00
+#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
+#define CHV_BLEND_MASK REG_GENMASK(31, 30)
+#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0)
+#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1)
+#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2)
+
+#define _CHV_CANVAS_A 0x60a04
+#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
+#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20)
+#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10)
+#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0)
+
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
+
+/*
+ * VBIOS flags
+ * gen2:
+ * [00:06] alm,mgm
+ * [10:16] all
+ * [30:32] alm,mgm
+ * gen3+:
+ * [00:0f] all
+ * [10:1f] all
+ * [30:32] all
+ */
+#define SWF0(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
+#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
+#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
+#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
+#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
+#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
+#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL _MMIO(0x45300)
+#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
+#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
+
+#define _PIPEA_DATA_M1 0x60030
+#define _PIPEB_DATA_M1 0x61030
+#define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
+
+#define _PIPEA_DATA_N1 0x60034
+#define _PIPEB_DATA_N1 0x61034
+#define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
+
+#define _PIPEA_DATA_M2 0x60038
+#define _PIPEB_DATA_M2 0x61038
+#define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
+
+#define _PIPEA_DATA_N2 0x6003c
+#define _PIPEB_DATA_N2 0x6103c
+#define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
+
+#define _PIPEA_LINK_M1 0x60040
+#define _PIPEB_LINK_M1 0x61040
+#define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
+
+#define _PIPEA_LINK_N1 0x60044
+#define _PIPEB_LINK_N1 0x61044
+#define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
+
+#define _PIPEA_LINK_M2 0x60048
+#define _PIPEB_LINK_M2 0x61048
+#define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
+
+#define _PIPEA_LINK_N2 0x6004c
+#define _PIPEB_LINK_N2 0x6104c
+#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
+
+/*
+ * Skylake scalers
+ */
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
+#define _PS_1A_CTRL 0x68180
+#define _PS_2A_CTRL 0x68280
+#define _PS_1B_CTRL 0x68980
+#define _PS_2B_CTRL 0x68A80
+#define _PS_1C_CTRL 0x69180
+#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
+ _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
+#define PS_SCALER_EN REG_BIT(31)
+#define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */
+#define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0)
+#define PS_SCALER_TYPE_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 1)
+#define SKL_PS_SCALER_MODE_MASK REG_GENMASK(29, 28) /* skl/bxt */
+#define SKL_PS_SCALER_MODE_DYN REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 0)
+#define SKL_PS_SCALER_MODE_HQ REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 1)
+#define SKL_PS_SCALER_MODE_NV12 REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 2)
+#define PS_SCALER_MODE_MASK REG_BIT(29) /* glk-tgl */
+#define PS_SCALER_MODE_NORMAL REG_FIELD_PREP(PS_SCALER_MODE_MASK, 0)
+#define PS_SCALER_MODE_PLANAR REG_FIELD_PREP(PS_SCALER_MODE_MASK, 1)
+#define PS_ADAPTIVE_FILTERING_EN REG_BIT(28) /* icl+ */
+#define PS_BINDING_MASK REG_GENMASK(27, 25)
+#define PS_BINDING_PIPE REG_FIELD_PREP(PS_BINDING_MASK, 0)
+#define PS_BINDING_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_MASK, (plane_id) + 1)
+#define PS_FILTER_MASK REG_GENMASK(24, 23)
+#define PS_FILTER_MEDIUM REG_FIELD_PREP(PS_FILTER_MASK, 0)
+#define PS_FILTER_PROGRAMMED REG_FIELD_PREP(PS_FILTER_MASK, 1)
+#define PS_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_FILTER_MASK, 2)
+#define PS_FILTER_BILINEAR REG_FIELD_PREP(PS_FILTER_MASK, 3)
+#define PS_ADAPTIVE_FILTER_MASK REG_BIT(22) /* icl+ */
+#define PS_ADAPTIVE_FILTER_MEDIUM REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 0)
+#define PS_ADAPTIVE_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 1)
+#define PS_PIPE_SCALER_LOC_MASK REG_BIT(21) /* icl+ */
+#define PS_PIPE_SCALER_LOC_AFTER_OUTPUT_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 0) /* non-linear */
+#define PS_PIPE_SCALER_LOC_AFTER_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 1) /* linear */
+#define PS_VERT3TAP REG_BIT(21) /* skl/bxt */
+#define PS_VERT_INT_INVERT_FIELD REG_BIT(20)
+#define PS_PROG_SCALE_FACTOR REG_BIT(19) /* tgl+ */
+#define PS_PWRUP_PROGRESS REG_BIT(17)
+#define PS_V_FILTER_BYPASS REG_BIT(8)
+#define PS_VADAPT_EN REG_BIT(7) /* skl/bxt */
+#define PS_VADAPT_MODE_MASK REG_GENMASK(6, 5) /* skl/bxt */
+#define PS_VADAPT_MODE_LEAST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 0)
+#define PS_VADAPT_MODE_MOD_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 1)
+#define PS_VADAPT_MODE_MOST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 3)
+#define PS_BINDING_Y_MASK REG_GENMASK(7, 5) /* icl-tgl */
+#define PS_BINDING_Y_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_Y_MASK, (plane_id) + 1)
+#define PS_Y_VERT_FILTER_SELECT_MASK REG_BIT(4) /* glk+ */
+#define PS_Y_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_VERT_FILTER_SELECT_MASK, (set))
+#define PS_Y_HORZ_FILTER_SELECT_MASK REG_BIT(3) /* glk+ */
+#define PS_Y_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_HORZ_FILTER_SELECT_MASK, (set))
+#define PS_UV_VERT_FILTER_SELECT_MASK REG_BIT(2) /* glk+ */
+#define PS_UV_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_VERT_FILTER_SELECT_MASK, (set))
+#define PS_UV_HORZ_FILTER_SELECT_MASK REG_BIT(1) /* glk+ */
+#define PS_UV_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_HORZ_FILTER_SELECT_MASK, (set))
+
+#define _PS_PWR_GATE_1A 0x68160
+#define _PS_PWR_GATE_2A 0x68260
+#define _PS_PWR_GATE_1B 0x68960
+#define _PS_PWR_GATE_2B 0x68A60
+#define _PS_PWR_GATE_1C 0x69160
+#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
+ _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
+#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31)
+#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3)
+#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0)
+#define PS_PWR_GATE_SETTLING_TIME_64 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 1)
+#define PS_PWR_GATE_SETTLING_TIME_96 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 2)
+#define PS_PWR_GATE_SETTLING_TIME_128 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 3)
+#define PS_PWR_GATE_SLPEN_MASK REG_GENMASK(1, 0)
+#define PS_PWR_GATE_SLPEN_8 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 0)
+#define PS_PWR_GATE_SLPEN_16 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 1)
+#define PS_PWR_GATE_SLPEN_24 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 2)
+#define PS_PWR_GATE_SLPEN_32 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 3)
+
+#define _PS_WIN_POS_1A 0x68170
+#define _PS_WIN_POS_2A 0x68270
+#define _PS_WIN_POS_1B 0x68970
+#define _PS_WIN_POS_2B 0x68A70
+#define _PS_WIN_POS_1C 0x69170
+#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
+ _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
+#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x))
+#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YPOS(y) REG_FIELD_PREP(PS_WIN_YPOS_MASK, (y))
+
+#define _PS_WIN_SZ_1A 0x68174
+#define _PS_WIN_SZ_2A 0x68274
+#define _PS_WIN_SZ_1B 0x68974
+#define _PS_WIN_SZ_2B 0x68A74
+#define _PS_WIN_SZ_1C 0x69174
+#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
+ _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
+#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w))
+#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YSIZE(h) REG_FIELD_PREP(PS_WIN_YSIZE_MASK, (h))
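+
+/*
+ * Illustrative sketch (not from Bspec): a scaler window is positioned
+ * and sized with full pixel values (no minus-one encoding here):
+ *
+ *	OUTREG(SKL_PS_WIN_POS(pipe, id), PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
+ *	OUTREG(SKL_PS_WIN_SZ(pipe, id),
+ *	       PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
+ *
+ * OUTREG() is a placeholder MMIO write accessor.
+ */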
+
+#define _PS_VSCALE_1A 0x68184
+#define _PS_VSCALE_2A 0x68284
+#define _PS_VSCALE_1B 0x68984
+#define _PS_VSCALE_2B 0x68A84
+#define _PS_VSCALE_1C 0x69184
+#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
+ _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
+
+#define _PS_HSCALE_1A 0x68190
+#define _PS_HSCALE_2A 0x68290
+#define _PS_HSCALE_1B 0x68990
+#define _PS_HSCALE_2B 0x68A90
+#define _PS_HSCALE_1C 0x69190
+#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
+ _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
+
+#define _PS_VPHASE_1A 0x68188
+#define _PS_VPHASE_2A 0x68288
+#define _PS_VPHASE_1B 0x68988
+#define _PS_VPHASE_2B 0x68A88
+#define _PS_VPHASE_1C 0x69188
+#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
+ _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
+#define PS_Y_PHASE_MASK REG_GENMASK(31, 16)
+#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x))
+#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0)
+#define PS_UV_RGB_PHASE(x) REG_FIELD_PREP(PS_UV_RGB_PHASE_MASK, (x))
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
+
+#define _PS_HPHASE_1A 0x68194
+#define _PS_HPHASE_2A 0x68294
+#define _PS_HPHASE_1B 0x68994
+#define _PS_HPHASE_2B 0x68A94
+#define _PS_HPHASE_1C 0x69194
+#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
+ _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
+
+#define _PS_ECC_STAT_1A 0x681D0
+#define _PS_ECC_STAT_2A 0x682D0
+#define _PS_ECC_STAT_1B 0x689D0
+#define _PS_ECC_STAT_2B 0x68AD0
+#define _PS_ECC_STAT_1C 0x691D0
+#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
+ _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
+
+#define _PS_COEF_SET0_INDEX_1A 0x68198
+#define _PS_COEF_SET0_INDEX_2A 0x68298
+#define _PS_COEF_SET0_INDEX_1B 0x68998
+#define _PS_COEF_SET0_INDEX_2B 0x68A98
+#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
+ _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
+#define PS_COEF_INDEX_AUTO_INC REG_BIT(10)
+
+#define _PS_COEF_SET0_DATA_1A 0x6819C
+#define _PS_COEF_SET0_DATA_2A 0x6829C
+#define _PS_COEF_SET0_DATA_1B 0x6899C
+#define _PS_COEF_SET0_DATA_2B 0x68A9C
+#define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
+ _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
+
+/* More Ivybridge lolz */
+#define DE_ERR_INT_IVB (1 << 30)
+#define DE_GSE_IVB (1 << 29)
+#define DE_PCH_EVENT_IVB (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB (1 << 26)
+#define DE_EDP_PSR_INT_HSW (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
+#define DE_PIPEC_VBLANK_IVB (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
+#define DE_PIPEB_VBLANK_IVB (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB (1 << 0)
+#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
+
+#define XELPD_DISPLAY_ERR_FATAL_MASK _MMIO(0x4421c)
+
+#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
+#define GEN8_PIPE_FIFO_UNDERRUN REG_BIT(31)
+#define GEN8_PIPE_CDCLK_CRC_ERROR REG_BIT(29)
+#define GEN8_PIPE_CDCLK_CRC_DONE REG_BIT(28)
+#define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */
+#define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl-mtl */
+#define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl */
+#define GEN12_PIPEDMC_FLIPQ_DONE REG_BIT(24) /* tgl-adl */
+#define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */
+#define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */
+#define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */
+#define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */
+#define MTL_PLANE_ATS_FAULT REG_BIT(18) /* mtl+ */
+#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
+#define MTL_PIPEDMC_FLIPQ_DONE REG_BIT(17) /* mtl */
+#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
+#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
+#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */
+#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */
+#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */
+#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id))
+#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
+#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
+#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
+#define GEN9_PIPE_PLANE3_FAULT REG_BIT(9) /* skl+ */
+#define GEN8_PIPE_SPRITE_FAULT REG_BIT(9) /* bdw */
+#define GEN9_PIPE_PLANE2_FAULT REG_BIT(8) /* skl+ */
+#define GEN8_PIPE_PRIMARY_FAULT REG_BIT(8) /* bdw */
+#define GEN9_PIPE_PLANE1_FAULT REG_BIT(7) /* skl+ */
+#define GEN9_PIPE_PLANE4_FLIP_DONE REG_BIT(6) /* skl+ */
+#define GEN9_PIPE_PLANE3_FLIP_DONE REG_BIT(5) /* skl+ */
+#define GEN8_PIPE_SPRITE_FLIP_DONE REG_BIT(5) /* bdw */
+#define GEN9_PIPE_PLANE2_FLIP_DONE REG_BIT(4) /* skl+ */
+#define GEN8_PIPE_PRIMARY_FLIP_DONE REG_BIT(4) /* bdw */
+#define GEN9_PIPE_PLANE1_FLIP_DONE REG_BIT(3) /* skl+ */
+#define GEN9_PIPE_PLANE_FLIP_DONE(plane_id) \
+ REG_BIT(((plane_id) >= PLANE_5 ? 16 - PLANE_5 : 3 - PLANE_1) + (plane_id)) /* skl+ */
+#define GEN8_PIPE_SCAN_LINE_EVENT REG_BIT(2)
+#define GEN8_PIPE_VSYNC REG_BIT(1)
+#define GEN8_PIPE_VBLANK REG_BIT(0)
+
+#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \
+ GEN8_DE_PIPE_IER(pipe), \
+ GEN8_DE_PIPE_IIR(pipe))
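+
+/*
+ * Illustrative sketch (not from Bspec) of the usual flow for these
+ * ISR/IMR/IIR/IER register sets: pending events latch in IIR and are
+ * acked by writing the bits back:
+ *
+ *	u32 iir = INREG(GEN8_DE_PIPE_IIR(pipe));
+ *	if (iir) {
+ *		OUTREG(GEN8_DE_PIPE_IIR(pipe), iir);
+ *		if (iir & GEN8_PIPE_VBLANK)
+ *			drm_handle_vblank(...);
+ *	}
+ *
+ * IMR masks individual sources while IER enables the interrupt to be
+ * raised at all; INREG()/OUTREG() are placeholder MMIO accessors.
+ */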
+
+#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
+#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)
+
+#define GEN8_DE_PORT_ISR _MMIO(0x44440)
+#define GEN8_DE_PORT_IMR _MMIO(0x44444)
+#define GEN8_DE_PORT_IIR _MMIO(0x44448)
+#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define DSI1_NON_TE (1 << 31)
+#define DSI0_NON_TE (1 << 30)
+#define ICL_AUX_CHANNEL_E (1 << 29)
+#define ICL_AUX_CHANNEL_F (1 << 28)
+#define GEN9_AUX_CHANNEL_D (1 << 27)
+#define GEN9_AUX_CHANNEL_C (1 << 26)
+#define GEN9_AUX_CHANNEL_B (1 << 25)
+#define DSI1_TE (1 << 24)
+#define DSI0_TE (1 << 23)
+#define GEN8_DE_PORT_HOTPLUG(hpd_pin) REG_BIT(3 + _HPD_PIN_DDI(hpd_pin))
+#define BXT_DE_PORT_HOTPLUG_MASK (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C))
+#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
+#define BXT_DE_PORT_GMBUS (1 << 1)
+#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_USBC6 REG_BIT(13)
+#define XELPD_DE_PORT_AUX_DDIE REG_BIT(13)
+#define TGL_DE_PORT_AUX_USBC5 REG_BIT(12)
+#define XELPD_DE_PORT_AUX_DDID REG_BIT(12)
+#define TGL_DE_PORT_AUX_USBC4 REG_BIT(11)
+#define TGL_DE_PORT_AUX_USBC3 REG_BIT(10)
+#define TGL_DE_PORT_AUX_USBC2 REG_BIT(9)
+#define TGL_DE_PORT_AUX_USBC1 REG_BIT(8)
+#define TGL_DE_PORT_AUX_DDIC REG_BIT(2)
+#define TGL_DE_PORT_AUX_DDIB REG_BIT(1)
+#define TGL_DE_PORT_AUX_DDIA REG_BIT(0)
+
+#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \
+ GEN8_DE_PORT_IER, \
+ GEN8_DE_PORT_IIR)
+
+#define GEN8_DE_MISC_ISR _MMIO(0x44460)
+#define GEN8_DE_MISC_IMR _MMIO(0x44464)
+#define GEN8_DE_MISC_IIR _MMIO(0x44468)
+#define GEN8_DE_MISC_IER _MMIO(0x4446c)
+#define XELPDP_RM_TIMEOUT REG_BIT(29)
+#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
+#define GEN8_DE_MISC_GSE REG_BIT(27)
+#define GEN8_DE_EDP_PSR REG_BIT(19)
+#define XELPDP_PMDEMAND_RSP REG_BIT(3)
+#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1)
+
+#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \
+ GEN8_DE_MISC_IER, \
+ GEN8_DE_MISC_IIR)
+
+#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
+#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
+#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
+#define GEN11_DE_PCH_IRQ (1 << 23)
+#define GEN11_DE_MISC_IRQ (1 << 22)
+#define GEN11_DE_HPD_IRQ (1 << 21)
+#define GEN11_DE_PORT_IRQ (1 << 20)
+#define GEN11_DE_PIPE_C (1 << 18)
+#define GEN11_DE_PIPE_B (1 << 17)
+#define GEN11_DE_PIPE_A (1 << 16)
+
+#define GEN11_DE_HPD_ISR _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR _MMIO(0x44478)
+#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN11_TC_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC1))
+#define GEN11_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
+
+#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \
+ GEN11_DE_HPD_IER, \
+ GEN11_DE_HPD_IIR)
+
+#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
+#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4))
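+
+/*
+ * Illustrative sketch: each TC pin owns a 4-bit slice of the hotplug
+ * control register, so detection is checked per pin:
+ *
+ *	u32 val = INREG(GEN11_TC_HOTPLUG_CTL);
+ *	bool long_detect = val & GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin);
+ *
+ * The *_DETECT bits are sticky and cleared by writing the value back;
+ * INREG() is a placeholder MMIO read accessor.
+ */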
+
+#define PICAINTERRUPT_ISR _MMIO(0x16FE50)
+#define PICAINTERRUPT_IMR _MMIO(0x16FE54)
+#define PICAINTERRUPT_IIR _MMIO(0x16FE58)
+#define PICAINTERRUPT_IER _MMIO(0x16FE5C)
+#define XELPDP_DP_ALT_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_DP_ALT_HOTPLUG_MASK REG_GENMASK(19, 16)
+#define XELPDP_AUX_TC(hpd_pin) REG_BIT(8 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_AUX_TC_MASK REG_GENMASK(11, 8)
+#define XE2LPD_AUX_DDI(hpd_pin) REG_BIT(6 + _HPD_PIN_DDI(hpd_pin))
+#define XE2LPD_AUX_DDI_MASK REG_GENMASK(7, 6)
+#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
+#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0)
+
+#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \
+ PICAINTERRUPT_IER, \
+ PICAINTERRUPT_IIR)
+
+#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200))
+#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6)
+#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5)
+#define XELPDP_TBT_HPD_SHORT_DETECT REG_BIT(4)
+#define XELPDP_DP_ALT_HOTPLUG_ENABLE REG_BIT(2)
+#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1)
+#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0)
+
+#define XELPDP_INITIATE_PMDEMAND_REQUEST(dword) _MMIO(0x45230 + 4 * (dword))
+#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16)
+#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12)
+#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8)
+#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4)
+#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6)
+#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4)
+#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0)
+
+#define XELPDP_PMDEMAND_REQ_ENABLE REG_BIT(31)
+#define XELPDP_PMDEMAND_CDCLK_FREQ_MASK REG_GENMASK(30, 20)
+#define XELPDP_PMDEMAND_DDICLK_FREQ_MASK REG_GENMASK(18, 8)
+#define XELPDP_PMDEMAND_SCALERS_MASK REG_GENMASK(6, 4)
+#define XELPDP_PMDEMAND_PLLS_MASK REG_GENMASK(2, 0)
+
+#define GEN12_DCPR_STATUS_1 _MMIO(0x46440)
+#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26)
+
+#define FUSE_STRAP _MMIO(0x42014)
+#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31)
+#define ILK_INTERNAL_DISPLAY_DISABLE REG_BIT(30)
+#define ILK_DISPLAY_DEBUG_DISABLE REG_BIT(29)
+#define IVB_PIPE_C_DISABLE REG_BIT(28)
+#define ILK_HDCP_DISABLE REG_BIT(25)
+#define ILK_eDP_A_DISABLE REG_BIT(24)
+#define HSW_CDCLK_LIMIT REG_BIT(24)
+#define ILK_DESKTOP REG_BIT(23)
+#define HSW_CPU_SSC_ENABLE REG_BIT(21)
+
+#define FUSE_STRAP3 _MMIO(0x42020)
+#define HSW_REF_CLK_SELECT REG_BIT(1)
+
+#define CHICKEN_MISC_2 _MMIO(0x42084)
+#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */
+#define BMG_DARB_HALF_BLK_END_BURST REG_BIT(27)
+#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
+#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
+#define GLK_CL2_PWR_DOWN REG_BIT(12)
+#define GLK_CL1_PWR_DOWN REG_BIT(11)
+#define GLK_CL0_PWR_DOWN REG_BIT(10)
+
+#define CHICKEN_MISC_3 _MMIO(0x42088)
+#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
+#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
+#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
+
+#define CHICKEN_MISC_4 _MMIO(0x4208c)
+#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
+#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
+#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
+
+#define _CHICKEN_TRANS_A 0x420c0
+#define _CHICKEN_TRANS_B 0x420c4
+#define _CHICKEN_TRANS_C 0x420c8
+#define _CHICKEN_TRANS_EDP 0x420cc
+#define _CHICKEN_TRANS_D 0x420d8
+#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+ [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
+ [TRANSCODER_A] = _CHICKEN_TRANS_A, \
+ [TRANSCODER_B] = _CHICKEN_TRANS_B, \
+ [TRANSCODER_C] = _CHICKEN_TRANS_C, \
+ [TRANSCODER_D] = _CHICKEN_TRANS_D))
+#define _MTL_CHICKEN_TRANS_A 0x604e0
+#define _MTL_CHICKEN_TRANS_B 0x614e0
+#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+ _MTL_CHICKEN_TRANS_A, \
+ _MTL_CHICKEN_TRANS_B)
+#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
+#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
+#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
+#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
+#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
+#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
+#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
+#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
+#define DP_FEC_BS_JITTER_WA REG_BIT(15)
+#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
+#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
+#define HDCP_LINE_REKEY_DISABLE REG_BIT(0)
+
+#define DISP_ARB_CTL2 _MMIO(0x45004)
+#define DISP_DATA_PARTITION_5_6 REG_BIT(6)
+#define DISP_IPC_ENABLE REG_BIT(3)
+
+#define GEN7_MSG_CTL _MMIO(0x45010)
+#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
+#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
+
+#define _BW_BUDDY0_CTL 0x45130
+#define _BW_BUDDY1_CTL 0x45140
+#define BW_BUDDY_CTL(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_CTL, \
+ _BW_BUDDY1_CTL))
+#define BW_BUDDY_DISABLE REG_BIT(31)
+#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
+#define BW_BUDDY_TLB_REQ_TIMER(x) REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, x)
+
+#define _BW_BUDDY0_PAGE_MASK 0x45134
+#define _BW_BUDDY1_PAGE_MASK 0x45144
+#define BW_BUDDY_PAGE_MASK(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_PAGE_MASK, \
+ _BW_BUDDY1_PAGE_MASK))
+
+#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
+#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
+#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
+
+#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
+#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
+#define DCPR_MASK_LPMODE REG_BIT(26)
+#define DCPR_SEND_RESP_IMM REG_BIT(25)
+#define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24)
+
+#define XELPD_CHICKEN_DCPR_3 _MMIO(0x46438)
+#define DMD_RSP_TIMEOUT_DISABLE REG_BIT(19)
+
+#define SKL_DFSM _MMIO(0x51000)
+#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
+#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define ICL_DFSM_DMC_DISABLE (1 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
+#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
+
+#define XE2LPD_DE_CAP _MMIO(0x41100)
+#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
+#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28)
+#define XE2LPD_DE_CAP_DSC_REMOVED 1
+#define XE2LPD_DE_CAP_SCALER_MASK REG_GENMASK(27, 26)
+#define XE2LPD_DE_CAP_SCALER_SINGLE 1
+
+#define SKL_DSSM _MMIO(0x51004)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
+
+/* GEN11 chicken */
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+ _PIPEB_CHICKEN)
+#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30)
+#define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30)
+#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15)
+#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
+#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
+
+#define PCH_DISPLAY_BASE 0xc0000u
+
+/* south display engine interrupt: IBX */
+#define SDE_AUDIO_POWER_D (1 << 27)
+#define SDE_AUDIO_POWER_C (1 << 26)
+#define SDE_AUDIO_POWER_B (1 << 25)
+#define SDE_AUDIO_POWER_SHIFT (25)
+#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS (1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
+#define SDE_AUDIO_HDCP_MASK (3 << 22)
+#define SDE_AUDIO_TRANSB (1 << 21)
+#define SDE_AUDIO_TRANSA (1 << 20)
+#define SDE_AUDIO_TRANS_MASK (3 << 20)
+#define SDE_POISON (1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB (1 << 17)
+#define SDE_FDI_RXA (1 << 16)
+#define SDE_FDI_MASK (3 << 16)
+#define SDE_AUXD (1 << 15)
+#define SDE_AUXC (1 << 14)
+#define SDE_AUXB (1 << 13)
+#define SDE_AUX_MASK (7 << 13)
+/* 12 reserved */
+#define SDE_CRT_HOTPLUG (1 << 11)
+#define SDE_PORTD_HOTPLUG (1 << 10)
+#define SDE_PORTC_HOTPLUG (1 << 9)
+#define SDE_PORTB_HOTPLUG (1 << 8)
+#define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
+ SDE_SDVOB_HOTPLUG | \
+ SDE_PORTB_HOTPLUG | \
+ SDE_PORTC_HOTPLUG | \
+ SDE_PORTD_HOTPLUG)
+#define SDE_TRANSB_CRC_DONE (1 << 5)
+#define SDE_TRANSB_CRC_ERR (1 << 4)
+#define SDE_TRANSB_FIFO_UNDER (1 << 3)
+#define SDE_TRANSA_CRC_DONE (1 << 2)
+#define SDE_TRANSA_CRC_ERR (1 << 1)
+#define SDE_TRANSA_FIFO_UNDER (1 << 0)
+#define SDE_TRANS_MASK (0x3f)
+
+/* south display engine interrupt: CPT - CNP */
+#define SDE_AUDIO_POWER_D_CPT (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
+#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_SDVOB_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT | \
+ SDE_PORTA_HOTPLUG_SPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_ERROR_CPT (1 << 16)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
+
+/* south display engine interrupt: ICP/TGP/MTP */
+#define SDE_PICAINTERRUPT REG_BIT(31)
+#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
+#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */
+#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
+#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_A))
+#define SDE_TC_HOTPLUG_MASK_ICP (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
+
+#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
+ SDEIER, \
+ SDEIIR)
+
+#define SERR_INT _MMIO(0xc4040)
+#define SERR_INT_POISON (1 << 31)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
+#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
+#define BXT_DDIA_HPD_INVERT (1 << 27)
+#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
+#define PORTD_HOTPLUG_ENABLE (1 << 20)
+#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
+#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
+#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define BXT_DDIC_HPD_INVERT (1 << 11)
+#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
+#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
+#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define BXT_DDIB_HPD_INVERT (1 << 3)
+#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
+#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
+#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
+ BXT_DDIB_HPD_INVERT | \
+ BXT_DDIC_HPD_INVERT)
+
+#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
+#define PORTE_HOTPLUG_ENABLE (1 << 4)
+#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
+#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+
+/* This register reuses the PCH_PORT_HOTPLUG register offset. The
+ * functionality covered by PCH_PORT_HOTPLUG is split into
+ * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
+ */
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin) (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
+
+#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
+#define ICP_TC_HPD_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define ICP_TC_HPD_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define ICP_TC_HPD_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
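As a rough illustration of the DDI/TC split described above, enabling hot plug detection on one pin of each type might look like this (a minimal sketch assuming the driver's intel_de_read()/intel_de_write() MMIO helpers; not the actual enable path, which also clears stale status bits):

/* Sketch: enable HPD detection on one DDI pin and one Type-C pin. */
static void example_icp_hpd_enable(struct intel_display *display,
				   enum hpd_pin ddi_pin, enum hpd_pin tc_pin)
{
	u32 val;

	val = intel_de_read(display, SHOTPLUG_CTL_DDI);
	val |= SHOTPLUG_CTL_DDI_HPD_ENABLE(ddi_pin);
	intel_de_write(display, SHOTPLUG_CTL_DDI, val);

	val = intel_de_read(display, SHOTPLUG_CTL_TC);
	val |= ICP_TC_HPD_ENABLE(tc_pin);
	intel_de_write(display, SHOTPLUG_CTL_TC, val);
}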
+
+#define SHPD_FILTER_CNT _MMIO(0xc4038)
+#define SHPD_FILTER_CNT_500_ADJ 0x001D9
+#define SHPD_FILTER_CNT_250 0x000F8
+
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+
+#define _PCH_FPA0 0xc6040
+#define _PCH_FPB0 0xc6048
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define FP_CB_TUNE (0x3 << 22)
+
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB1 0xc604c
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
+
+#define PCH_DPLL_TEST _MMIO(0xc606c)
+
+#define PCH_DREF_CONTROL _MMIO(0xC6200)
+#define DREF_CONTROL_MASK 0x7fc3
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
+#define DREF_SSC_SOURCE_DISABLE (0 << 11)
+#define DREF_SSC_SOURCE_ENABLE (2 << 11)
+#define DREF_SSC_SOURCE_MASK (3 << 11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
+#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
+#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
+#define DREF_SSC4_DOWNSPREAD (0 << 6)
+#define DREF_SSC4_CENTERSPREAD (1 << 6)
+#define DREF_SSC1_DISABLE (0 << 1)
+#define DREF_SSC1_ENABLE (1 << 1)
+#define DREF_SSC4_DISABLE (0)
+#define DREF_SSC4_ENABLE (1)
+
+#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
+#define FDL_TP1_TIMER_SHIFT 12
+#define FDL_TP1_TIMER_MASK (3 << 12)
+#define FDL_TP2_TIMER_SHIFT 10
+#define FDL_TP2_TIMER_MASK (3 << 10)
+#define RAWCLK_FREQ_MASK 0x3ff
+#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
+#define CNP_RAWCLK_DIV(div) ((div) << 16)
+#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
+#define CNP_RAWCLK_DEN(den) ((den) << 26)
+#define ICP_RAWCLK_NUM(num) ((num) << 11)
+
+#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
+
+#define PCH_SSC4_PARMS _MMIO(0xc6210)
+#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
+
+#define PCH_DPLL_SEL _MMIO(0xc7000)
+#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
+#define TRANS_DPLLA_SEL(pipe) 0
+#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
+
+/* transcoder */
+#define _PCH_TRANS_HTOTAL_A 0xe0000
+#define _PCH_TRANS_HTOTAL_B 0xe1000
+#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
+#define TRANS_HTOTAL_SHIFT 16
+#define TRANS_HACTIVE_SHIFT 0
+
+#define _PCH_TRANS_HBLANK_A 0xe0004
+#define _PCH_TRANS_HBLANK_B 0xe1004
+#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
+#define TRANS_HBLANK_END_SHIFT 16
+#define TRANS_HBLANK_START_SHIFT 0
+
+#define _PCH_TRANS_HSYNC_A 0xe0008
+#define _PCH_TRANS_HSYNC_B 0xe1008
+#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
+#define TRANS_HSYNC_END_SHIFT 16
+#define TRANS_HSYNC_START_SHIFT 0
+
+#define _PCH_TRANS_VTOTAL_A 0xe000c
+#define _PCH_TRANS_VTOTAL_B 0xe100c
+#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
+#define TRANS_VTOTAL_SHIFT 16
+#define TRANS_VACTIVE_SHIFT 0
+
+#define _PCH_TRANS_VBLANK_A 0xe0010
+#define _PCH_TRANS_VBLANK_B 0xe1010
+#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
+#define TRANS_VBLANK_END_SHIFT 16
+#define TRANS_VBLANK_START_SHIFT 0
+
+#define _PCH_TRANS_VSYNC_A 0xe0014
+#define _PCH_TRANS_VSYNC_B 0xe1014
+#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
+#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_START_SHIFT 0
+
+#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
+#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
+#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
+
+#define _PCH_TRANSA_DATA_M1 0xe0030
+#define _PCH_TRANSB_DATA_M1 0xe1030
+#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+
+#define _PCH_TRANSA_DATA_N1 0xe0034
+#define _PCH_TRANSB_DATA_N1 0xe1034
+#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+
+#define _PCH_TRANSA_DATA_M2 0xe0038
+#define _PCH_TRANSB_DATA_M2 0xe1038
+#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+
+#define _PCH_TRANSA_DATA_N2 0xe003c
+#define _PCH_TRANSB_DATA_N2 0xe103c
+#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+
+#define _PCH_TRANSA_LINK_M1 0xe0040
+#define _PCH_TRANSB_LINK_M1 0xe1040
+#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+
+#define _PCH_TRANSA_LINK_N1 0xe0044
+#define _PCH_TRANSB_LINK_N1 0xe1044
+#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+
+#define _PCH_TRANSA_LINK_M2 0xe0048
+#define _PCH_TRANSB_LINK_M2 0xe1048
+#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+
+#define _PCH_TRANSA_LINK_N2 0xe004c
+#define _PCH_TRANSB_LINK_N2 0xe104c
+#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
+
+/* Per-transcoder DIP controls (PCH) */
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+
+#define _VIDEO_DIP_GCP_A 0xe0210
+#define _VIDEO_DIP_GCP_B 0xe1210
+#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define GCP_COLOR_INDICATION (1 << 2)
+#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
+#define GCP_AV_MUTE (1 << 0)
+
+/* Per-transcoder DIP controls (VLV) */
+#define _VLV_VIDEO_DIP_CTL_A 0x60200
+#define _VLV_VIDEO_DIP_CTL_B 0x61170
+#define _CHV_VIDEO_DIP_CTL_C 0x611f0
+#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_CTL_A, \
+ _VLV_VIDEO_DIP_CTL_B, \
+ _CHV_VIDEO_DIP_CTL_C)
+
+#define _VLV_VIDEO_DIP_DATA_A 0x60208
+#define _VLV_VIDEO_DIP_DATA_B 0x61174
+#define _CHV_VIDEO_DIP_DATA_C 0x611f4
+#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_DATA_A, \
+ _VLV_VIDEO_DIP_DATA_B, \
+ _CHV_VIDEO_DIP_DATA_C)
+
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
+#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
+ _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
+
+/* Haswell DIP controls */
+#define _HSW_VIDEO_DIP_CTL_A 0x60200
+#define _HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
+
+#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+
+/* ADLP and later: */
+#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
+#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
+#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\
+ _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
+
+#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
+#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
+#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define _HSW_VIDEO_DIP_AVI_ECC_B 0x61240
+#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+
+#define _HSW_VIDEO_DIP_GCP_A 0x60210
+#define _HSW_VIDEO_DIP_GCP_B 0x61210
+#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
+#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+
+#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
+#define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+
+#define _HSW_STEREO_3D_CTL_A 0x70020
+#define _HSW_STEREO_3D_CTL_B 0x71020
+#define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
+#define S3D_ENABLE (1 << 31)
+
+#define _PCH_TRANSACONF 0xf0008
+#define _PCH_TRANSBCONF 0xf1008
+#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
+#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
+#define TRANS_ENABLE REG_BIT(31)
+#define TRANS_STATE_ENABLE REG_BIT(30)
+#define TRANS_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* ibx */
+#define TRANS_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_FRAME_START_DELAY_MASK, (x)) /* ibx: 0-3 */
+#define TRANS_INTERLACE_MASK REG_GENMASK(23, 21)
+#define TRANS_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANS_INTERLACE_MASK, 0)
+#define TRANS_INTERLACE_LEGACY_VSYNC_IBX REG_FIELD_PREP(TRANS_INTERLACE_MASK, 2) /* ibx */
+#define TRANS_INTERLACE_INTERLACED REG_FIELD_PREP(TRANS_INTERLACE_MASK, 3)
+#define TRANS_BPC_MASK REG_GENMASK(7, 5) /* ibx */
+#define TRANS_BPC_8 REG_FIELD_PREP(TRANS_BPC_MASK, 0)
+#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1)
+#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
+#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
+
+#define PCH_DP_B _MMIO(0xe4100)
+#define PCH_DP_C _MMIO(0xe4200)
+#define PCH_DP_D _MMIO(0xe4300)
+
+/* CPT */
+#define _TRANS_DP_CTL_A 0xe0300
+#define _TRANS_DP_CTL_B 0xe1300
+#define _TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
+#define TRANS_DP_OUTPUT_ENABLE REG_BIT(31)
+#define TRANS_DP_PORT_SEL_MASK REG_GENMASK(30, 29)
+#define TRANS_DP_PORT_SEL_NONE REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, 3)
+#define TRANS_DP_PORT_SEL(port) REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, (port) - PORT_B)
+#define TRANS_DP_AUDIO_ONLY REG_BIT(26)
+#define TRANS_DP_ENH_FRAMING REG_BIT(18)
+#define TRANS_DP_BPC_MASK REG_GENMASK(10, 9)
+#define TRANS_DP_BPC_8 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 0)
+#define TRANS_DP_BPC_10 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 1)
+#define TRANS_DP_BPC_6 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 2)
+#define TRANS_DP_BPC_12 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 3)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH REG_BIT(4)
+#define TRANS_DP_HSYNC_ACTIVE_HIGH REG_BIT(3)
+
+#define _TRANS_DP2_CTL_A 0x600a0
+#define _TRANS_DP2_CTL_B 0x610a0
+#define _TRANS_DP2_CTL_C 0x620a0
+#define _TRANS_DP2_CTL_D 0x630a0
+#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B)
+#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31)
+#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30)
+#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23)
+
+#define _TRANS_DP2_VFREQHIGH_A 0x600a4
+#define _TRANS_DP2_VFREQHIGH_B 0x610a4
+#define _TRANS_DP2_VFREQHIGH_C 0x620a4
+#define _TRANS_DP2_VFREQHIGH_D 0x630a4
+#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz))
+
+#define _TRANS_DP2_VFREQLOW_A 0x600a8
+#define _TRANS_DP2_VFREQLOW_B 0x610a8
+#define _TRANS_DP2_VFREQLOW_C 0x620a8
+#define _TRANS_DP2_VFREQLOW_D 0x630a8
+#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
+
+#define _DP_MIN_HBLANK_CTL_A 0x600ac
+#define _DP_MIN_HBLANK_CTL_B 0x610ac
+#define DP_MIN_HBLANK_CTL(trans) _MMIO_TRANS(trans, _DP_MIN_HBLANK_CTL_A, _DP_MIN_HBLANK_CTL_B)
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
+
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
+
+#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
+#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
+#define PIXEL_OVERLAP_CNT_SHIFT 30
+
+/*
+ * HSW - ICL power wells
+ *
+ * Platforms have up to 3 power well control register sets, each set
+ * controlling up to 16 power wells via a request/status HW flag tuple:
+ * - main (HSW_PWR_WELL_CTL[1-4])
+ * - AUX (ICL_PWR_WELL_CTL_AUX[1-4])
+ * - DDI (ICL_PWR_WELL_CTL_DDI[1-4])
+ * Each control register set consists of up to 4 registers used by different
+ * sources that can request a power well to be enabled:
+ * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1)
+ * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2)
+ * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set)
+ * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4)
+ */
+#define HSW_PWR_WELL_CTL1 _MMIO(0x45400)
+#define HSW_PWR_WELL_CTL2 _MMIO(0x45404)
+#define HSW_PWR_WELL_CTL3 _MMIO(0x45408)
+#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C)
+#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2))
+#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2))
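The request/status tuple described in the comment above is driven in the usual request-then-poll style; a hedged sketch using the DRIVER register set (assuming the intel_de_read()/intel_de_write()/intel_de_wait_for_set() helpers; the real code also coordinates with BIOS/KVMR requests):

/* Sketch: request power well 'pw_idx' via the DRIVER control register. */
static int example_power_well_enable(struct intel_display *display, int pw_idx)
{
	u32 val = intel_de_read(display, HSW_PWR_WELL_CTL2);

	intel_de_write(display, HSW_PWR_WELL_CTL2,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	/* HW raises the STATE flag once the well is actually powered up. */
	return intel_de_wait_for_set(display, HSW_PWR_WELL_CTL2,
				     HSW_PWR_WELL_CTL_STATE(pw_idx), 1);
}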
+
+/* HSW/BDW power well */
+#define HSW_PW_CTL_IDX_GLOBAL 15
+
+/* SKL/BXT/GLK power wells */
+#define SKL_PW_CTL_IDX_PW_2 15
+#define SKL_PW_CTL_IDX_PW_1 14
+#define GLK_PW_CTL_IDX_AUX_C 10
+#define GLK_PW_CTL_IDX_AUX_B 9
+#define GLK_PW_CTL_IDX_AUX_A 8
+#define SKL_PW_CTL_IDX_DDI_D 4
+#define SKL_PW_CTL_IDX_DDI_C 3
+#define SKL_PW_CTL_IDX_DDI_B 2
+#define SKL_PW_CTL_IDX_DDI_A_E 1
+#define GLK_PW_CTL_IDX_DDI_A 1
+#define SKL_PW_CTL_IDX_MISC_IO 0
+
+/* ICL/TGL - power wells */
+#define TGL_PW_CTL_IDX_PW_5 4
+#define ICL_PW_CTL_IDX_PW_4 3
+#define ICL_PW_CTL_IDX_PW_3 2
+#define ICL_PW_CTL_IDX_PW_2 1
+#define ICL_PW_CTL_IDX_PW_1 0
+
+/* XE_LPD - power wells */
+#define XELPD_PW_CTL_IDX_PW_D 8
+#define XELPD_PW_CTL_IDX_PW_C 7
+#define XELPD_PW_CTL_IDX_PW_B 6
+#define XELPD_PW_CTL_IDX_PW_A 5
+
+#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
+#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
+#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
+#define TGL_PW_CTL_IDX_AUX_TBT6 14
+#define TGL_PW_CTL_IDX_AUX_TBT5 13
+#define TGL_PW_CTL_IDX_AUX_TBT4 12
+#define ICL_PW_CTL_IDX_AUX_TBT4 11
+#define TGL_PW_CTL_IDX_AUX_TBT3 11
+#define ICL_PW_CTL_IDX_AUX_TBT3 10
+#define TGL_PW_CTL_IDX_AUX_TBT2 10
+#define ICL_PW_CTL_IDX_AUX_TBT2 9
+#define TGL_PW_CTL_IDX_AUX_TBT1 9
+#define ICL_PW_CTL_IDX_AUX_TBT1 8
+#define TGL_PW_CTL_IDX_AUX_TC6 8
+#define XELPD_PW_CTL_IDX_AUX_E 8
+#define TGL_PW_CTL_IDX_AUX_TC5 7
+#define XELPD_PW_CTL_IDX_AUX_D 7
+#define TGL_PW_CTL_IDX_AUX_TC4 6
+#define ICL_PW_CTL_IDX_AUX_F 5
+#define TGL_PW_CTL_IDX_AUX_TC3 5
+#define ICL_PW_CTL_IDX_AUX_E 4
+#define TGL_PW_CTL_IDX_AUX_TC2 4
+#define ICL_PW_CTL_IDX_AUX_D 3
+#define TGL_PW_CTL_IDX_AUX_TC1 3
+#define ICL_PW_CTL_IDX_AUX_C 2
+#define ICL_PW_CTL_IDX_AUX_B 1
+#define ICL_PW_CTL_IDX_AUX_A 0
+
+#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
+#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
+#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
+#define XELPD_PW_CTL_IDX_DDI_E 8
+#define TGL_PW_CTL_IDX_DDI_TC6 8
+#define XELPD_PW_CTL_IDX_DDI_D 7
+#define TGL_PW_CTL_IDX_DDI_TC5 7
+#define TGL_PW_CTL_IDX_DDI_TC4 6
+#define ICL_PW_CTL_IDX_DDI_F 5
+#define TGL_PW_CTL_IDX_DDI_TC3 5
+#define ICL_PW_CTL_IDX_DDI_E 4
+#define TGL_PW_CTL_IDX_DDI_TC2 4
+#define ICL_PW_CTL_IDX_DDI_D 3
+#define TGL_PW_CTL_IDX_DDI_TC1 3
+#define ICL_PW_CTL_IDX_DDI_C 2
+#define ICL_PW_CTL_IDX_DDI_B 1
+#define ICL_PW_CTL_IDX_DDI_A 0
+
+/* HSW - power well misc debug registers */
+#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
+#define HSW_PWR_WELL_FORCE_ON (1 << 19)
+#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
+
+/* SKL Fuse Status */
+enum skl_power_gate {
+ SKL_PG0,
+ SKL_PG1,
+ SKL_PG2,
+ ICL_PG3,
+ ICL_PG4,
+};
+
+#define SKL_FUSE_STATUS _MMIO(0x42000)
+#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
+#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
+
+/* Per-pipe DDI Function Control */
+#define _TRANS_DDI_FUNC_CTL_A 0x60400
+#define _TRANS_DDI_FUNC_CTL_B 0x61400
+#define _TRANS_DDI_FUNC_CTL_C 0x62400
+#define _TRANS_DDI_FUNC_CTL_D 0x63400
+#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
+#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
+#define TRANS_DDI_FUNC_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
+
+#define TRANS_DDI_FUNC_ENABLE (1 << 31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define TRANS_DDI_PORT_SHIFT 28
+#define TGL_TRANS_DDI_PORT_SHIFT 27
+#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
+#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
+#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24)
+#define TRANS_DDI_BPC_MASK (7 << 20)
+#define TRANS_DDI_BPC_8 (0 << 20)
+#define TRANS_DDI_BPC_10 (1 << 20)
+#define TRANS_DDI_BPC_6 (2 << 20)
+#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
+#define TRANS_DDI_PVSYNC (1 << 17)
+#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
+#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
+#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
+#define TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(12)
+#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
+#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
+ REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
+#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
+#define TRANS_DDI_BFI_ENABLE (1 << 4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define TRANS_DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define TRANS_DDI_PORT_WIDTH(width) REG_FIELD_PREP(TRANS_DDI_PORT_WIDTH_MASK, (width) - 1)
+#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
+#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
+ | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
+ | TRANS_DDI_HDMI_SCRAMBLING)
+
+#define _TRANS_DDI_FUNC_CTL2_A 0x60404
+#define _TRANS_DDI_FUNC_CTL2_B 0x61404
+#define _TRANS_DDI_FUNC_CTL2_C 0x62404
+#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
+#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
+#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
+#define TRANS_DDI_FUNC_CTL2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
+#define CMTG_SECONDARY_MODE REG_BIT(3)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
+
+#define TRANS_CMTG_CHICKEN _MMIO(0x6fa90)
+#define DISABLE_DPT_CLK_GATING REG_BIT(1)
+
+/* DisplayPort Transport Control */
+#define _DP_TP_CTL_A 0x64040
+#define _DP_TP_CTL_B 0x64140
+#define _TGL_DP_TP_CTL_A 0x60540
+#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
+#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
+#define DP_TP_CTL_ENABLE REG_BIT(31)
+#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
+#define DP_TP_CTL_MODE_MASK REG_BIT(27)
+#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
+#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
+#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
+#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
+#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
+#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
+#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
+
+/* DisplayPort Transport Status */
+#define _DP_TP_STATUS_A 0x64044
+#define _DP_TP_STATUS_B 0x64144
+#define _TGL_DP_TP_STATUS_A 0x60544
+#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
+#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
+#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
+#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
+#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
+#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
+
+/* DDI Buffer Control */
+#define _DDI_BUF_CTL_A 0x64000
+#define _DDI_BUF_CTL_B 0x64100
+/* Known as DDI_CTL_DE in MTL+ */
+#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE REG_BIT(31)
+#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
+#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
+#define DDI_BUF_EMP_MASK REG_GENMASK(27, 24)
+#define DDI_BUF_TRANS_SELECT(n) REG_FIELD_PREP(DDI_BUF_EMP_MASK, (n))
+#define DDI_BUF_PHY_LINK_RATE_MASK REG_GENMASK(23, 20)
+#define DDI_BUF_PHY_LINK_RATE(r) REG_FIELD_PREP(DDI_BUF_PHY_LINK_RATE_MASK, (r))
+#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18)
+#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0)
+#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1)
+#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 2)
+#define DDI_BUF_PORT_REVERSAL REG_BIT(16)
+#define DDI_BUF_LANE_STAGGER_DELAY_MASK REG_GENMASK(15, 8)
+#define DDI_BUF_LANE_STAGGER_DELAY(symbols) REG_FIELD_PREP(DDI_BUF_LANE_STAGGER_DELAY_MASK, \
+ (symbols))
+#define DDI_BUF_IS_IDLE REG_BIT(7)
+#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
+#define DDI_A_4_LANES REG_BIT(4)
+#define DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define DDI_PORT_WIDTH(width) REG_FIELD_PREP(DDI_PORT_WIDTH_MASK, \
+ ((width) == 3 ? 4 : (width) - 1))
+#define DDI_PORT_WIDTH_SHIFT 1
+#define DDI_INIT_DISPLAY_DETECTED REG_BIT(0)
+
+/* DDI Buffer Translations */
+#define _DDI_BUF_TRANS_A 0x64E00
+#define _DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
+#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
+#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
+
+/* DDI DP Compliance Control */
+#define _DDI_DP_COMP_CTL_A 0x605F0
+#define _DDI_DP_COMP_CTL_B 0x615F0
+#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
+#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
+#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
+#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
+#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
+#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
+#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
+
+/* DDI DP Compliance Pattern */
+#define _DDI_DP_COMP_PAT_A 0x605F4
+#define _DDI_DP_COMP_PAT_B 0x615F4
+#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE _MMIO(0xC6020)
+#define PIXCLK_GATE_UNGATE (1 << 0)
+#define PIXCLK_GATE_GATE (0 << 0)
+
+/* SPLL */
+#define SPLL_CTL _MMIO(0x46020)
+#define SPLL_PLL_ENABLE (1 << 31)
+#define SPLL_REF_BCLK (0 << 28)
+#define SPLL_REF_MUXED_SSC (1 << 28) /* CPU SSC if fuse-enabled, PCH SSC otherwise */
+#define SPLL_REF_NON_SSC_HSW (2 << 28)
+#define SPLL_REF_PCH_SSC_BDW (2 << 28)
+#define SPLL_REF_LCPLL (3 << 28)
+#define SPLL_REF_MASK (3 << 28)
+#define SPLL_FREQ_810MHz (0 << 26)
+#define SPLL_FREQ_1350MHz (1 << 26)
+#define SPLL_FREQ_2700MHz (2 << 26)
+#define SPLL_FREQ_MASK (3 << 26)
+
+/* WRPLL */
+#define _WRPLL_CTL1 0x46040
+#define _WRPLL_CTL2 0x46060
+#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
+#define WRPLL_PLL_ENABLE (1 << 31)
+#define WRPLL_REF_BCLK (0 << 28)
+#define WRPLL_REF_PCH_SSC (1 << 28)
+#define WRPLL_REF_MUXED_SSC_BDW (2 << 28) /* CPU SSC if fuse-enabled, PCH SSC otherwise */
+#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC (non-ULT) */
+#define WRPLL_REF_LCPLL (3 << 28)
+#define WRPLL_REF_MASK (3 << 28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
+#define WRPLL_DIVIDER_REF_MASK (0xff)
+#define WRPLL_DIVIDER_POST(x) ((x) << 8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
+#define WRPLL_DIVIDER_POST_SHIFT 8
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
+#define WRPLL_DIVIDER_FB_SHIFT 16
+#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
+
+/* Port clock selection */
+#define _PORT_CLK_SEL_A 0x46100
+#define _PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29)
+#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0)
+#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1)
+#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2)
+#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)
+#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll))
+#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4)
+#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5)
+#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7)
+
+/* On ICL+ this register is at the same offset as PORT_CLK_SEL, but the bit layout differs. */
+#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
+#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28)
+#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0)
+#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8)
+#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC)
+#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD)
+#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE)
+#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF)
+
+/* Transcoder clock selection */
+#define _TRANS_CLK_SEL_A 0x46140
+#define _TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
+#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
+#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
+#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
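Routing then reduces to a single write per transcoder (sketch, assuming intel_de_write(); TGL uses the 28-bit-shifted variants):

/* Sketch: feed transcoder 'tran' from the clock of 'port' (pre-TGL layout). */
intel_de_write(display, TRANS_CLK_SEL(tran), TRANS_CLK_SEL_PORT(port));
/* ...and park it again when the transcoder is shut down: */
intel_de_write(display, TRANS_CLK_SEL(tran), TRANS_CLK_SEL_DISABLED);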
+
+#define CDCLK_FREQ _MMIO(0x46200)
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define _TRANSC_MSA_MISC 0x62410
+#define _TRANS_EDP_MSA_MISC 0x6f410
+#define TRANS_MSA_MISC(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
+/* See DP_MSA_MISC_* for the bit definitions */
+
+#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
+#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
+#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
+#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
+#define TRANS_SET_CONTEXT_LATENCY(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
+#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
+#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
+
+/* LCPLL Control */
+#define LCPLL_CTL _MMIO(0x130040)
+#define LCPLL_PLL_DISABLE (1 << 31)
+#define LCPLL_PLL_LOCK (1 << 30)
+#define LCPLL_REF_NON_SSC (0 << 28)
+#define LCPLL_REF_BCLK (2 << 28)
+#define LCPLL_REF_PCH_SSC (3 << 28)
+#define LCPLL_REF_MASK (3 << 28)
+#define LCPLL_CLK_FREQ_MASK (3 << 26)
+#define LCPLL_CLK_FREQ_450 (0 << 26)
+#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
+#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
+#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
+#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
+#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
+#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
+#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
+#define LCPLL_CD_SOURCE_FCLK (1 << 21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
+
+/*
+ * SKL Clocks
+ */
+/* CDCLK_CTL */
+#define CDCLK_CTL _MMIO(0x46000)
+#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
+#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
+#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
+#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
+#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
+#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
+#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
+#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
+#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
+#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
+#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
+#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
+#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
+#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+
+/* CDCLK_SQUASH_CTL */
+#define CDCLK_SQUASH_CTL _MMIO(0x46008)
+#define CDCLK_SQUASH_ENABLE REG_BIT(31)
+#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24)
+#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x))
+#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0)
+#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x))
+
+/* LCPLL_CTL */
+#define LCPLL1_CTL _MMIO(0x46010)
+#define LCPLL2_CTL _MMIO(0x46014)
+#define LCPLL_PLL_ENABLE (1 << 31)
+
+/* DPLL control1 */
+#define DPLL_CTRL1 _MMIO(0x6C058)
+#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
+#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
+#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
+#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
+#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
+#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
+#define DPLL_CTRL1_LINK_RATE_2700 0
+#define DPLL_CTRL1_LINK_RATE_1350 1
+#define DPLL_CTRL1_LINK_RATE_810 2
+#define DPLL_CTRL1_LINK_RATE_1620 3
+#define DPLL_CTRL1_LINK_RATE_1080 4
+#define DPLL_CTRL1_LINK_RATE_2160 5
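Putting the per-id fields together, claiming a DPLL and selecting a DP link rate looks roughly like this (a sketch assuming intel_de_read()/intel_de_write(); 'id' is the SKL DPLL index):

/* Sketch: take ownership of SKL DPLL 'id' and select the 1350 (HBR) rate. */
static void example_skl_dpll_ctrl1(struct intel_display *display, int id)
{
	u32 val = intel_de_read(display, DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= DPLL_CTRL1_OVERRIDE(id) |
	       DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, id);
	intel_de_write(display, DPLL_CTRL1, val);
}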
+
+/* DPLL control2 */
+#define DPLL_CTRL2 _MMIO(0x6C05C)
+#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
+
+/* DPLL Status */
+#define DPLL_STATUS _MMIO(0x6C060)
+#define DPLL_LOCK(id) (1 << ((id) * 8))
+
+/* DPLL cfg */
+#define _DPLL1_CFGCR1 0x6C040
+#define _DPLL2_CFGCR1 0x6C048
+#define _DPLL3_CFGCR1 0x6C050
+#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
+#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
+#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
+#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
+
+#define _DPLL1_CFGCR2 0x6C044
+#define _DPLL2_CFGCR2 0x6C04C
+#define _DPLL3_CFGCR2 0x6C054
+#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
+#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
+#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
+#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
+#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
+#define DPLL_CFGCR2_KDIV_5 (0 << 5)
+#define DPLL_CFGCR2_KDIV_2 (1 << 5)
+#define DPLL_CFGCR2_KDIV_3 (2 << 5)
+#define DPLL_CFGCR2_KDIV_1 (3 << 5)
+#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
+#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR2_PDIV_1 (0 << 2)
+#define DPLL_CFGCR2_PDIV_2 (1 << 2)
+#define DPLL_CFGCR2_PDIV_3 (2 << 2)
+#define DPLL_CFGCR2_PDIV_7 (4 << 2)
+#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2)
+#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
+
+/* ICL Clocks */
+#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
+#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
+ (tc_port) + 12 : \
+ (tc_port) - TC_PORT_4 + 21))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27)
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \
+ (3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \
+ ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+
+/*
+ * DG1 Clocks
+ * The first register controls phys A and B, while the second register
+ * controls phys C and D. The bits in these registers are the
+ * same, but refer to different phys.
+ */
+#define _DG1_DPCLKA_CFGCR0 0x164280
+#define _DG1_DPCLKA1_CFGCR0 0x16C280
+#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)
+#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2)
+#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \
+ _DG1_DPCLKA_CFGCR0, \
+ _DG1_DPCLKA1_CFGCR0)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT(_DG1_DPCLKA_PHY_IDX(phy) + 10)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
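Following the pairing described in the comment above, the helpers fold a global phy index into a register selection plus a local bit position (illustrative sketch; expansions shown for phy C, assuming intel_de_rmw()):

/* Sketch: phy C (index 2) lands in the second register at local index 0, so
 * DG1_DPCLKA_CFGCR0(PHY_C) -> _MMIO(_DG1_DPCLKA1_CFGCR0) and
 * DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(PHY_C) -> REG_BIT(10).
 */
intel_de_rmw(display, DG1_DPCLKA_CFGCR0(phy),
	     DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
	     DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy));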
+
+/* ADLS Clocks */
+#define _ADLS_DPCLKA_CFGCR0 0x164280
+#define _ADLS_DPCLKA_CFGCR1 0x1642BC
+#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
+ _ADLS_DPCLKA_CFGCR0, \
+ _ADLS_DPCLKA_CFGCR1)
+#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
+/* ADLS DPCLKA_CFGCR0 DDI mask */
+#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
+#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
+/* ADLS DPCLKA_CFGCR1 DDI mask */
+#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
+#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
+ ADLS_DPCLKA_DDIA_SEL_MASK, \
+ ADLS_DPCLKA_DDIB_SEL_MASK, \
+ ADLS_DPCLKA_DDII_SEL_MASK, \
+ ADLS_DPCLKA_DDIJ_SEL_MASK, \
+ ADLS_DPCLKA_DDIK_SEL_MASK)
+
+/* ICL PLL */
+#define _DPLL0_ENABLE 0x46010
+#define _DPLL1_ENABLE 0x46014
+#define _ADLS_DPLL2_ENABLE 0x46018
+#define _ADLS_DPLL3_ENABLE 0x46030
+#define PLL_ENABLE REG_BIT(31)
+#define PLL_LOCK REG_BIT(30)
+#define PLL_POWER_ENABLE REG_BIT(27)
+#define PLL_POWER_STATE REG_BIT(26)
+#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
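The PLL_POWER_*/PLL_* request and status bits above are used pairwise, power-up first, then enable (hedged sketch assuming intel_de_rmw()/intel_de_wait_for_set(); the real sequence programs CFGCR0/1 between the two steps):

/* Sketch: power up and lock DPLL 'pll' on ICL. */
static int example_icl_pll_enable(struct intel_display *display, int pll)
{
	i915_reg_t enable_reg = ICL_DPLL_ENABLE(pll);
	int ret;

	intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);
	ret = intel_de_wait_for_set(display, enable_reg, PLL_POWER_STATE, 1);
	if (ret)
		return ret;

	/* ...program ICL_DPLL_CFGCR0/1 here... */

	intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
	return intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 1);
}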
+
+#define _DG2_PLL3_ENABLE 0x4601C
+
+#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
+
+#define TBT_PLL_ENABLE _MMIO(0x46020)
+
+#define _MG_PLL1_ENABLE 0x46030
+#define _MG_PLL2_ENABLE 0x46034
+#define _MG_PLL3_ENABLE 0x46038
+#define _MG_PLL4_ENABLE 0x4603C
+/* Bits are the same as _DPLL0_ENABLE */
+#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
+ _MG_PLL2_ENABLE)
+
+/* DG1 PLL */
+#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
+
+/* ADL-P Type C PLL */
+#define PORTTC1_PLL_ENABLE 0x46038
+#define PORTTC2_PLL_ENABLE 0x46040
+#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
+ PORTTC1_PLL_ENABLE, \
+ PORTTC2_PLL_ENABLE)
+
+#define _ICL_DPLL0_CFGCR0 0x164000
+#define _ICL_DPLL1_CFGCR0 0x164080
+#define ICL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
+ _ICL_DPLL1_CFGCR0)
+#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
+#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
+#define DPLL_CFGCR0_SSC_ENABLE_ICL (1 << 25)
+#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
+#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
+#define DPLL_CFGCR0_LINK_RATE_810 (2 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1620 (3 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1080 (4 << 25)
+#define DPLL_CFGCR0_LINK_RATE_2160 (5 << 25)
+#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
+#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
+#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
+#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
+#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
+#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
+
+#define _ICL_DPLL0_CFGCR1 0x164004
+#define _ICL_DPLL1_CFGCR1 0x164084
+#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
+ _ICL_DPLL1_CFGCR1)
+#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
+#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
+#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
+#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
+#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
+#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
+#define DPLL_CFGCR1_KDIV_SHIFT (6)
+#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
+#define DPLL_CFGCR1_KDIV_1 (1 << 6)
+#define DPLL_CFGCR1_KDIV_2 (2 << 6)
+#define DPLL_CFGCR1_KDIV_3 (4 << 6)
+#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
+#define DPLL_CFGCR1_PDIV_SHIFT (2)
+#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR1_PDIV_2 (1 << 2)
+#define DPLL_CFGCR1_PDIV_3 (2 << 2)
+#define DPLL_CFGCR1_PDIV_5 (4 << 2)
+#define DPLL_CFGCR1_PDIV_7 (8 << 2)
+#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
+#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
+#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
+
+#define _TGL_DPLL0_CFGCR0 0x164284
+#define _TGL_DPLL1_CFGCR0 0x16428C
+#define _TGL_TBTPLL_CFGCR0 0x16429C
+#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
+#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0)
+
+#define _TGL_DPLL0_DIV0 0x164B00
+#define _TGL_DPLL1_DIV0 0x164C00
+#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0)
+#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
+#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val))
+
+#define _TGL_DPLL0_CFGCR1 0x164288
+#define _TGL_DPLL1_CFGCR1 0x164290
+#define _TGL_TBTPLL_CFGCR1 0x1642A0
+#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
+#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1)
+
+#define _DG1_DPLL2_CFGCR0 0x16C284
+#define _DG1_DPLL3_CFGCR0 0x16C28C
+#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
+
+#define _DG1_DPLL2_CFGCR1 0x16C288
+#define _DG1_DPLL3_CFGCR1 0x16C290
+#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
+
+/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
+#define _ADLS_DPLL4_CFGCR0 0x164294
+#define _ADLS_DPLL3_CFGCR0 0x1642C0
+#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
+
+#define _ADLS_DPLL4_CFGCR1 0x164298
+#define _ADLS_DPLL3_CFGCR1 0x1642C4
+#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
+
+/* BXT display engine PLL */
+#define BXT_DE_PLL_CTL _MMIO(0x6d000)
+#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
+#define BXT_DE_PLL_RATIO_MASK 0xff
+
+#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
+#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
+#define BXT_DE_PLL_LOCK (1 << 30)
+#define BXT_DE_PLL_FREQ_REQ (1 << 23)
+#define BXT_DE_PLL_FREQ_REQ_ACK (1 << 22)
+#define ICL_CDCLK_PLL_RATIO(x) (x)
+#define ICL_CDCLK_PLL_RATIO_MASK 0xff
+
+/* GEN9 DC */
+#define DC_STATE_EN _MMIO(0x45504)
+#define DC_STATE_DISABLE 0
+#define DC_STATE_EN_DC3CO REG_BIT(30)
+#define DC_STATE_DC3CO_STATUS REG_BIT(29)
+#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
+#define HOLD_PHY_PG1_LATCH REG_BIT(20)
+#define DC_STATE_EN_UPTO_DC5 (1 << 0)
+#define DC_STATE_EN_DC9 (1 << 3)
+#define DC_STATE_EN_UPTO_DC6 (2 << 0)
+#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
+
+#define DC_STATE_DEBUG _MMIO(0x45520)
+#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
+#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
+
+#define D_COMP_BDW _MMIO(0x138144)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define _WM_LINETIME_A 0x45270
+#define _WM_LINETIME_B 0x45274
+#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
+#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
+#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
+#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
+#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP _MMIO(0xc2014)
+#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
+#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
+#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
+#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
+#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
+#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
+#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
+#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
+
+/* Gen4+ Timestamp and Pipe Frame time stamp registers */
+#define GEN4_TIMESTAMP _MMIO(0x2358)
+#define ILK_TIMESTAMP_HI _MMIO(0x70070)
+#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FRMTMSTMP_A 0x70048
+#define _PIPE_FRMTMSTMP_B 0x71048
+#define PIPE_FRMTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FLIPTMSTMP_A 0x7004C
+#define _PIPE_FLIPTMSTMP_B 0x7104C
+#define PIPE_FLIPTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
+
+/* tgl+ */
+#define _PIPE_FLIPDONETMSTMP_A 0x70054
+#define _PIPE_FLIPDONETMSTMP_B 0x71054
+#define PIPE_FLIPDONETIMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
+
+#define _VLV_PIPE_MSA_MISC_A 0x70048
+#define VLV_PIPE_MSA_MISC(__display, pipe) \
+ _MMIO_PIPE2(__display, pipe, _VLV_PIPE_MSA_MISC_A)
+#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
+#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
+
+#define _ICL_PHY_MISC_A 0x64C00
+#define _ICL_PHY_MISC_B 0x64C04
+#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
+#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, _ICL_PHY_MISC_B)
+#define DG2_PHY_MISC(port) ((port) == PHY_E ? _MMIO(_DG2_PHY_MISC_TC1) : \
+ ICL_PHY_MISC(port))
+#define ICL_PHY_MISC_MUX_DDID (1 << 28)
+#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
+#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
+
+#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
+#define MODULAR_FIA_MASK (1 << 4)
+#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
+#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
+#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
+#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
+#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
+
+#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
+#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
+#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
+#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
+#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
+#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+
+#define _TCSS_DDI_STATUS_1 0x161500
+#define _TCSS_DDI_STATUS_2 0x161504
+#define TCSS_DDI_STATUS(tc) _MMIO(_PICK_EVEN(tc, \
+ _TCSS_DDI_STATUS_1, \
+ _TCSS_DDI_STATUS_2))
+#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
+#define TCSS_DDI_STATUS_READY REG_BIT(2)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
+
+#define CLKREQ_POLICY _MMIO(0x101038)
+#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
+
+#define CLKGATE_DIS_MISC _MMIO(0x46534)
+#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+
+#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
+#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
+#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
+#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
+
+#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
+#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
+#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
+#define MTL_DPFC_GATING_DIS REG_BIT(6)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710
+#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8)
+#define MTL_TRCD_MASK REG_GENMASK(31, 24)
+#define MTL_TRP_MASK REG_GENMASK(23, 16)
+#define MTL_DCLK_MASK REG_GENMASK(15, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8 + 4)
+#define MTL_TRAS_MASK REG_GENMASK(16, 8)
+#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
+
+#endif /* __INTEL_DISPLAY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 1f2798404f2c..f5f38dca14d7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -8,6 +8,7 @@
#include "i915_drv.h"
#include "intel_clock_gating.h"
#include "intel_cx0_phy.h"
+#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_reset.h"
#include "intel_display_types.h"
@@ -107,14 +108,14 @@ void intel_display_reset_finish(struct intel_display *display, bool test_only)
intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
intel_cx0_pll_power_save_wa(display);
- intel_hpd_init(i915);
+ intel_hpd_init(display);
ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
}
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
new file mode 100644
index 000000000000..56c4024201c1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "i915_drv.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
+#include "intel_runtime_pm.h"
+
+static struct intel_runtime_pm *display_to_rpm(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ return &i915->runtime_pm;
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_runtime_pm_get_raw(display_to_rpm(display));
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put_raw(display_to_rpm(display), wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return intel_runtime_pm_get(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return intel_runtime_pm_get_if_in_use(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ return intel_runtime_pm_get_noresume(display_to_rpm(display));
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put(display_to_rpm(display), wakeref);
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ intel_runtime_pm_put_unchecked(display_to_rpm(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ return intel_runtime_pm_suspended(display_to_rpm(display));
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ assert_rpm_wakelock_held(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ disable_rpm_wakeref_asserts(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ enable_rpm_wakeref_asserts(display_to_rpm(display));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.h b/drivers/gpu/drm/i915/display/intel_display_rpm.h
new file mode 100644
index 000000000000..6ef48515f84b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_RPM__
+#define __INTEL_DISPLAY_RPM__
+
+#include <linux/types.h>
+
+struct intel_display;
+struct ref_tracker;
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display);
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref);
+
+#define __with_intel_display_rpm(__display, __wakeref) \
+ for (struct ref_tracker *(__wakeref) = intel_display_rpm_get(__display); (__wakeref); \
+ intel_display_rpm_put((__display), (__wakeref)), (__wakeref) = NULL)
+
+#define with_intel_display_rpm(__display) \
+ __with_intel_display_rpm((__display), __UNIQUE_ID(wakeref))
+
+/* Only for special cases. */
+bool intel_display_rpm_suspended(struct intel_display *display);
+
+void assert_display_rpm_held(struct intel_display *display);
+void intel_display_rpm_assert_block(struct intel_display *display);
+void intel_display_rpm_assert_unblock(struct intel_display *display);
+
+/* Only for display power implementation. */
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display);
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref);
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display);
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display);
+void intel_display_rpm_put_unchecked(struct intel_display *display);
+
+#endif /* __INTEL_DISPLAY_RPM__ */
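A usage sketch for the scoped wakeref macro above (hypothetical caller, not part of this patch): the for-based expansion grabs the wakeref on entry, runs the body exactly once, and releases it on exit, so the reference cannot leak past the block:

static void example_access_hw(struct intel_display *display)
{
	with_intel_display_rpm(display) {
		/* device is guaranteed awake inside this block */
		/* ... MMIO reads/writes ... */
	}
}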
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index 4074a1879828..82ea1ec482e4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -8,6 +8,9 @@
#include "gt/intel_rps.h"
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_display_core.h"
+#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
@@ -43,12 +46,13 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct wait_rps_boost *wait;
if (!dma_fence_is_i915(fence))
return;
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ if (DISPLAY_VER(display) < 6)
return;
if (drm_crtc_vblank_get(crtc))
@@ -81,3 +85,24 @@ void intel_display_rps_mark_interactive(struct intel_display *display,
intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
state->rps_interactive = interactive;
}
+
+void ilk_display_rps_enable(struct intel_display *display)
+{
+ spin_lock(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PCU_EVENT);
+ spin_unlock(&display->irq.lock);
+}
+
+void ilk_display_rps_disable(struct intel_display *display)
+{
+ spin_lock(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PCU_EVENT);
+ spin_unlock(&display->irq.lock);
+}
+
+void ilk_display_rps_irq_handler(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ gen5_rps_irq_handler(&to_gt(i915)->rps);
+}
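The two helpers above serialize updates to the shared display IRQ enable bits under display->irq.lock. A standalone illustration of that lock-protected mask-toggling pattern (generic pthread sketch, not kernel code):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t irq_mask;

static void irq_bit_enable(uint32_t bit)
{
	pthread_mutex_lock(&irq_lock);	/* analogous to display->irq.lock */
	irq_mask |= bit;		/* e.g. DE_PCU_EVENT */
	pthread_mutex_unlock(&irq_lock);
}

static void irq_bit_disable(uint32_t bit)
{
	pthread_mutex_lock(&irq_lock);
	irq_mask &= ~bit;
	pthread_mutex_unlock(&irq_lock);
}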
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
index 556891edb2dd..183d154f2c7c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.h
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -13,10 +13,34 @@ struct drm_crtc;
struct intel_atomic_state;
struct intel_display;
+#ifdef I915
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence);
void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive);
+void ilk_display_rps_enable(struct intel_display *display);
+void ilk_display_rps_disable(struct intel_display *display);
+void ilk_display_rps_irq_handler(struct intel_display *display);
+#else
+static inline void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+}
+static inline void intel_display_rps_mark_interactive(struct intel_display *display,
+ struct intel_atomic_state *state,
+ bool interactive)
+{
+}
+static inline void ilk_display_rps_enable(struct intel_display *display)
+{
+}
+static inline void ilk_display_rps_disable(struct intel_display *display)
+{
+}
+static inline void ilk_display_rps_irq_handler(struct intel_display *display)
+{
+}
+#endif
#endif /* __INTEL_DISPLAY_RPS_H__ */
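The #ifdef I915 block above follows the usual compile-out convention: builds without I915 (i.e. xe) get empty inline stubs, so call sites stay unconditional. A self-contained illustration of the same pattern (generic sketch, not kernel code):

#include <stdio.h>

#ifdef HAVE_FEATURE
static void feature_irq_handler(void) { printf("handling\n"); }
#else
static inline void feature_irq_handler(void) { }	/* optimized away */
#endif

int main(void)
{
	feature_irq_handler();	/* no #ifdef needed at the call site */
	return 0;
}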
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
index 25ba043cbb65..66087302fdbc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_snapshot.c
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
@@ -7,6 +7,7 @@
#include "intel_display_core.h"
#include "intel_display_device.h"
+#include "intel_display_irq.h"
#include "intel_display_params.h"
#include "intel_display_snapshot.h"
#include "intel_dmc.h"
@@ -20,6 +21,7 @@ struct intel_display_snapshot {
struct intel_display_params params;
struct intel_overlay_snapshot *overlay;
struct intel_dmc_snapshot *dmc;
+ struct intel_display_irq_snapshot *irq;
};
struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display)
@@ -38,6 +40,7 @@ struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_displ
intel_display_params_copy(&snapshot->params);
+ snapshot->irq = intel_display_irq_snapshot_capture(display);
snapshot->overlay = intel_overlay_snapshot_capture(display);
snapshot->dmc = intel_dmc_snapshot_capture(display);
@@ -57,6 +60,7 @@ void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot,
intel_display_device_info_print(&snapshot->info, &snapshot->runtime_info, p);
intel_display_params_dump(&snapshot->params, display->drm->driver->name, p);
+ intel_display_irq_snapshot_print(snapshot->irq, p);
intel_overlay_snapshot_print(snapshot->overlay, p);
intel_dmc_snapshot_print(snapshot->dmc, p);
}
@@ -68,6 +72,7 @@ void intel_display_snapshot_free(struct intel_display_snapshot *snapshot)
intel_display_params_free(&snapshot->params);
+ kfree(snapshot->irq);
kfree(snapshot->overlay);
kfree(snapshot->dmc);
kfree(snapshot);
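A hypothetical capture path showing the snapshot lifecycle these hunks extend with an IRQ sub-snapshot: capture once, print later, then free, with every sub-snapshot (irq, overlay, dmc) owned and released by the top-level object:

static void example_dump_display_state(struct intel_display *display,
				       struct drm_printer *p)
{
	struct intel_display_snapshot *snapshot;

	snapshot = intel_display_snapshot_capture(display);
	if (!snapshot)
		return;

	intel_display_snapshot_print(snapshot, p);
	intel_display_snapshot_free(snapshot);
}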
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 99a6fd2900b9..ce45261c4a8f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_panel.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank_work.h>
#include <drm/intel/i915_hdcp_interface.h>
@@ -145,6 +146,8 @@ struct intel_framebuffer {
unsigned int min_alignment;
unsigned int vtd_guard;
+
+ unsigned int (*panic_tiling)(unsigned int x, unsigned int y, unsigned int width);
};
enum intel_hotplug_state {
@@ -384,6 +387,9 @@ struct intel_vbt_panel_data {
};
struct intel_panel {
+ /* Simple drm_panel */
+ struct drm_panel *base;
+
/* Fixed EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
const struct drm_edid *fixed_edid;
@@ -550,6 +556,10 @@ struct intel_connector {
struct intel_dp *dp;
} mst;
+ struct {
+ int force_bpp_x16;
+ } link;
+
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
@@ -581,7 +591,7 @@ struct dpll {
struct intel_atomic_state {
struct drm_atomic_state base;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct __intel_global_objs_state *global_objs;
int num_global_objs;
@@ -591,7 +601,7 @@ struct intel_atomic_state {
bool dpll_set, modeset;
- struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
+ struct intel_dpll_state dpll_state[I915_NUM_PLLS];
struct intel_dp_tunnel_inherited_state *inherited_dp_tunnels;
@@ -1075,8 +1085,8 @@ struct intel_crtc_state {
* haswell. */
struct dpll dpll;
- /* Selected dpll when shared or NULL. */
- struct intel_shared_dpll *shared_dpll;
+ /* Selected dpll or NULL. */
+ struct intel_dpll *intel_dpll;
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
@@ -1086,7 +1096,7 @@ struct intel_crtc_state {
* setting shared_dpll and dpll_hw_state to one of these reserved ones.
*/
struct icl_port_dpll {
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
struct intel_dpll_hw_state hw_state;
} icl_port_dplls[ICL_PORT_DPLL_COUNT];
@@ -1114,6 +1124,7 @@ struct intel_crtc_state {
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
+ u8 active_non_psr_pipes;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1292,8 +1303,9 @@ struct intel_crtc_state {
enum transcoder mst_master_transcoder;
/* For DSB based pipe updates */
- struct intel_dsb *dsb_color_vblank, *dsb_commit;
+ struct intel_dsb *dsb_color, *dsb_commit;
bool use_dsb;
+ bool use_flipq;
u32 psr2_man_track_ctl;
@@ -1360,6 +1372,21 @@ struct intel_pipe_crc {
enum intel_pipe_crc_source source;
};
+enum intel_flipq_id {
+ INTEL_FLIPQ_PLANE_1,
+ INTEL_FLIPQ_PLANE_2,
+ INTEL_FLIPQ_PLANE_3,
+ INTEL_FLIPQ_GENERAL,
+ INTEL_FLIPQ_FAST,
+ MAX_INTEL_FLIPQ,
+};
+
+struct intel_flipq {
+ u32 start_mmioaddr;
+ enum intel_flipq_id flipq_id;
+ u8 tail;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -1386,11 +1413,15 @@ struct intel_crtc {
struct drm_pending_vblank_event *flip_done_event;
/* armed event for DSB based updates */
struct drm_pending_vblank_event *dsb_event;
+ /* armed event for flip queue based updates */
+ struct drm_pending_vblank_event *flipq_event;
- /* Access to these should be protected by dev_priv->irq_lock. */
+ /* Access to these should be protected by display->irq.lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
+ struct intel_flipq flipq[MAX_INTEL_FLIPQ];
+
/* per-pipe watermark state */
struct {
/* watermarks currently being used */
@@ -1439,7 +1470,7 @@ struct intel_crtc {
struct intel_pipe_crc pipe_crc;
#endif
- bool block_dc_for_vblank;
+ bool vblank_psr_notify;
};
struct intel_plane_error {
@@ -1512,6 +1543,8 @@ struct intel_plane {
bool async_flip);
void (*enable_flip_done)(struct intel_plane *plane);
void (*disable_flip_done)(struct intel_plane *plane);
+ /* For drm_panic */
+ void (*disable_tiling)(struct intel_plane *plane);
};
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
@@ -1620,7 +1653,7 @@ struct intel_psr {
bool sink_support;
bool source_support;
bool enabled;
- bool paused;
+ int pause_counter;
enum pipe pipe;
enum transcoder transcoder;
bool active;
@@ -1650,6 +1683,8 @@ struct intel_psr {
u8 entry_setup_frames;
bool link_ok;
+
+ u8 active_non_psr_pipes;
};
struct intel_dp {
@@ -1658,12 +1693,13 @@ struct intel_dp {
int link_rate;
u8 lane_count;
u8 sink_count;
- bool link_trained;
bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
- u8 pr_dpcd;
+ u8 pr_dpcd[DP_PANEL_REPLAY_CAP_SIZE];
+#define INTEL_PR_DPCD_INDEX(pr_dpcd_register) ((pr_dpcd_register) - DP_PANEL_REPLAY_CAP_SUPPORT)
+
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
@@ -1683,6 +1719,7 @@ struct intel_dp {
int common_rates[DP_MAX_SUPPORTED_RATES];
struct {
/* TODO: move the rest of link specific fields to here */
+ bool active;
/* common rate,lane_count configs in bw order */
int num_configs;
#define INTEL_DP_MAX_LANE_COUNT 4
@@ -1739,7 +1776,7 @@ struct intel_dp {
struct {
struct intel_dp_mst_encoder *stream_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mgr;
- int active_links;
+ int active_streams;
} mst;
u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
@@ -1805,12 +1842,16 @@ struct intel_dp {
struct {
u8 io_wake_lines;
u8 fast_wake_lines;
+ enum transcoder transcoder;
+ struct mutex lock;
/* LNL and beyond */
u8 check_entry_lines;
u8 aux_less_wake_lines;
u8 silence_period_sym_clocks;
u8 lfps_half_cycle_num_of_syms;
+ bool lobf_disable_debug;
+ bool sink_alpm_error;
} alpm_parameters;
u8 alpm_dpcd;
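An assumed access pattern for the new pr_dpcd[] cache above: INTEL_PR_DPCD_INDEX() rebases a panel-replay DPCD register address onto the array, so callers can index by register name rather than raw offset (sketch, not from this patch):

static u8 example_pr_cap_byte(struct intel_dp *intel_dp)
{
	return intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)];
}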
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index e5a8022db664..f57280e9d041 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -3,38 +3,39 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_wa.h"
-static void gen11_display_wa_apply(struct drm_i915_private *i915)
+static void gen11_display_wa_apply(struct intel_display *display)
{
/* Wa_14010594013 */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
}
-static void xe_d_display_wa_apply(struct drm_i915_private *i915)
+static void xe_d_display_wa_apply(struct intel_display *display)
{
/* Wa_14013723622 */
- intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
+ intel_de_rmw(display, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
-static void adlp_display_wa_apply(struct drm_i915_private *i915)
+static void adlp_display_wa_apply(struct intel_display *display)
{
/* Wa_22011091694:adlp */
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
/* Bspec/49189 Initialize Sequence */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
-void intel_display_wa_apply(struct drm_i915_private *i915)
+void intel_display_wa_apply(struct intel_display *display)
{
- if (IS_ALDERLAKE_P(i915))
- adlp_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 12)
- xe_d_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 11)
- gen11_display_wa_apply(i915);
+ if (display->platform.alderlake_p)
+ adlp_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 12)
+ xe_d_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 11)
+ gen11_display_wa_apply(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index be644ab6ae00..babd9d16603d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -8,14 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
-void intel_display_wa_apply(struct drm_i915_private *i915);
+void intel_display_wa_apply(struct intel_display *display);
#ifdef I915
-static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; }
+static inline bool intel_display_needs_wa_16023588340(struct intel_display *display)
+{
+ return false;
+}
#else
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915);
+bool intel_display_needs_wa_16023588340(struct intel_display *display);
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index 0813fb9b5823..35e919eae369 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
@@ -24,7 +25,9 @@ dkl_phy_set_hip_idx(struct intel_display *display, struct intel_dkl_phy_reg reg)
{
enum tc_port tc_port = DKL_REG_TC_PORT(reg);
- drm_WARN_ON(display->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS);
+ if (drm_WARN_ON(display->drm,
+ tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS))
+ return;
intel_de_write(display,
HIP_INDEX_REG(tc_port),
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
index 56085b32956d..f8ffeec29e93 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#include "intel_display_reg_defs.h"
+
struct intel_dkl_phy_reg {
u32 reg:24;
u32 bank_idx:4;
@@ -151,6 +153,7 @@ struct intel_dkl_phy_reg {
#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val))
#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5)
#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val))
+#define LOADGEN_SHARING_PMD_DISABLE REG_BIT(12)
#define _DKL_TX_FW_CALIB_LN0 0x02F8
#define _DKL_TX_FW_CALIB_LN1 0x12F8
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index fa6944e55d95..744f51c0eab8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -24,12 +24,22 @@
#include <linux/debugfs.h>
#include <linux/firmware.h>
+#include <drm/drm_vblank.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
+#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
+#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
+#include "intel_flipq.h"
#include "intel_step.h"
/**
@@ -57,6 +67,10 @@ struct intel_dmc {
const char *fw_path;
u32 max_fw_size; /* bytes */
u32 version;
+ struct {
+ u32 dc5_start;
+ u32 count;
+ } dc6_allowed;
struct dmc_fw_info {
u32 mmio_count;
i915_reg_t mmioaddr[20];
@@ -167,11 +181,11 @@ MODULE_FIRMWARE(BXT_DMC_PATH);
static const char *dmc_firmware_default(struct intel_display *display, u32 *size)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const char *fw_path = NULL;
u32 max_fw_size = 0;
- if (DISPLAY_VERx100(display) == 3000) {
+ if (DISPLAY_VERx100(display) == 3002 ||
+ DISPLAY_VERx100(display) == 3000) {
fw_path = XE3LPD_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 2000) {
@@ -183,39 +197,39 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
} else if (DISPLAY_VERx100(display) == 1400) {
fw_path = MTL_DMC_PATH;
max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
- } else if (IS_DG2(i915)) {
+ } else if (display->platform.dg2) {
fw_path = DG2_DMC_PATH;
max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
fw_path = ADLP_DMC_PATH;
max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(i915)) {
+ } else if (display->platform.alderlake_s) {
fw_path = ADLS_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(i915)) {
+ } else if (display->platform.dg1) {
fw_path = DG1_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(i915)) {
+ } else if (display->platform.rocketlake) {
fw_path = RKL_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(i915)) {
+ } else if (display->platform.tigerlake) {
fw_path = TGL_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VER(display) == 11) {
fw_path = ICL_DMC_PATH;
max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(i915)) {
+ } else if (display->platform.geminilake) {
fw_path = GLK_DMC_PATH;
max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915)) {
+ } else if (display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake) {
fw_path = KBL_DMC_PATH;
max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(i915)) {
+ } else if (display->platform.skylake) {
fw_path = SKL_DMC_PATH;
max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(i915)) {
+ } else if (display->platform.broxton) {
fw_path = BXT_DMC_PATH;
max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
@@ -420,29 +434,26 @@ static void disable_event_handler(struct intel_display *display,
REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
DMC_EVT_CTL_TYPE_EDGE_0_1) |
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE));
+ DMC_EVENT_FALSE));
intel_de_write(display, htp_reg, 0);
}
-static void disable_all_event_handlers(struct intel_display *display)
+static void disable_all_event_handlers(struct intel_display *display,
+ enum intel_dmc_id dmc_id)
{
- enum intel_dmc_id dmc_id;
+ int handler;
/* TODO: disable the event handlers on pre-GEN12 platforms as well */
if (DISPLAY_VER(display) < 12)
return;
- for_each_dmc_id(dmc_id) {
- int handler;
-
- if (!has_dmc_id_fw(display, dmc_id))
- continue;
+ if (!has_dmc_id_fw(display, dmc_id))
+ return;
- for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
- disable_event_handler(display,
- DMC_EVT_CTL(display, dmc_id, handler),
- DMC_EVT_HTP(display, dmc_id, handler));
- }
+ for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
+ disable_event_handler(display,
+ DMC_EVT_CTL(display, dmc_id, handler),
+ DMC_EVT_HTP(display, dmc_id, handler));
}
static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
@@ -474,41 +485,36 @@ static void mtl_pipedmc_clock_gating_wa(struct intel_display *display)
* for pipe A and B.
*/
intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0,
- MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
+ MTL_PIPEDMC_GATING_DIS(PIPE_A) |
+ MTL_PIPEDMC_GATING_DIS(PIPE_B));
}
static void pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
{
- if (DISPLAY_VER(display) >= 14 && enable)
+ if (display->platform.meteorlake && enable)
mtl_pipedmc_clock_gating_wa(display);
else if (DISPLAY_VER(display) == 13)
adlp_pipedmc_clock_gating_wa(display, enable);
}
-void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe)
+static u32 pipedmc_interrupt_mask(struct intel_display *display)
{
- enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
-
- if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
- return;
-
- if (DISPLAY_VER(display) >= 14)
- intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
- else
- intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
+ /*
+ * FIXME PIPEDMC_ERROR not enabled for now due to LNL pipe B
+ * triggering it during the first DC state transition. Figure
+ * out what is going on...
+ */
+ return PIPEDMC_FLIPQ_PROG_DONE |
+ PIPEDMC_GTT_FAULT |
+ PIPEDMC_ATS_FAULT;
}
-void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe)
+static u32 dmc_evt_ctl_disable(void)
{
- enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
-
- if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
- return;
-
- if (DISPLAY_VER(display) >= 14)
- intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
- else
- intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
+ return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVENT_FALSE);
}
static bool is_dmc_evt_ctl_reg(struct intel_display *display,
@@ -531,12 +537,19 @@ static bool is_dmc_evt_htp_reg(struct intel_display *display,
return offset >= start && offset < end;
}
+static bool is_event_handler(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ unsigned int event_id,
+ i915_reg_t reg, u32 data)
+{
+ return is_dmc_evt_ctl_reg(display, dmc_id, reg) &&
+ REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
+}
+
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!is_dmc_evt_ctl_reg(display, dmc_id, reg))
return false;
@@ -545,13 +558,13 @@ static bool disable_dmc_evt(struct intel_display *display,
return true;
/* also disable the flip queue event on the main DMC on TGL */
- if (IS_TIGERLAKE(i915) &&
- REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
+ if (display->platform.tigerlake &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_CLK_MSEC, reg, data))
return true;
/* also disable the HRR event on the main DMC on TGL/ADLS */
- if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) &&
- REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
+ if ((display->platform.tigerlake || display->platform.alderlake_s) &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data))
return true;
return false;
@@ -564,14 +577,267 @@ static u32 dmc_mmiodata(struct intel_display *display,
if (disable_dmc_evt(display, dmc_id,
dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]))
- return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE);
+ return dmc_evt_ctl_disable();
else
return dmc->dmc_info[dmc_id].mmiodata[i];
}
+static void dmc_load_mmio(struct intel_display *display, enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ int i;
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc_mmiodata(display, dmc, dmc_id, i));
+ }
+}
+
+static void dmc_load_program(struct intel_display *display, enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ int i;
+
+ disable_all_event_handlers(display, dmc_id);
+
+ preempt_disable();
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
+ intel_de_write_fw(display,
+ DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
+ dmc->dmc_info[dmc_id].payload[i]);
+ }
+
+ preempt_enable();
+
+ dmc_load_mmio(display, dmc_id);
+}
+
+static void assert_dmc_loaded(struct intel_display *display,
+ enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ u32 expected, found;
+ int i;
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
+ return;
+
+ found = intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, 0));
+ expected = dmc->dmc_info[dmc_id].payload[0];
+
+ drm_WARN(display->drm, found != expected,
+ "DMC %d program storage start incorrect (expected 0x%x, current 0x%x)\n",
+ dmc_id, expected, found);
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
+
+ found = intel_de_read(display, reg);
+ expected = dmc_mmiodata(display, dmc, dmc_id, i);
+
+ /* once set DMC_EVT_CTL_ENABLE can't be cleared :/ */
+ if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) {
+ found &= ~DMC_EVT_CTL_ENABLE;
+ expected &= ~DMC_EVT_CTL_ENABLE;
+ }
+
+ drm_WARN(display->drm, found != expected,
+ "DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n",
+ dmc_id, i, i915_mmio_reg_offset(reg), expected, found);
+ }
+}
+
+void assert_main_dmc_loaded(struct intel_display *display)
+{
+ assert_dmc_loaded(display, DMC_FW_MAIN);
+}
+
+static bool need_pipedmc_load_program(struct intel_display *display)
+{
+ /* On TGL/derivatives pipe DMC state is lost when PG1 is disabled */
+ return DISPLAY_VER(display) == 12;
+}
+
+static bool need_pipedmc_load_mmio(struct intel_display *display, enum pipe pipe)
+{
+ /*
+ * PTL:
+ * - pipe A/B DMC doesn't need save/restore
+ * - pipe C/D DMC is in PG0, needs manual save/restore
+ */
+ if (DISPLAY_VER(display) == 30)
+ return pipe >= PIPE_C;
+
+ /*
+ * FIXME LNL unclear, main DMC firmware has the pipe DMC A/B PG0
+ * save/restore, but so far unable to see the loss of pipe DMC state
+ * in action. Are we just failing to turn off PG0 due to some other
+ * SoC level stuff?
+ */
+ if (DISPLAY_VER(display) == 20)
+ return false;
+
+ /*
+ * FIXME BMG untested, main DMC firmware has the
+ * pipe DMC A/B PG0 save/restore...
+ */
+ if (display->platform.battlemage)
+ return false;
+
+ /*
+ * DG2:
+ * - Pipe DMCs presumably in PG0?
+ * - No DC6, and even DC9 doesn't seem to result
+ * in loss of DMC state for whatever reason
+ */
+ if (display->platform.dg2)
+ return false;
+
+ /*
+ * ADL/MTL:
+ * - pipe A/B DMC is in PG0, saved/restored by the main DMC
+ * - pipe C/D DMC is in PG0, needs manual save/restore
+ */
+ if (IS_DISPLAY_VER(display, 13, 14))
+ return pipe >= PIPE_C;
+
+ return false;
+}
+
+static bool can_enable_pipedmc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /*
+ * On TGL/derivatives pipe DMC state is lost when PG1 is disabled.
+ * Do not even enable the pipe DMC when that can happen outside
+ * of driver control (PSR+DC5/6).
+ */
+ if (DISPLAY_VER(display) == 12 && crtc_state->has_psr)
+ return false;
+
+ return true;
+}
+
+void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
+ return;
+
+ if (!can_enable_pipedmc(crtc_state)) {
+ intel_dmc_disable_pipe(crtc_state);
+ return;
+ }
+
+ if (need_pipedmc_load_program(display))
+ dmc_load_program(display, dmc_id);
+ else if (need_pipedmc_load_mmio(display, pipe))
+ dmc_load_mmio(display, dmc_id);
+
+ assert_dmc_loaded(display, dmc_id);
+
+ if (DISPLAY_VER(display) >= 20) {
+ intel_flipq_reset(display, pipe);
+
+ intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display));
+ intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~pipedmc_interrupt_mask(display));
+ }
+
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
+ else
+ intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
+}
+
+void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
+ return;
+
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
+ else
+ intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
+
+ if (DISPLAY_VER(display) >= 20) {
+ intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~0);
+ intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display));
+
+ intel_flipq_reset(display, pipe);
+ }
+}
+
+static void dmc_configure_event(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ unsigned int event_id,
+ bool enable)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ int num_handlers = 0;
+ int i;
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
+ u32 data = dmc->dmc_info[dmc_id].mmiodata[i];
+
+ if (!is_event_handler(display, dmc_id, event_id, reg, data))
+ continue;
+
+ intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable());
+ num_handlers++;
+ }
+
+ drm_WARN_ONCE(display->drm, num_handlers != 1,
+ "DMC %d has %d handlers for event 0x%x\n",
+ dmc_id, num_handlers, event_id);
+}
+
+/**
+ * intel_dmc_block_pkgc() - block PKG C-state
+ * @display: display instance
+ * @pipe: pipe whose register is used for blocking
+ * @block: block/unblock
+ *
+ * This interface is targeted for Wa_16025596647 usage, i.e. to set/clear the
+ * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in the PIPEDMC_BLOCK_PKGC_SW register.
+ */
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block)
+{
+ intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe),
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ?
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0);
+}
+
+/**
+ * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG
+ * C-state exit
+ * @display: display instance
+ * @pipe: pipe whose register is used
+ * @enable: enable/disable
+ *
+ * This interface is targeted for Wa_16025596647 usage, i.e. to start the package C
+ * exit at the start of the undelayed vblank.
+ */
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable)
+{
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ dmc_configure_event(display, dmc_id, PIPEDMC_EVENT_VBLANK, enable);
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @display: display instance
@@ -582,39 +848,27 @@ static u32 dmc_mmiodata(struct intel_display *display,
*/
void intel_dmc_load_program(struct intel_display *display)
{
- struct drm_i915_private *i915 __maybe_unused = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_dmc *dmc = display_to_dmc(display);
enum intel_dmc_id dmc_id;
- u32 i;
if (!intel_dmc_has_payload(display))
return;
- pipedmc_clock_gating_wa(display, true);
-
- disable_all_event_handlers(display);
+ assert_display_rpm_held(display);
- assert_rpm_wakelock_held(&i915->runtime_pm);
-
- preempt_disable();
+ pipedmc_clock_gating_wa(display, true);
for_each_dmc_id(dmc_id) {
- for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
- intel_de_write_fw(display,
- DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
- dmc->dmc_info[dmc_id].payload[i]);
- }
+ dmc_load_program(display, dmc_id);
+ assert_dmc_loaded(display, dmc_id);
}
- preempt_enable();
-
- for_each_dmc_id(dmc_id) {
- for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
- intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
- dmc_mmiodata(display, dmc, dmc_id, i));
- }
- }
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_write(display, DMC_FQ_W2_PTS_CFG_SEL,
+ PIPE_D_DMC_W2_PTS_CONFIG_SELECT(PIPE_D) |
+ PIPE_C_DMC_W2_PTS_CONFIG_SELECT(PIPE_C) |
+ PIPE_B_DMC_W2_PTS_CONFIG_SELECT(PIPE_B) |
+ PIPE_A_DMC_W2_PTS_CONFIG_SELECT(PIPE_A));
power_domains->dc_state = 0;
@@ -632,26 +886,17 @@ void intel_dmc_load_program(struct intel_display *display)
*/
void intel_dmc_disable_program(struct intel_display *display)
{
+ enum intel_dmc_id dmc_id;
+
if (!intel_dmc_has_payload(display))
return;
pipedmc_clock_gating_wa(display, true);
- disable_all_event_handlers(display);
- pipedmc_clock_gating_wa(display, false);
-}
-void assert_dmc_loaded(struct intel_display *display)
-{
- struct intel_dmc *dmc = display_to_dmc(display);
+ for_each_dmc_id(dmc_id)
+ disable_all_event_handlers(display, dmc_id);
- drm_WARN_ONCE(display->drm, !dmc, "DMC not initialized\n");
- drm_WARN_ONCE(display->drm, dmc &&
- !intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
- "DMC program storage start is NULL\n");
- drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_SSP_BASE),
- "DMC SSP Base Not fine\n");
- drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_HTP_SKL),
- "DMC HTP Not fine\n");
+ pipedmc_clock_gating_wa(display, false);
}
static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
@@ -1006,9 +1251,7 @@ static void intel_dmc_runtime_pm_put(struct intel_display *display)
static const char *dmc_fallback_path(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_ALDERLAKE_P(i915))
+ if (display->platform.alderlake_p)
return ADLP_DMC_FALLBACK_PATH;
return NULL;
@@ -1072,7 +1315,6 @@ out:
*/
void intel_dmc_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc *dmc;
if (!HAS_DMC(display))
@@ -1115,7 +1357,7 @@ void intel_dmc_init(struct intel_display *display)
display->dmc.dmc = dmc;
drm_dbg_kms(display->drm, "Loading %s\n", dmc->fw_path);
- queue_work(i915->unordered_wq, &dmc->work);
+ queue_work(display->wq.unordered, &dmc->work);
return;
@@ -1146,6 +1388,17 @@ void intel_dmc_suspend(struct intel_display *display)
intel_dmc_runtime_pm_put(display);
}
+void intel_dmc_wait_fw_load(struct intel_display *display)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+
+ if (!HAS_DMC(display))
+ return;
+
+ if (dmc)
+ flush_work(&dmc->work);
+}
+
/**
* intel_dmc_resume() - init DMC firmware during system resume
* @display: display instance
@@ -1232,18 +1485,56 @@ void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct
DMC_VERSION_MINOR(snapshot->version));
}
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display,
+ bool start_tracking)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ u32 dc5_cur_count;
+
+ if (DISPLAY_VER(dmc->display) < 14)
+ return;
+
+ dc5_cur_count = intel_de_read(dmc->display, DG1_DMC_DEBUG_DC5_COUNT);
+
+ if (!start_tracking)
+ dmc->dc6_allowed.count += dc5_cur_count - dmc->dc6_allowed.dc5_start;
+
+ dmc->dc6_allowed.dc5_start = dc5_cur_count;
+}
+
+static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *count)
+{
+ struct i915_power_domains *power_domains = &display->power.domains;
+ struct intel_dmc *dmc = display_to_dmc(display);
+ bool dc6_enabled;
+
+ if (DISPLAY_VER(display) < 14)
+ return false;
+
+ mutex_lock(&power_domains->lock);
+ dc6_enabled = intel_de_read(display, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6;
+ if (dc6_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
+ *count = dmc->dc6_allowed.count;
+ mutex_unlock(&power_domains->lock);
+
+ return true;
+}
+
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_display *display = m->private;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc *dmc = display_to_dmc(display);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
+ u32 dc6_allowed_count;
if (!HAS_DMC(display))
return -ENODEV;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
@@ -1254,7 +1545,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "Pipe A fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
- str_yes_no(IS_ALDERLAKE_P(i915) ||
+ str_yes_no(display->platform.alderlake_p ||
DISPLAY_VER(display) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEB)));
@@ -1268,7 +1559,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
if (DISPLAY_VER(display) >= 12) {
i915_reg_t dc3co_reg;
- if (IS_DGFX(i915) || DISPLAY_VER(display) >= 14) {
+ if (display->platform.dgfx || DISPLAY_VER(display) >= 14) {
dc3co_reg = DG1_DMC_DEBUG3;
dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
} else {
@@ -1280,14 +1571,18 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "DC3CO count: %d\n",
intel_de_read(display, dc3co_reg));
} else {
- dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
+ dc5_reg = display->platform.broxton ? BXT_DMC_DC3_DC5_COUNT :
SKL_DMC_DC3_DC5_COUNT;
- if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
+ if (!display->platform.geminilake && !display->platform.broxton)
dc6_reg = SKL_DMC_DC5_DC6_COUNT;
}
seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg));
- if (i915_mmio_reg_valid(dc6_reg))
+
+ if (intel_dmc_get_dc6_allowed_count(display, &dc6_allowed_count))
+ seq_printf(m, "DC5 -> DC6 allowed count: %d\n",
+ dc6_allowed_count);
+ else if (i915_mmio_reg_valid(dc6_reg))
seq_printf(m, "DC5 -> DC6 count: %d\n",
intel_de_read(display, dc6_reg));
@@ -1299,7 +1594,7 @@ out:
intel_de_read(display, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL));
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -1313,3 +1608,73 @@ void intel_dmc_debugfs_register(struct intel_display *display)
debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
display, &intel_dmc_debugfs_status_fops);
}
+
+void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
+ u32 tmp = 0, int_vector;
+
+ if (DISPLAY_VER(display) >= 20) {
+ tmp = intel_de_read(display, PIPEDMC_INTERRUPT(pipe));
+ intel_de_write(display, PIPEDMC_INTERRUPT(pipe), tmp);
+
+ if (tmp & PIPEDMC_FLIPQ_PROG_DONE) {
+ spin_lock(&display->drm->event_lock);
+
+ if (crtc->flipq_event) {
+ /*
+ * Update vblank counter/timestamp in case it
+ * hasn't been done yet for this frame.
+ */
+ drm_crtc_accurate_vblank_count(&crtc->base);
+
+ drm_crtc_send_vblank_event(&crtc->base, crtc->flipq_event);
+ crtc->flipq_event = NULL;
+ }
+
+ spin_unlock(&display->drm->event_lock);
+ }
+
+ if (tmp & PIPEDMC_ATS_FAULT)
+ drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC ATS fault\n",
+ crtc->base.base.id, crtc->base.name);
+ if (tmp & PIPEDMC_GTT_FAULT)
+ drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC GTT fault\n",
+ crtc->base.base.id, crtc->base.name);
+ if (tmp & PIPEDMC_ERROR)
+ drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC error\n",
+ crtc->base.base.id, crtc->base.name);
+ }
+
+ int_vector = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK;
+ if (tmp == 0 && int_vector != 0)
+ drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC interrupt vector 0x%x\n",
+ crtc->base.base.id, crtc->base.name, int_vector);
+}
+
+void intel_pipedmc_enable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
+
+ dmc_configure_event(display, dmc_id, event, true);
+}
+
+void intel_pipedmc_disable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
+
+ dmc_configure_event(display, dmc_id, event, false);
+}
+
+u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_dmc *dmc = display_to_dmc(display);
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
+
+ return dmc ? dmc->dmc_info[dmc_id].start_mmioaddr : 0;
+}
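A standalone sketch of the delta-accumulation scheme used by intel_dmc_update_dc6_allowed_count() above: sample the free-running hardware DC5 counter when tracking (re)starts, and fold the delta into the software count whenever DC6 was allowed in between (generic C, not kernel code):

#include <stdint.h>

struct dc6_tracker {
	uint32_t dc5_start;	/* HW counter value at last sample */
	uint32_t count;		/* accumulated entries while DC6 was allowed */
};

static void dc6_tracker_update(struct dc6_tracker *t, uint32_t dc5_cur,
			       int start_tracking)
{
	if (!start_tracking)
		t->count += dc5_cur - t->dc5_start;	/* unsigned wrap-safe */

	t->dc5_start = dc5_cur;	/* restart the tracking window */
}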
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 44cecef98e73..40e9dcb033cc 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -9,15 +9,23 @@
#include <linux/types.h>
enum pipe;
+enum pipedmc_event_id;
struct drm_printer;
+struct intel_crtc;
+struct intel_crtc_state;
struct intel_display;
struct intel_dmc_snapshot;
void intel_dmc_init(struct intel_display *display);
void intel_dmc_load_program(struct intel_display *display);
+void intel_dmc_wait_fw_load(struct intel_display *display);
void intel_dmc_disable_program(struct intel_display *display);
-void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe);
-void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe);
+void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state);
+void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state);
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block);
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable);
void intel_dmc_fini(struct intel_display *display);
void intel_dmc_suspend(struct intel_display *display);
void intel_dmc_resume(struct intel_display *display);
@@ -26,7 +34,18 @@ void intel_dmc_debugfs_register(struct intel_display *display);
struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display);
void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p);
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking);
-void assert_dmc_loaded(struct intel_display *display);
+void assert_main_dmc_loaded(struct intel_display *display);
+
+void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe);
+
+u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc);
+void intel_pipedmc_enable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event);
+void intel_pipedmc_disable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event);
+
#endif /* __INTEL_DMC_H__ */
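An assumed call pattern for the per-pipe event helpers declared above (hypothetical caller, not part of this patch; PIPEDMC_EVENT_VBLANK comes from the enum added in intel_dmc_regs.h below):

static void example_toggle_vblank_event(struct intel_crtc *crtc, bool enable)
{
	if (enable)
		intel_pipedmc_enable_event(crtc, PIPEDMC_EVENT_VBLANK);
	else
		intel_pipedmc_disable_event(crtc, PIPEDMC_EVENT_VBLANK);
}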
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 1bf446f96a10..c5aa49921cb9 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -6,7 +6,273 @@
#ifndef __INTEL_DMC_REGS_H__
#define __INTEL_DMC_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
+
+enum dmc_event_id {
+ DMC_EVENT_TRUE = 0x0,
+ DMC_EVENT_FALSE = 0x1,
+};
+
+enum maindmc_event_id {
+ MAINDMC_EVENT_CMP_ZERO = 0x8,
+ MAINDMC_EVENT_CMP_ODD = 0x9,
+ MAINDMC_EVENT_CMP_NEG = 0xa,
+ MAINDMC_EVENT_CMP_CARRY = 0xb,
+
+ MAINDMC_EVENT_TMR0_DONE = 0x14,
+ MAINDMC_EVENT_TMR1_DONE = 0x15,
+ MAINDMC_EVENT_TMR2_DONE = 0x16,
+ MAINDMC_EVENT_COUNT0_DONE = 0x17,
+ MAINDMC_EVENT_COUNT1_DONE = 0x18,
+ MAINDMC_EVENT_PERF_CNTR_DARBF = 0x19,
+
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_A_TRIGGER = 0x22,
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_B_TRIGGER = 0x23,
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_C_TRIGGER = 0x24,
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_D_TRIGGER = 0x25,
+ MAINDMC_EVENT_1KHZ_FQ_A_TRIGGER = 0x26,
+ MAINDMC_EVENT_1KHZ_FQ_B_TRIGGER = 0x27,
+ MAINDMC_EVENT_1KHZ_FQ_C_TRIGGER = 0x28,
+ MAINDMC_EVENT_1KHZ_FQ_D_TRIGGER = 0x29,
+ MAINDMC_EVENT_SCANLINE_COMP_A = 0x2a,
+ MAINDMC_EVENT_SCANLINE_COMP_B = 0x2b,
+ MAINDMC_EVENT_SCANLINE_COMP_C = 0x2c,
+ MAINDMC_EVENT_SCANLINE_COMP_D = 0x2d,
+ MAINDMC_EVENT_VBLANK_DELAYED_A = 0x2e,
+ MAINDMC_EVENT_VBLANK_DELAYED_B = 0x2f,
+ MAINDMC_EVENT_VBLANK_DELAYED_C = 0x30,
+ MAINDMC_EVENT_VBLANK_DELAYED_D = 0x31,
+ MAINDMC_EVENT_VBLANK_A = 0x32,
+ MAINDMC_EVENT_VBLANK_B = 0x33,
+ MAINDMC_EVENT_VBLANK_C = 0x34,
+ MAINDMC_EVENT_VBLANK_D = 0x35,
+ MAINDMC_EVENT_HBLANK_A = 0x36,
+ MAINDMC_EVENT_HBLANK_B = 0x37,
+ MAINDMC_EVENT_HBLANK_C = 0x38,
+ MAINDMC_EVENT_HBLANK_D = 0x39,
+ MAINDMC_EVENT_VSYNC_A = 0x3a,
+ MAINDMC_EVENT_VSYNC_B = 0x3b,
+ MAINDMC_EVENT_VSYNC_C = 0x3c,
+ MAINDMC_EVENT_VSYNC_D = 0x3d,
+ MAINDMC_EVENT_SCANLINE_A = 0x3e,
+ MAINDMC_EVENT_SCANLINE_B = 0x3f,
+ MAINDMC_EVENT_SCANLINE_C = 0x40,
+ MAINDMC_EVENT_SCANLINE_D = 0x41,
+
+ MAINDMC_EVENT_PLANE1_FLIP_A = 0x42,
+ MAINDMC_EVENT_PLANE2_FLIP_A = 0x43,
+ MAINDMC_EVENT_PLANE3_FLIP_A = 0x44,
+ MAINDMC_EVENT_PLANE4_FLIP_A = 0x45,
+ MAINDMC_EVENT_PLANE5_FLIP_A = 0x46,
+ MAINDMC_EVENT_PLANE6_FLIP_A = 0x47,
+ MAINDMC_EVENT_PLANE7_FLIP_A = 0x48,
+ MAINDMC_EVENT_PLANE1_FLIP_B = 0x49,
+ MAINDMC_EVENT_PLANE2_FLIP_B = 0x4a,
+ MAINDMC_EVENT_PLANE3_FLIP_B = 0x4b,
+ MAINDMC_EVENT_PLANE4_FLIP_B = 0x4c,
+ MAINDMC_EVENT_PLANE5_FLIP_B = 0x4d,
+ MAINDMC_EVENT_PLANE6_FLIP_B = 0x4e,
+ MAINDMC_EVENT_PLANE7_FLIP_B = 0x4f,
+ MAINDMC_EVENT_PLANE1_FLIP_C = 0x50,
+ MAINDMC_EVENT_PLANE2_FLIP_C = 0x51,
+ MAINDMC_EVENT_PLANE3_FLIP_C = 0x52,
+ MAINDMC_EVENT_PLANE4_FLIP_C = 0x53,
+ MAINDMC_EVENT_PLANE5_FLIP_C = 0x54,
+ MAINDMC_EVENT_PLANE6_FLIP_C = 0x55,
+ MAINDMC_EVENT_PLANE7_FLIP_C = 0x56,
+ MAINDMC_EVENT_PLANE1_FLIP_D = 0x57,
+ MAINDMC_EVENT_PLANE2_FLIP_D = 0x58,
+ MAINDMC_EVENT_PLANE3_FLIP_D = 0x59,
+ MAINDMC_EVENT_PLANE4_FLIP_D = 0x5a,
+ MAINDMC_EVENT_PLANE5_FLIP_D = 0x5b,
+ MAINDMC_EVENT_PLANE6_FLIP_D = 0x5c,
+ MAINDMC_EVENT_PLANE7_FLIP_D = 0x5d,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_A = 0x5e,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_A = 0x5f,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_A = 0x60,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_A = 0x61,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_A = 0x62,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_A = 0x63,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_A = 0x64,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_B = 0x65,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_B = 0x66,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_B = 0x67,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_B = 0x68,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_B = 0x69,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_B = 0x6a,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_B = 0x6b,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_C = 0x6c,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_C = 0x6d,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_C = 0x6e,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_C = 0x6f,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_C = 0x70,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_C = 0x71,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_C = 0x72,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_D = 0x73,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_D = 0x74,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_D = 0x75,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_D = 0x76,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_D = 0x77,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_D = 0x78,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_D = 0x79,
+
+ MAINDMC_EVENT_WIDI_GTT_FAULT_SL1 = 0x7d,
+ MAINDMC_EVENT_WIDI_GTT_FAULT_SL2 = 0x7e,
+ MAINDMC_EVENT_WIDI_CAP_ACTIVE_SL1 = 0x7f,
+ MAINDMC_EVENT_WIDI_CAP_ACTIVE_SL2 = 0x80,
+
+ MAINDMC_EVENT_RENUKE_A = 0x85,
+ MAINDMC_EVENT_RENUKE_B = 0x86,
+ MAINDMC_EVENT_RENUKE_C = 0x87,
+ MAINDMC_EVENT_RENUKE_D = 0x88,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_A = 0x89,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_B = 0x8a,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_C = 0x8b,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_D = 0x8c,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_A = 0x8d,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_B = 0x8e,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_C = 0x8f,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_D = 0x90,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_A = 0x91,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_B = 0x92,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_C = 0x93,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_D = 0x94,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_A = 0x95,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_B = 0x96,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_C = 0x97,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_D = 0x98,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_A = 0x99,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_B = 0x9a,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_C = 0x9b,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_D = 0x9c,
+ MAINDMC_EVENT_DISP_PCH_INT = 0x9d,
+ MAINDMC_EVENT_GTT_ERR = 0x9e,
+ MAINDMC_EVENT_VTD_ERR = 0x9f,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_A = 0xa0,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_B = 0xa1,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_C = 0xa2,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_D = 0xa3,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_A = 0xa4,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_B = 0xa5,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_C = 0xa6,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_D = 0xa7,
+
+ MAINDMC_EVENT_DC_CLOCK_OFF_START_EDP = 0xb2,
+ MAINDMC_EVENT_DC_CLOCK_OFF_START_DSI = 0xb3,
+ MAINDMC_EVENT_DCPR_DMC_CSR_START = 0xb4,
+ MAINDMC_EVENT_IN_PSR = 0xb5,
+
+ MAINDMC_EVENT_IN_MEMUP = 0xb7,
+ MAINDMC_EVENT_IN_VGA = 0xb8,
+
+ MAINDMC_EVENT_IN_KVM_SESSION = 0xba,
+ MAINDMC_EVENT_DEWAKE = 0xbb,
+
+ MAINDMC_EVENT_TRAP_HIT = 0xbd,
+ MAINDMC_EVENT_CLK_USEC = 0xbe,
+ MAINDMC_EVENT_CLK_MSEC = 0xbf,
+
+ MAINDMC_EVENT_CHICKEN1 = 0xc8,
+ MAINDMC_EVENT_CHICKEN2 = 0xc9,
+ MAINDMC_EVENT_CHICKEN3 = 0xca,
+ MAINDMC_EVENT_DDT_UBP = 0xcb,
+
+ MAINDMC_EVENT_HP_LATENCY = 0xcd,
+ MAINDMC_EVENT_LP_LATENCY = 0xce,
+ MAINDMC_EVENT_WIDI_LP_REQ_SL1 = 0xcf,
+ MAINDMC_EVENT_WIDI_LP_REQ_SL2 = 0xd0,
+
+ MAINDMC_EVENT_DG_DMC_EVT_0 = 0xd3,
+ MAINDMC_EVENT_DG_DMC_EVT_1 = 0xd4,
+ MAINDMC_EVENT_DG_DMC_EVT_2 = 0xd5,
+ MAINDMC_EVENT_DG_DMC_EVT_3 = 0xd6,
+ MAINDMC_EVENT_DG_DMC_EVT_4 = 0xd7,
+ MAINDMC_EVENT_DACFE_CLK_STOP = 0xd8,
+ MAINDMC_EVENT_DACFE_AZILIA_SDI_WAKE = 0xd9,
+ MAINDMC_EVENT_AUDIO_DOUBLE_FUNC_GRP_RST = 0xda,
+ MAINDMC_EVENT_AUDIO_CMD_VALID = 0xdb,
+ MAINDMC_EVENT_AUDIO_FRM_SYNC_BCLK = 0xdc,
+ MAINDMC_EVENT_AUDIO_FRM_SYNC_CDCLK = 0xdd,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_A = 0xde,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_B = 0xdf,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_C = 0xe0,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_E = 0xe1,
+ MAINDMC_EVENT_CMTG_SCANLINE_IN_GB_DC6v = 0xe2,
+ MAINDMC_EVENT_DCPR_CMTG_SCANLINE_OUTSIDE_GB = 0xe3,
+ MAINDMC_EVENT_DC6v_BACKWARD_COMPAT = 0xe4,
+ MAINDMC_EVENT_DPMA_PM_ABORT = 0xe5,
+
+ MAINDMC_EVENT_STACK_OVF = 0xfc,
+ MAINDMC_EVENT_NO_CLAIM = 0xfd,
+ MAINDMC_EVENT_UNK_CMD = 0xfe,
+ MAINDMC_EVENT_HTP_MOD = 0xff,
+};
+
+enum pipedmc_event_id {
+ PIPEDMC_EVENT_TMR0_DONE = 0x14,
+ PIPEDMC_EVENT_TMR1_DONE = 0x15,
+ PIPEDMC_EVENT_TMR2_DONE = 0x16,
+ PIPEDMC_EVENT_COUNT0_DONE = 0x17,
+ PIPEDMC_EVENT_COUNT1_DONE = 0x18,
+ PIPEDMC_EVENT_PGA_PGB_RESTORE_DONE = 0x19,
+ PIPEDMC_EVENT_PG1_PG2_RESTORE_DONE = 0x1a,
+ PIPEDMC_EVENT_PGA_PGB_SAVE_DONE = 0x1b,
+ PIPEDMC_EVENT_PG1_PG2_SAVE_DONE = 0x1c,
+
+ PIPEDMC_EVENT_FULL_FQ_WAKE_TRIGGER = 0x2b,
+ PIPEDMC_EVENT_1KHZ_FQ_TRIGGER = 0x2c,
+ PIPEDMC_EVENT_SCANLINE_INRANGE_FQ_TRIGGER = 0x2d,
+ PIPEDMC_EVENT_SCANLINE_INRANGE = 0x2e,
+ PIPEDMC_EVENT_SCANLINE_OUTRANGE = 0x2f,
+ PIPEDMC_EVENT_SCANLINE_EQUAL = 0x30,
+ PIPEDMC_EVENT_DELAYED_VBLANK = 0x31,
+ PIPEDMC_EVENT_VBLANK = 0x32,
+ PIPEDMC_EVENT_HBLANK = 0x33,
+ PIPEDMC_EVENT_VSYNC = 0x34,
+ PIPEDMC_EVENT_SCANLINE_FROM_DMUX = 0x35,
+ PIPEDMC_EVENT_PLANE1_FLIP = 0x36,
+ PIPEDMC_EVENT_PLANE2_FLIP = 0x37,
+ PIPEDMC_EVENT_PLANE3_FLIP = 0x38,
+ PIPEDMC_EVENT_PLANE4_FLIP = 0x39,
+ PIPEDMC_EVENT_PLANE5_FLIP = 0x3a,
+ PIPEDMC_EVENT_PLANE6_FLIP = 0x3b,
+ PIPEDMC_EVENT_PLANE7_FLIP = 0x3c,
+ PIPEDMC_EVENT_ADAPTIVE_DCB_TRIGGER = 0x3d,
+
+ PIPEDMC_EVENT_PLANE1_FLIP_DONE = 0x56,
+ PIPEDMC_EVENT_PLANE2_FLIP_DONE = 0x57,
+ PIPEDMC_EVENT_PLANE3_FLIP_DONE = 0x58,
+ PIPEDMC_EVENT_PLANE4_FLIP_DONE = 0x59,
+ PIPEDMC_EVENT_PLANE5_FLIP_DONE = 0x5a,
+ PIPEDMC_EVENT_PLANE6_FLIP_DONE = 0x5b,
+ PIPEDMC_EVENT_PLANE7_FLIP_DONE = 0x5c,
+
+ PIPEDMC_EVENT_GTT_ERR = 0x9b,
+
+ PIPEDMC_EVENT_IN_PSR = 0xb5,
+ PIPEDMC_EVENT_DSI_DMC_IDLE = 0xb6,
+ PIPEDMC_EVENT_PSR2_DMC_IDLE = 0xb7,
+ PIPEDMC_EVENT_IN_VGA = 0xb8,
+
+ PIPEDMC_EVENT_TRAP_HIT = 0xbd,
+ PIPEDMC_EVENT_CLK_USEC = 0xbe,
+ PIPEDMC_EVENT_CLK_MSEC = 0xbf,
+
+ PIPEDMC_EVENT_CHICKEN1 = 0xc8,
+ PIPEDMC_EVENT_CHICKEN2 = 0xc9,
+ PIPEDMC_EVENT_CHICKEN3 = 0xca,
+ PIPEDMC_EVENT_DDT_UBP = 0xcb,
+
+ PIPEDMC_EVENT_LP_LATENCY = 0xce,
+
+ PIPEDMC_EVENT_LACE_PART_A_HIST_TRIGGER = 0xdf,
+ PIPEDMC_EVENT_LACE_PART_B_HIST_TRIGGER = 0xe0,
+
+ PIPEDMC_EVENT_STACK_OVF = 0xfc,
+ PIPEDMC_EVENT_NO_CLAIM = 0xfd,
+ PIPEDMC_EVENT_UNK_CMD = 0xfe,
+ PIPEDMC_EVENT_HTP_MOD = 0xff,
+};
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
@@ -21,6 +287,179 @@
#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
+#define _PIPEDMC_LOAD_HTP_A 0x5f000
+#define _PIPEDMC_LOAD_HTP_B 0x5f400
+#define PIPEDMC_LOAD_HTP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_LOAD_HTP_A, _PIPEDMC_LOAD_HTP_B)
+
+#define _PIPEDMC_CTL_A 0x5f064
+#define _PIPEDMC_CTL_B 0x5f464
+#define PIPEDMC_CTL(pipe) _MMIO_PIPE((pipe), _PIPEDMC_CTL_A, _PIPEDMC_CTL_B)
+#define PIPEDMC_HALT REG_BIT(31)
+#define PIPEDMC_STEP REG_BIT(27)
+#define PIPEDMC_CLOCKGATE REG_BIT(23)
+
+#define _PIPEDMC_STATUS_A 0x5f06c
+#define _PIPEDMC_STATUS_B 0x5f46c
+#define PIPEDMC_STATUS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_STATUS_A, _PIPEDMC_STATUS_B)
+#define PIPEDMC_SSP REG_GENMASK(31, 16)
+#define PIPEDMC_INT_VECTOR_MASK REG_GENMASK(15, 8)
+/* PIPEDMC_INT_VECTOR values defined by firmware */
+#define PIPEDMC_INT_VECTOR_SCANLINE_COMP_ERROR REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0x1)
+#define PIPEDMC_INT_VECTOR_DC6V_FLIPQ_OVERLAP_ERROR REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0x2)
+#define PIPEDMC_INT_VECTOR_FLIPQ_PROG_DONE REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0xff) /* Wa_16018781658:lnl[a0] */
+#define PIPEDMC_EVT_PENDING REG_GENMASK(7, 0)
+
+#define _PIPEDMC_FQ_CTRL_A 0x5f078
+#define _PIPEDMC_FQ_CTRL_B 0x5f478
+#define PIPEDMC_FQ_CTRL(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FQ_CTRL_A, _PIPEDMC_FQ_CTRL_B)
+#define PIPEDMC_FQ_CTRL_ENABLE REG_BIT(31)
+#define PIPEDMC_FQ_CTRL_ASYNC REG_BIT(29)
+#define PIPEDMC_FQ_CTRL_PREEMPT REG_BIT(0)
+
+#define _PIPEDMC_FQ_STATUS_A 0x5f098
+#define _PIPEDMC_FQ_STATUS_B 0x5f498
+#define PIPEDMC_FQ_STATUS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FQ_STATUS_A, _PIPEDMC_FQ_STATUS_B)
+#define PIPEDMC_FQ_STATUS_BUSY REG_BIT(31)
+#define PIPEDMC_FQ_STATUS_W2_LIVE_STATUS REG_BIT(1)
+#define PIPEDMC_FQ_STATUS_W1_LIVE_STATUS REG_BIT(0)
+
+#define _PIPEDMC_FPQ_ATOMIC_TP_A 0x5f0a0
+#define _PIPEDMC_FPQ_ATOMIC_TP_B 0x5f4a0
+#define PIPEDMC_FPQ_ATOMIC_TP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_ATOMIC_TP_A, _PIPEDMC_FPQ_ATOMIC_TP_B)
+#define PIPEDMC_FPQ_PLANEQ_3_TP_MASK REG_GENMASK(31, 26)
+#define PIPEDMC_FPQ_PLANEQ_3_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_3_TP_MASK, (tail))
+#define PIPEDMC_FPQ_PLANEQ_2_TP_MASK REG_GENMASK(24, 19)
+#define PIPEDMC_FPQ_PLANEQ_2_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_2_TP_MASK, (tail))
+#define PIPEDMC_FPQ_PLANEQ_1_TP_MASK REG_GENMASK(17, 12)
+#define PIPEDMC_FPQ_PLANEQ_1_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_1_TP_MASK, (tail))
+#define PIPEDMC_FPQ_FASTQ_TP_MASK REG_GENMASK(10, 6)
+#define PIPEDMC_FPQ_FASTQ_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_FASTQ_TP_MASK, (tail))
+#define PIPEDMC_FPQ_GENERALQ_TP_MASK REG_GENMASK(4, 0)
+#define PIPEDMC_FPQ_GENERALQ_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_GENERALQ_TP_MASK, (tail))
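
For illustration only, a sketch of publishing a new general queue tail pointer through the atomic register above, assuming the usual intel_de_write() MMIO path; the actual flip queue code lives elsewhere in this series:

	static void pipedmc_fpq_write_general_tp(struct intel_display *display,
						 enum pipe pipe, int tail)
	{
		/* a single atomic write updates the general queue tail pointer */
		intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(pipe),
			       PIPEDMC_FPQ_GENERALQ_TP(tail));
	}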
+
+#define _PIPEDMC_FPQ_LINES_TO_W1_A 0x5f0a4
+#define _PIPEDMC_FPQ_LINES_TO_W1_B 0x5f4a4
+#define PIPEDMC_FPQ_LINES_TO_W1(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_LINES_TO_W1_A, _PIPEDMC_FPQ_LINES_TO_W1_B)
+
+#define _PIPEDMC_FPQ_LINES_TO_W2_A 0x5f0a8
+#define _PIPEDMC_FPQ_LINES_TO_W2_B 0x5f4a8
+#define PIPEDMC_FPQ_LINES_TO_W2(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_LINES_TO_W2_A, _PIPEDMC_FPQ_LINES_TO_W2_B)
+
+#define _PIPEDMC_SCANLINECMP_A 0x5f11c
+#define _PIPEDMC_SCANLINECMP_B 0x5f51c
+#define PIPEDMC_SCANLINECMP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMP_A, _PIPEDMC_SCANLINECMP_B)
+#define PIPEDMC_SCANLINECMP_EN REG_BIT(31)
+#define PIPEDMC_SCANLINE_NUMBER REG_GENMASK(20, 0)
+
+#define _PIPEDMC_SCANLINECMPLOWER_A 0x5f120
+#define _PIPEDMC_SCANLINECMPLOWER_B 0x5f520
+#define PIPEDMC_SCANLINECMPLOWER(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMPLOWER_A, _PIPEDMC_SCANLINECMPLOWER_B)
+#define PIPEDMC_SCANLINEINRANGECMP_EN REG_BIT(31)
+#define PIPEDMC_SCANLINEOUTRANGECMP_EN REG_BIT(30)
+#define PIPEDMC_SCANLINE_LOWER_MASK REG_GENMASK(20, 0)
+#define PIPEDMC_SCANLINE_LOWER(scanline) REG_FIELD_PREP(PIPEDMC_SCANLINE_LOWER_MASK, (scanline))
+
+#define _PIPEDMC_SCANLINECMPUPPER_A 0x5f124
+#define _PIPEDMC_SCANLINECMPUPPER_B 0x5f524
+#define PIPEDMC_SCANLINECMPUPPER(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMPUPPER_A, _PIPEDMC_SCANLINECMPUPPER_B)
+#define PIPEDMC_SCANLINE_UPPER_MASK REG_GENMASK(20, 0)
+#define PIPEDMC_SCANLINE_UPPER(scanline) REG_FIELD_PREP(PIPEDMC_SCANLINE_UPPER_MASK, (scanline))
+
+#define _MMIO_PIPEDMC_FPQ(pipe, fq_id, \
+ reg_fpq1_a, reg_fpq2_a, reg_fpq3_a, reg_fpq4_a, \
+ reg_fpq1_b, reg_fpq2_b, reg_fpq3_b, reg_fpq4_b) \
+ _MMIO(_PICK_EVEN_2RANGES((fq_id), INTEL_FLIPQ_PLANE_3, \
+ _PIPE((pipe), (reg_fpq1_a), (reg_fpq1_b)), \
+ _PIPE((pipe), (reg_fpq2_a), (reg_fpq2_b)), \
+ _PIPE((pipe), (reg_fpq3_a), (reg_fpq3_b)), \
+ _PIPE((pipe), (reg_fpq4_a), (reg_fpq4_b))))
+
+#define _PIPEDMC_FPQ1_HP_A 0x5f128
+#define _PIPEDMC_FPQ2_HP_A 0x5f138
+#define _PIPEDMC_FPQ3_HP_A 0x5f168
+#define _PIPEDMC_FPQ4_HP_A 0x5f174
+#define _PIPEDMC_FPQ5_HP_A 0x5f180
+#define _PIPEDMC_FPQ1_HP_B 0x5f528
+#define _PIPEDMC_FPQ2_HP_B 0x5f538
+#define _PIPEDMC_FPQ3_HP_B 0x5f568
+#define _PIPEDMC_FPQ4_HP_B 0x5f574
+#define _PIPEDMC_FPQ5_HP_B 0x5f580
+#define PIPEDMC_FPQ_HP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \
+ _PIPEDMC_FPQ1_HP_A, _PIPEDMC_FPQ2_HP_A, \
+ _PIPEDMC_FPQ3_HP_A, _PIPEDMC_FPQ4_HP_A, \
+ _PIPEDMC_FPQ1_HP_B, _PIPEDMC_FPQ2_HP_B, \
+ _PIPEDMC_FPQ3_HP_B, _PIPEDMC_FPQ4_HP_B)
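
Usage is then a plain lookup. In this illustrative line, only INTEL_FLIPQ_PLANE_3 is confirmed by the macro above; INTEL_FLIPQ_PLANE_1 is an assumed member of the same flip queue id enum:

	/* head pointer register of plane queue 1 on pipe B (illustrative) */
	i915_reg_t reg = PIPEDMC_FPQ_HP(PIPE_B, INTEL_FLIPQ_PLANE_1);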
+
+#define _PIPEDMC_FPQ1_TP_A 0x5f12c
+#define _PIPEDMC_FPQ2_TP_A 0x5f13c
+#define _PIPEDMC_FPQ3_TP_A 0x5f16c
+#define _PIPEDMC_FPQ4_TP_A 0x5f178
+#define _PIPEDMC_FPQ5_TP_A 0x5f184
+#define _PIPEDMC_FPQ1_TP_B 0x5f52c
+#define _PIPEDMC_FPQ2_TP_B 0x5f53c
+#define _PIPEDMC_FPQ3_TP_B 0x5f56c
+#define _PIPEDMC_FPQ4_TP_B 0x5f578
+#define _PIPEDMC_FPQ5_TP_B 0x5f584
+#define PIPEDMC_FPQ_TP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \
+ _PIPEDMC_FPQ1_TP_A, _PIPEDMC_FPQ2_TP_A, \
+ _PIPEDMC_FPQ3_TP_A, _PIPEDMC_FPQ4_TP_A, \
+ _PIPEDMC_FPQ1_TP_B, _PIPEDMC_FPQ2_TP_B, \
+ _PIPEDMC_FPQ3_TP_B, _PIPEDMC_FPQ4_TP_B)
+
+#define _PIPEDMC_FPQ1_CHP_A 0x5f130
+#define _PIPEDMC_FPQ2_CHP_A 0x5f140
+#define _PIPEDMC_FPQ3_CHP_A 0x5f170
+#define _PIPEDMC_FPQ4_CHP_A 0x5f17c
+#define _PIPEDMC_FPQ5_CHP_A 0x5f188
+#define _PIPEDMC_FPQ1_CHP_B 0x5f530
+#define _PIPEDMC_FPQ2_CHP_B 0x5f540
+#define _PIPEDMC_FPQ3_CHP_B 0x5f570
+#define _PIPEDMC_FPQ4_CHP_B 0x5f57c
+#define _PIPEDMC_FPQ5_CHP_B 0x5f588
+#define PIPEDMC_FPQ_CHP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \
+ _PIPEDMC_FPQ1_CHP_A, _PIPEDMC_FPQ2_CHP_A, \
+ _PIPEDMC_FPQ3_CHP_A, _PIPEDMC_FPQ4_CHP_A, \
+ _PIPEDMC_FPQ1_CHP_B, _PIPEDMC_FPQ2_CHP_B, \
+ _PIPEDMC_FPQ3_CHP_B, _PIPEDMC_FPQ4_CHP_B)
+
+#define _PIPEDMC_FPQ_TS_A 0x5f134
+#define _PIPEDMC_FPQ_TS_B 0x5f534
+#define PIPEDMC_FPQ_TS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_TS_A, _PIPEDMC_FPQ_TS_B)
+
+#define _PIPEDMC_SCANLINE_RO_A 0x5f144
+#define _PIPEDMC_SCANLINE_RO_B 0x5f544
+#define PIPEDMC_SCANLINE_RO(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINE_RO_A, _PIPEDMC_SCANLINE_RO_B)
+
+#define _PIPEDMC_FPQ_CTL1_A 0x5f160
+#define _PIPEDMC_FPQ_CTL1_B 0x5f560
+#define PIPEDMC_FPQ_CTL1(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_CTL1_A, _PIPEDMC_FPQ_CTL1_B)
+#define PIPEDMC_SW_DMC_WAKE REG_BIT(0)
+
+#define _PIPEDMC_FPQ_CTL2_A 0x5f164
+#define _PIPEDMC_FPQ_CTL2_B 0x5f564
+#define PIPEDMC_FPQ_CTL2(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_CTL2_A, _PIPEDMC_FPQ_CTL2_B)
+#define PIPEDMC_DMC_INT_AT_DELAYED_VBLANK REG_BIT(1)
+#define PIPEDMC_W1_DMC_WAKE REG_BIT(0)
+
+#define _PIPEDMC_INTERRUPT_A 0x5f190 /* lnl+ */
+#define _PIPEDMC_INTERRUPT_B 0x5f590 /* lnl+ */
+#define PIPEDMC_INTERRUPT(pipe) _MMIO_PIPE((pipe), _PIPEDMC_INTERRUPT_A, _PIPEDMC_INTERRUPT_B)
+#define _PIPEDMC_INTERRUPT_MASK_A 0x5f194 /* lnl+ */
+#define _PIPEDMC_INTERRUPT_MASK_B 0x5f594 /* lnl+ */
+#define PIPEDMC_INTERRUPT_MASK(pipe) _MMIO_PIPE((pipe), _PIPEDMC_INTERRUPT_MASK_A, _PIPEDMC_INTERRUPT_MASK_B)
+#define PIPEDMC_FLIPQ_PROG_DONE REG_BIT(3)
+#define PIPEDMC_ERROR REG_BIT(2)
+#define PIPEDMC_GTT_FAULT REG_BIT(1)
+#define PIPEDMC_ATS_FAULT REG_BIT(0)
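
A hedged sketch of servicing these bits, assuming the lnl+ interrupt register follows the usual write-1-to-clear convention; the patch does not include the handler here:

	static void pipedmc_irq_handler(struct intel_display *display, enum pipe pipe)
	{
		u32 tmp;

		/* read, then ack, the pending PIPEDMC interrupt bits */
		tmp = intel_de_read(display, PIPEDMC_INTERRUPT(pipe));
		intel_de_write(display, PIPEDMC_INTERRUPT(pipe), tmp);

		if (tmp & (PIPEDMC_ATS_FAULT | PIPEDMC_GTT_FAULT | PIPEDMC_ERROR))
			drm_err(display->drm, "[PIPE %c] PIPEDMC fault: 0x%08x\n",
				pipe_name(pipe), tmp);
	}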
+
+#define PIPEDMC_BLOCK_PKGC_SW_A 0x5f1d0
+#define PIPEDMC_BLOCK_PKGC_SW_B 0x5f5d0
+#define PIPEDMC_BLOCK_PKGC_SW(pipe) _MMIO_PIPE((pipe), \
+ PIPEDMC_BLOCK_PKGC_SW_A, \
+ PIPEDMC_BLOCK_PKGC_SW_B)
+#define PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS BIT(31)
+#define PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_UNTIL_NEXT_FRAMESTART BIT(15)
+
#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
@@ -57,12 +496,7 @@
#define DMC_EVT_CTL_TYPE_LEVEL_1 1
#define DMC_EVT_CTL_TYPE_EDGE_1_0 2
#define DMC_EVT_CTL_TYPE_EDGE_0_1 3
-
#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
-#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
-#define DMC_EVT_CTL_EVENT_ID_VBLANK_A 0x32 /* main DMC */
-/* An event handler scheduled to run at a 1 kHz frequency. */
-#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
#define DMC_HTP_ADDR_SKL 0x00500034
#define DMC_SSP_BASE _MMIO(0x8F074)
@@ -103,4 +537,51 @@
#define DMC_WAKELOCK_CTL_REQ REG_BIT(31)
#define DMC_WAKELOCK_CTL_ACK REG_BIT(15)
+#define DMC_FQ_W2_PTS_CFG_SEL _MMIO(0x8f240)
+#define PIPE_D_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(26, 24)
+#define PIPE_D_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_D_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+#define PIPE_C_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(18, 16)
+#define PIPE_C_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_C_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+#define PIPE_B_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(10, 8)
+#define PIPE_B_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_B_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+#define PIPE_A_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(2, 0)
+#define PIPE_A_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_A_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
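
A hedged example of programming one of the 3-bit selects above with intel_de_rmw(); the field semantics are only inferred from the macro parameter name, so the value is illustrative:

	intel_de_rmw(display, DMC_FQ_W2_PTS_CFG_SEL,
		     PIPE_A_DMC_W2_PTS_CONFIG_SELECT_MASK,
		     PIPE_A_DMC_W2_PTS_CONFIG_SELECT(PIPE_A));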
+
+/* plane/general flip queue entries */
+#define PIPEDMC_FQ_RAM(start_mmioaddr, i) _MMIO((start_mmioaddr) + (i) * 4)
+/* LNL */
+/* DW0 pts */
+/* DW1 head */
+/* DW2 size/etc. */
+#define LNL_FQ_INTERRUPT REG_BIT(31)
+#define LNL_FQ_DSB_ID_MASK REG_GENMASK(30, 29)
+#define LNL_FQ_DSB_ID(dsb_id) REG_FIELD_PREP(LNL_FQ_DSB_ID_MASK, (dsb_id))
+#define LNL_FQ_EXECUTED REG_BIT(28)
+#define LNL_FQ_DSB_SIZE_MASK REG_GENMASK(15, 0)
+#define LNL_FQ_DSB_SIZE(size) REG_FIELD_PREP(LNL_FQ_DSB_SIZE_MASK, (size))
+/* DW3 reserved (plane queues) */
+/* DW3 second DSB head (general queue) */
+/* DW4 second DSB size/etc. (general queue) */
+/* DW5 reserved (general queue) */
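
To make the DW2 layout concrete, a minimal sketch packing an LNL queue entry word purely from the masks above:

	static u32 lnl_fq_dw2(int dsb_id, int dsb_size, bool interrupt)
	{
		return (interrupt ? LNL_FQ_INTERRUPT : 0) |
			LNL_FQ_DSB_ID(dsb_id) |
			LNL_FQ_DSB_SIZE(dsb_size);
	}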
+
+/* PTL+ */
+/* DW0 pts */
+/* DW1 reserved */
+/* DW2 size/etc. */
+#define PTL_FQ_INTERRUPT REG_BIT(31)
+#define PTL_FQ_NEED_PUSH REG_BIT(30)
+#define PTL_FQ_BLOCK_PUSH REG_BIT(29)
+#define PTL_FQ_EXECUTED REG_BIT(28)
+#define PTL_FQ_DSB_ID_MASK REG_GENMASK(25, 24)
+#define PTL_FQ_DSB_ID(dsb_id) REG_FIELD_PREP(PTL_FQ_DSB_ID_MASK, (dsb_id))
+#define PTL_FQ_DSB_SIZE_MASK REG_GENMASK(15, 0)
+#define PTL_FQ_DSB_SIZE(size) REG_FIELD_PREP(PTL_FQ_DSB_SIZE_MASK, (size))
+/* DW3 head */
+/* DW4 second DSB size/etc. (general queue) */
+/* DW5 second DSB head (general queue) */
+
+/* undocumented magic DMC variables */
+#define PTL_PIPEDMC_EXEC_TIME_LINES(start_mmioaddr) _MMIO((start_mmioaddr) + 0x6b8)
+#define PTL_PIPEDMC_END_OF_EXEC_GB(start_mmioaddr) _MMIO((start_mmioaddr) + 0x6c0)
+
#endif /* __INTEL_DMC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 7e2ce0c2f6c3..b3bb89ba34f9 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -7,9 +7,8 @@
#include <drm/drm_print.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"
@@ -155,12 +154,11 @@ static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
static void __intel_dmc_wl_release(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc_wl *wl = &display->wl;
WARN_ON(refcount_read(&wl->refcount));
- queue_delayed_work(i915->unordered_wq, &wl->work,
+ queue_delayed_work(display->wq.unordered, &wl->work,
msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 392c3653d0d7..7976fec88606 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -27,6 +27,8 @@
#include <linux/export.h>
#include <linux/i2c.h>
+#include <linux/log2.h>
+#include <linux/math.h>
#include <linux/notifier.h>
#include <linux/seq_buf.h>
#include <linux/slab.h>
@@ -34,7 +36,6 @@
#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
-
#include <asm/byteorder.h>
#include <drm/display/drm_dp_helper.h>
@@ -45,12 +46,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_drv.h"
-#include "i915_irq.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -58,10 +58,13 @@
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -87,12 +90,10 @@
#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
-#include "intel_runtime_pm.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
-#include "intel_crtc_state_dump.h"
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
@@ -846,7 +847,7 @@ small_joiner_ram_size_bits(struct intel_display *display)
return 6144 * 8;
}
-u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp)
+static u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp)
{
u32 bits_per_pixel = bpp;
int i;
@@ -938,6 +939,7 @@ static u32 ultrajoiner_ram_max_bpp(u32 mode_hdisplay)
return ultrajoiner_ram_bits() / mode_hdisplay;
}
+/* TODO: return a bpp_x16 value */
static
u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
u32 mode_clock, u32 mode_hdisplay,
@@ -954,6 +956,7 @@ u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
return max_bpp;
}
+/* TODO: return a bpp_x16 value */
u16 intel_dp_dsc_get_max_compressed_bpp(struct intel_display *display,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
@@ -1194,7 +1197,7 @@ intel_dp_output_format(struct intel_connector *connector,
int intel_dp_min_bpp(enum intel_output_format output_format)
{
if (output_format == INTEL_OUTPUT_FORMAT_RGB)
- return 6 * 3;
+ return intel_display_min_pipe_bpp();
else
return 8 * 3;
}
@@ -1603,6 +1606,12 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /* FIXME g4x can't generate an exact 2.7GHz with the 96MHz non-SSC refclk */
+ if (display->platform.g4x && port_clock == 268800)
+ port_clock = 270000;
+
/* eDP 1.4 rate select method. */
if (intel_dp->use_rate_select) {
*link_bw = 0;
@@ -2066,7 +2075,7 @@ int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector
pipe_config, bpc) >> 4;
}
-static int dsc_src_min_compressed_bpp(void)
+int intel_dp_dsc_min_src_compressed_bpp(void)
{
/* Min Compressed bpp supported by source is 8 */
return 8;
@@ -2098,7 +2107,7 @@ static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
/*
* Note: for pre-13 display you still need to check the validity of each step.
*/
-static int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
+int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
u8 incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
@@ -2106,12 +2115,19 @@ static int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
if (DISPLAY_VER(display) < 14 || !incr)
return fxp_q4_from_int(1);
+ if (connector->mst.dp &&
+ !connector->link.force_bpp_x16 && !connector->mst.dp->force_dsc_fractional_bpp_en)
+ return fxp_q4_from_int(1);
+
/* fxp q4 */
return fxp_q4_from_int(1) / incr;
}
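
Worked example of the q4 step above: fxp_q4_from_int(1) is 16, so a sink reporting incr == 1 yields a step of 16 (1 bpp), incr == 2 yields 8 (1/2 bpp), and incr == 16 yields 1 (1/16 bpp). The new MST guard pins the step to whole bpp unless a bpp is forced on the connector or fractional DSC bpp is force-enabled.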
-/* Note: This is not universally usable! */
-static bool intel_dp_dsc_valid_bpp(struct intel_dp *intel_dp, int bpp_x16)
+/*
+ * Note: for bpp_x16 to be valid it must be also within the source/sink's
+ * min..max bpp capability range.
+ */
+bool intel_dp_dsc_valid_compressed_bpp(struct intel_dp *intel_dp, int bpp_x16)
{
struct intel_display *display = to_intel_display(intel_dp);
int i;
@@ -2149,24 +2165,16 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
const struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int output_bpp;
- int dsc_min_bpp;
- int dsc_max_bpp;
int min_bpp_x16, max_bpp_x16, bpp_step_x16;
int dsc_joiner_max_bpp;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
int bpp_x16;
int ret;
- dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
-
dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, adjusted_mode->clock,
adjusted_mode->hdisplay,
num_joined_pipes);
- dsc_max_bpp = min(dsc_joiner_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
-
- /* FIXME: remove the round trip via integers */
- min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
- max_bpp_x16 = fxp_q4_from_int(dsc_max_bpp);
+ max_bpp_x16 = min(fxp_q4_from_int(dsc_joiner_max_bpp), limits->link.max_bpp_x16);
bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);
@@ -2174,8 +2182,12 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
output_bpp = intel_dp_output_bpp(pipe_config->output_format, pipe_bpp);
max_bpp_x16 = min(max_bpp_x16, fxp_q4_from_int(output_bpp) - bpp_step_x16);
+ drm_WARN_ON(display->drm, !is_power_of_2(bpp_step_x16));
+ min_bpp_x16 = round_up(limits->link.min_bpp_x16, bpp_step_x16);
+ max_bpp_x16 = round_down(max_bpp_x16, bpp_step_x16);
+
for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
- if (!intel_dp_dsc_valid_bpp(intel_dp, bpp_x16))
+ if (!intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16))
continue;
ret = dsc_compute_link_config(intel_dp,
@@ -2478,7 +2490,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
- dsc_src_min_bpp = dsc_src_min_compressed_bpp();
+ dsc_src_min_bpp = intel_dp_dsc_min_src_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
limits->link.min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
@@ -2523,6 +2535,7 @@ intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -2576,7 +2589,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
intel_dp_test_compute_config(intel_dp, crtc_state, limits);
return intel_dp_compute_config_link_bpp_limits(intel_dp,
- intel_dp->attached_connector,
+ connector,
crtc_state,
dsc,
limits);
@@ -2637,7 +2650,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_compute_config_limits(intel_dp, pipe_config,
+ !intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
respect_downstream_limits,
false,
&limits);
@@ -2671,7 +2684,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_compute_config_limits(intel_dp, pipe_config,
+ if (!intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
respect_downstream_limits,
true,
&limits))
@@ -3104,6 +3117,76 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
}
}
+int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
+ /*
+ * the minimum symbol cycle count is 3 (BS, VBID, BE) for 128b/132b
+ * and 5 (BS, VBID, MVID, MAUD, BE) for 8b/10b
+ */
+ int min_sym_cycles = intel_dp_is_uhbr(crtc_state) ? 3 : 5;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);
+ int min_hblank;
+ int max_lane_count = 4;
+ int hactive_sym_cycles, htotal_sym_cycles;
+ int dsc_slices = 0;
+ int link_bpp_x16;
+
+ if (DISPLAY_VER(display) < 30)
+ return 0;
+
+ /* MIN_HBLANK should be set only for 8b/10b MST or for 128b/132b SST/MST */
+ if (!is_mst && !intel_dp_is_uhbr(crtc_state))
+ return 0;
+
+ if (crtc_state->dsc.compression_enable) {
+ dsc_slices = intel_dp_dsc_get_slice_count(connector,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ num_joined_pipes);
+ if (!dsc_slices) {
+ drm_dbg(display->drm, "failed to calculate dsc slice count\n");
+ return -EINVAL;
+ }
+ }
+
+ if (crtc_state->dsc.compression_enable)
+ link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
+ else
+ link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
+ crtc_state->pipe_bpp));
+
+ /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
+ hactive_sym_cycles = drm_dp_link_symbol_cycles(max_lane_count,
+ adjusted_mode->hdisplay,
+ dsc_slices,
+ link_bpp_x16,
+ symbol_size, is_mst);
+ htotal_sym_cycles = adjusted_mode->htotal * hactive_sym_cycles /
+ adjusted_mode->hdisplay;
+
+ min_hblank = htotal_sym_cycles - hactive_sym_cycles;
+ /* minimum Hblank calculation: https://groups.vesa.org/wg/DP/document/20494 */
+ min_hblank = max(min_hblank, min_sym_cycles);
+
+ /*
+ * adjust the calculated value to account for the BlankingStart/
+ * BlankingEnd framing control symbols
+ */
+ min_hblank = min_hblank - 2;
+
+ min_hblank = min(10, min_hblank);
+ crtc_state->min_hblank = min_hblank;
+
+ return 0;
+}
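
Worked example of the clamping above: on a UHBR link (min_sym_cycles = 3) where the htotal/hactive scaling yields, say, 46 blanking symbol cycles, max(46, 3) = 46, subtracting the 2 framing cycles gives 44, and min(10, 44) stores 10 in crtc_state->min_hblank; the 5-cycle 8b/10b floor only matters for very narrow blanking.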
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -3203,6 +3286,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
&pipe_config->dp_m_n);
}
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
@@ -3223,7 +3310,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
intel_dp->needs_modeset_retry = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
@@ -3587,7 +3674,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
if (crtc_state) {
intel_dp_reset_link_params(intel_dp);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
- intel_dp->link_trained = true;
+ intel_dp->link.active = true;
}
}
@@ -3645,6 +3732,9 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
+ if (!drm_dp_is_branch(intel_dp->dpcd))
+ return;
+
if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
intel_dp->pcon_dsc_dpcd,
sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
@@ -4456,6 +4546,23 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp)
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /*
+ * Display WA for HSD #13013007775: mtl/arl/lnl
+ * Read the sink count and link service IRQ registers in separate
+ * transactions to prevent disconnecting the sink on a TBT link
+ * inadvertently.
+ */
+ if (IS_DISPLAY_VER(display, 14, 20) && !display->platform.battlemage) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 3) != 3)
+ return false;
+
+ /* DP_SINK_COUNT_ESI + 3 == DP_LINK_SERVICE_IRQ_VECTOR_ESI0 */
+ return drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &esi[3]) == 1;
+ }
+
return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
}
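
For reference (addresses per drm_dp.h): the workaround path reads DPCD 0x2002..0x2004 (DP_SINK_COUNT_ESI plus the two device service IRQ ESI bytes) in one transfer and 0x2005 (DP_LINK_SERVICE_IRQ_VECTOR_ESI0) in a second one, while the fallthrough return still fetches all four bytes at once.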
@@ -5005,8 +5112,6 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(display->drm, intel_dp->mst.active_links < 0);
-
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
@@ -5021,7 +5126,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
- if (intel_dp->mst.active_links > 0 && link_ok &&
+ if (intel_dp_mst_active_streams(intel_dp) > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
if (!intel_dp_mst_link_status(intel_dp))
link_ok = false;
@@ -5082,7 +5187,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
u8 link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->link_trained)
+ if (!intel_dp->link.active)
return false;
/*
@@ -5394,6 +5499,11 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
intel_psr_short_pulse(intel_dp);
+ if (intel_alpm_get_error(intel_dp)) {
+ intel_alpm_disable(intel_dp);
+ intel_dp->alpm_parameters.sink_alpm_error = true;
+ }
+
if (intel_dp_test_short_pulse(intel_dp))
reprobe_needed = true;
@@ -5689,6 +5799,28 @@ intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
}
+static bool intel_dp_needs_dpcd_probe(struct intel_dp *intel_dp, bool force_on_external)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ if (intel_dp_is_edp(intel_dp))
+ return false;
+
+ if (force_on_external)
+ return true;
+
+ if (intel_dp->is_mst)
+ return false;
+
+ return drm_edid_has_quirk(&connector->base, DRM_EDID_QUIRK_DP_DPCD_PROBE);
+}
+
+void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external)
+{
+ drm_dp_dpcd_set_probe(&intel_dp->aux,
+ intel_dp_needs_dpcd_probe(intel_dp, force_on_external));
+}
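
As wired up elsewhere in this patch, the policy reduces to two call patterns (a summary of the call sites, not new behaviour):

	intel_dp_dpcd_set_probe(intel_dp, true);  /* AUX init, long HPD: force probing on external DP */
	intel_dp_dpcd_set_probe(intel_dp, false); /* after detect()/force(): only quirked sinks keep probing */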
+
static int
intel_dp_detect(struct drm_connector *_connector,
struct drm_modeset_acquire_ctx *ctx,
@@ -5817,6 +5949,8 @@ out_unset_edid:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
+ intel_dp_dpcd_set_probe(intel_dp, false);
+
if (!intel_dp_is_edp(intel_dp))
drm_dp_set_subconnector_property(&connector->base,
status,
@@ -5829,23 +5963,26 @@ out_vdd_off:
}
static void
-intel_dp_force(struct drm_connector *connector)
+intel_dp_force(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_driver_check_access(display))
return;
intel_dp_unset_edid(intel_dp);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return;
intel_dp_set_edid(intel_dp);
+
+ intel_dp_dpcd_set_probe(intel_dp, false);
}
static int intel_dp_get_modes(struct drm_connector *_connector)
@@ -5881,24 +6018,25 @@ static int intel_dp_get_modes(struct drm_connector *_connector)
}
static int
-intel_dp_connector_register(struct drm_connector *connector)
+intel_dp_connector_register(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
int ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret)
return ret;
drm_dbg_kms(display->drm, "registering %s bus for %s\n",
- intel_dp->aux.name, connector->kdev->kobj.name);
+ intel_dp->aux.name, connector->base.kdev->kobj.name);
- intel_dp->aux.dev = connector->kdev;
+ intel_dp->aux.dev = connector->base.kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
- drm_dp_cec_register_connector(&intel_dp->aux, connector);
+ drm_dp_cec_register_connector(&intel_dp->aux, &connector->base);
if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return ret;
@@ -5909,20 +6047,21 @@ intel_dp_connector_register(struct drm_connector *connector)
*/
if (intel_lspcon_init(dig_port)) {
if (intel_lspcon_detect_hdr_capability(dig_port))
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
}
return ret;
}
static void
-intel_dp_connector_unregister(struct drm_connector *connector)
+intel_dp_connector_unregister(struct drm_connector *_connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
drm_dp_cec_unregister_connector(&intel_dp->aux);
drm_dp_aux_unregister(&intel_dp->aux);
- intel_connector_unregister(connector);
+ intel_connector_unregister(&connector->base);
}
void intel_dp_connector_sync_state(struct intel_connector *connector,
@@ -5983,21 +6122,21 @@ static int intel_modeset_tile_group(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct drm_connector_list_iter conn_iter;
- struct drm_connector *connector;
+ struct intel_connector *connector;
int ret = 0;
drm_connector_list_iter_begin(display->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
+ for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- if (!connector->has_tile ||
- connector->tile_group->id != tile_group_id)
+ if (!connector->base.has_tile ||
+ connector->base.tile_group->id != tile_group_id)
continue;
conn_state = drm_atomic_get_connector_state(&state->base,
- connector);
+ &connector->base);
if (IS_ERR(conn_state)) {
ret = PTR_ERR(conn_state);
break;
@@ -6061,10 +6200,11 @@ static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
}
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
- struct drm_connector *connector)
+ struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
const struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(&state->base, connector);
+ drm_atomic_get_old_connector_state(&state->base, &connector->base);
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
u8 transcoders;
@@ -6086,17 +6226,18 @@ static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
transcoders);
}
-static int intel_dp_connector_atomic_check(struct drm_connector *conn,
+static int intel_dp_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *_state)
{
- struct intel_display *display = to_intel_display(conn->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
- struct intel_connector *intel_conn = to_intel_connector(conn);
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(_state, &connector->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
int ret;
- ret = intel_digital_connector_atomic_check(conn, &state->base);
+ ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
if (ret)
return ret;
@@ -6106,12 +6247,12 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return ret;
}
- if (!intel_connector_needs_modeset(state, conn))
+ if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
ret = intel_dp_tunnel_atomic_check_state(state,
intel_dp,
- intel_conn);
+ connector);
if (ret)
return ret;
@@ -6122,26 +6263,26 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (DISPLAY_VER(display) < 9)
return 0;
- if (conn->has_tile) {
- ret = intel_modeset_tile_group(state, conn->tile_group->id);
+ if (connector->base.has_tile) {
+ ret = intel_modeset_tile_group(state, connector->base.tile_group->id);
if (ret)
return ret;
}
- return intel_modeset_synced_crtcs(state, conn);
+ return intel_modeset_synced_crtcs(state, &connector->base);
}
-static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
+static void intel_dp_oob_hotplug_event(struct drm_connector *_connector,
enum drm_connector_status hpd_state)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
bool hpd_high = hpd_state == connector_status_connected;
unsigned int hpd_pin = encoder->hpd_pin;
bool need_work = false;
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (hpd_high != test_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state)) {
display->hotplug.event_bits |= BIT(hpd_pin);
@@ -6150,10 +6291,10 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
hpd_high);
need_work = true;
}
- spin_unlock_irq(&i915->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
if (need_work)
- intel_hpd_schedule_detection(i915);
+ intel_hpd_schedule_detection(display);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -6180,13 +6321,12 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd ||
- intel_runtime_pm_suspended(&i915->runtime_pm) ||
+ intel_display_rpm_suspended(display) ||
!intel_pps_have_panel_power_or_vdd(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
@@ -6215,10 +6355,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
* complete the DP tunnel BW request for the latter connector/encoder
* waiting for this encoder's DPRX read, perform a dummy read here.
*/
- if (long_hpd)
+ if (long_hpd) {
+ intel_dp_dpcd_set_probe(intel_dp, true);
+
intel_dp_read_dprx_caps(intel_dp, dpcd);
- if (long_hpd) {
intel_dp->reset_link_params = true;
intel_dp_invalidate_source_oui(intel_dp);
@@ -6283,36 +6424,37 @@ intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
}
static void
-intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->base.port;
if (!intel_dp_is_edp(intel_dp))
- drm_connector_attach_dp_subconnector_property(connector);
+ drm_connector_attach_dp_subconnector_property(&connector->base);
if (!display->platform.g4x && port != PORT_A)
- intel_attach_force_audio_property(connector);
+ intel_attach_force_audio_property(&connector->base);
- intel_attach_broadcast_rgb_property(connector);
+ intel_attach_broadcast_rgb_property(&connector->base);
if (HAS_GMCH(display))
- drm_connector_attach_max_bpc_property(connector, 6, 10);
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 10);
else if (DISPLAY_VER(display) >= 5)
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 12);
/* Register HDMI colorspace for case of lspcon */
if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
- drm_connector_attach_content_type_property(connector);
- intel_attach_hdmi_colorspace_property(connector);
+ drm_connector_attach_content_type_property(&connector->base);
+ intel_attach_hdmi_colorspace_property(&connector->base);
} else {
- intel_attach_dp_colorspace_property(connector);
+ intel_attach_dp_colorspace_property(&connector->base);
}
if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
if (HAS_VRR(display))
- drm_connector_attach_vrr_capable_property(connector);
+ drm_connector_attach_vrr_capable_property(&connector->base);
}
static void
@@ -6347,7 +6489,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
@@ -6362,9 +6503,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(dev_priv)) {
+ if (intel_get_lvds_encoder(display)) {
drm_WARN_ON(display->drm,
- !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ !(HAS_PCH_IBX(display) || HAS_PCH_CPT(display)));
drm_info(display->drm,
"LVDS was detected, not registering eDP\n");
@@ -6395,7 +6536,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
*/
intel_hpd_enable_detection(encoder);
- intel_alpm_init_dpcd(intel_dp);
+ intel_alpm_init(intel_dp);
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp, connector);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 9189db4c2594..0657f5681196 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -147,6 +147,7 @@ int intel_dp_dsc_sink_min_compressed_bpp(const struct intel_crtc_state *pipe_con
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
int bpc);
+bool intel_dp_dsc_valid_compressed_bpp(struct intel_dp *intel_dp, int bpp_x16);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
int num_joined_pipes);
@@ -173,8 +174,6 @@ bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state);
-u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp);
-
void intel_ddi_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -194,6 +193,7 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -208,5 +208,11 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state);
int intel_dp_dsc_max_src_input_bpc(struct intel_display *display);
int intel_dp_dsc_min_src_input_bpc(void);
+int intel_dp_dsc_min_src_compressed_bpp(void);
+int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+
+int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector);
+void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index ec27bbd70bcf..829a7c0fbe4f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -3,8 +3,9 @@
* Copyright © 2020-2021 Intel Corporation
*/
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -111,10 +112,9 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(display)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -177,12 +177,11 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 timeout;
/* Max timeout value on G4x-BDW: 1.6ms */
- if (IS_BROADWELL(i915))
+ if (display->platform.broadwell)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -247,7 +246,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
intel_wakeref_t aux_wakeref;
- intel_wakeref_t pps_wakeref;
+ intel_wakeref_t pps_wakeref = NULL;
int i, ret, recv_bytes;
int try, clock = 0;
u32 status;
@@ -272,7 +271,20 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
aux_domain = intel_aux_power_domain(dig_port);
aux_wakeref = intel_display_power_get(display, aux_domain);
- pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * The PPS state needs to be locked for:
+ * - eDP on all platforms, since AUX transfers on eDP need VDD power
+ * (either forced or via panel power) which depends on the PPS
+ * state.
+ * - non-eDP on platforms where the PPS is a pipe instance (VLV/CHV),
+ * since changing the PPS state (via a parallel modeset for
+ * instance) may interfere with the AUX transfers on a non-eDP
+ * output as well.
+ */
+ if (intel_dp_is_edp(intel_dp) ||
+ display->platform.valleyview || display->platform.cherryview)
+ pps_wakeref = intel_pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -430,7 +442,9 @@ out:
if (vdd)
intel_pps_vdd_off_unlocked(intel_dp, false);
- intel_pps_unlock(intel_dp, pps_wakeref);
+ if (pps_wakeref)
+ intel_pps_unlock(intel_dp, pps_wakeref);
+
intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
intel_digital_port_unlock(encoder);
@@ -771,7 +785,6 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -786,10 +799,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
} else if (DISPLAY_VER(display) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (HAS_PCH_SPLIT(display)) {
intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ } else if (display->platform.valleyview || display->platform.cherryview) {
intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
} else {
@@ -799,9 +812,9 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
+ else if (display->platform.broadwell || display->platform.haswell)
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(i915))
+ else if (HAS_PCH_SPLIT(display))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
@@ -821,6 +834,8 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ intel_dp_dpcd_set_probe(intel_dp, true);
}
static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 8173de8aec63..41228478b21c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -36,7 +36,6 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
@@ -146,10 +145,10 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
* ranges for such panels.
*/
if (display->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
- !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
+ !(connector->base.display_info.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(display->drm,
- "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d.\n",
connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
return false;
@@ -476,31 +475,6 @@ static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, en
return connector->panel.backlight.level;
}
-static int
-intel_dp_aux_vesa_set_luminance(struct intel_connector *connector, u32 level)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- u8 buf[3];
- int ret;
-
- level = level * 1000;
- level &= 0xffffff;
- buf[0] = (level & 0x0000ff);
- buf[1] = (level & 0x00ff00) >> 8;
- buf[2] = (level & 0xff0000) >> 16;
-
- ret = drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
- buf, sizeof(buf));
- if (ret != sizeof(buf)) {
- drm_err(intel_dp->aux.drm_dev,
- "%s: Failed to set VESA Aux Luminance: %d\n",
- intel_dp->aux.name, ret);
- return -EINVAL;
- } else {
- return 0;
- }
-}
-
static void
intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
@@ -508,11 +482,6 @@ intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u3
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- if (panel->backlight.edp.vesa.luminance_control_support) {
- if (!intel_dp_aux_vesa_set_luminance(connector, level))
- return;
- }
-
if (!panel->backlight.edp.vesa.info.aux_set) {
const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
@@ -529,18 +498,6 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- int ret;
-
- if (panel->backlight.edp.vesa.luminance_control_support) {
- ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
- DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE);
-
- if (ret == 1)
- return;
-
- if (!intel_dp_aux_vesa_set_luminance(connector, level))
- return;
- }
if (!panel->backlight.edp.vesa.info.aux_enable) {
u32 pwm_level;
@@ -581,13 +538,41 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
&connector->base.display_info.luminance_range;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
- u16 current_level;
+ u32 current_level;
u8 current_mode;
int ret;
- if (panel->backlight.edp.vesa.luminance_control_support) {
+ ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
+ luminance_range->max_luminance,
+ panel->vbt.backlight.pwm_freq_hz,
+ intel_dp->edp_dpcd, &current_level, &current_mode,
+ false);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+
+ if (!panel->backlight.edp.vesa.info.aux_set ||
+ !panel->backlight.edp.vesa.info.aux_enable) {
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ connector->base.base.id, connector->base.name, ret);
+ return ret;
+ }
+ }
+
+ if (panel->backlight.edp.vesa.info.luminance_set) {
if (luminance_range->max_luminance) {
- panel->backlight.max = luminance_range->max_luminance;
+ panel->backlight.max = panel->backlight.edp.vesa.info.max;
panel->backlight.min = luminance_range->min_luminance;
} else {
panel->backlight.max = 512;
@@ -598,54 +583,26 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX VESA Nits backlight level is controlled through DPCD\n",
connector->base.base.id, connector->base.name);
- } else {
- ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
- panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
- &current_level, &current_mode);
- if (ret < 0)
- return ret;
-
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
- connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
- connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
-
- if (!panel->backlight.edp.vesa.info.aux_set ||
- !panel->backlight.edp.vesa.info.aux_enable) {
- ret = panel->backlight.pwm_funcs->setup(connector, pipe);
- if (ret < 0) {
- drm_err(display->drm,
- "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
- connector->base.base.id, connector->base.name, ret);
- return ret;
- }
+ } else if (panel->backlight.edp.vesa.info.aux_set) {
+ panel->backlight.max = panel->backlight.edp.vesa.info.max;
+ panel->backlight.min = 0;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
+ panel->backlight.level = current_level;
+ panel->backlight.enabled = panel->backlight.level != 0;
+ } else {
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
}
-
- if (panel->backlight.edp.vesa.info.aux_set) {
- panel->backlight.max = panel->backlight.edp.vesa.info.max;
- panel->backlight.min = 0;
- if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
- panel->backlight.level = current_level;
- panel->backlight.enabled = panel->backlight.level != 0;
- } else {
- panel->backlight.level = panel->backlight.max;
- panel->backlight.enabled = false;
- }
+ } else {
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
+ panel->backlight.level =
+ panel->backlight.pwm_funcs->get(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
} else {
- panel->backlight.max = panel->backlight.pwm_level_max;
- panel->backlight.min = panel->backlight.pwm_level_min;
- if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
- panel->backlight.level =
- panel->backlight.pwm_funcs->get(connector, pipe);
- panel->backlight.enabled = panel->backlight.pwm_enabled;
- } else {
- panel->backlight.level = panel->backlight.max;
- panel->backlight.enabled = false;
- }
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
}
}
@@ -663,7 +620,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
- if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE)) {
+ if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) &&
+ (intel_dp->edp_dpcd[3] & DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE)) {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX Luminance Based Backlight Control Supported!\n",
connector->base.base.id, connector->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index cc312596fb77..bd757db85927 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -11,9 +11,9 @@
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
@@ -805,10 +805,16 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
enum pipe pipe = (enum pipe)cpu_transcoder;
enum port port = dig_port->base.port;
int ret;
-
- drm_WARN_ON(display->drm, enable &&
- !!(intel_de_read(display, HDCP2_AUTH_STREAM(display, cpu_transcoder, port))
- & AUTH_STREAM_TYPE) != data->streams[0].stream_type);
+ u32 val;
+ u8 stream_type;
+
+ if (DISPLAY_VER(display) < 30) {
+ val = intel_de_read(display,
+ HDCP2_AUTH_STREAM(display, cpu_transcoder, port));
+ stream_type = REG_FIELD_GET(AUTH_STREAM_TYPE_MASK, val);
+ drm_WARN_ON(display->drm, enable &&
+ stream_type != data->streams[0].stream_type);
+ }
ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
if (ret)
@@ -824,6 +830,14 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return -ETIMEDOUT;
}
+ if (DISPLAY_VER(display) >= 30) {
+ val = intel_de_read(display,
+ HDCP2_STREAM_STATUS(display, cpu_transcoder, port));
+ stream_type = REG_FIELD_GET(STREAM_TYPE_STATUS_MASK, val);
+ drm_WARN_ON(display->drm, enable &&
+ stream_type != data->streams[0].stream_type);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2966f5b39392..a479b63112ea 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -56,6 +56,8 @@
lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)
+#define MAX_SEQ_TRAIN_FAILURES 2
+
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
@@ -164,7 +166,7 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
* resetting its internal state when the mode is changed from
* non-transparent to transparent.
*/
- if (intel_dp->link_trained) {
+ if (intel_dp->link.active) {
if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
goto out_reset_lttpr_count;
@@ -711,8 +713,21 @@ void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, b
static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ /*
+ * Currently, we set the MSA ignore bit based on vrr.in_range.
+ * We can't really read that out during driver load since we don't have
+ * the connector information read in yet. So if we do end up doing a
+ * modeset during initial_commit() we'll clear the MSA ignore bit.
+ * GOP likely wouldn't have set this bit so after the initial commit,
+ * if there are no modesets and we enable VRR mode seamlessly
+ * (without a full modeset), the MSA ignore bit might never get set.
+ *
+ * #TODO: Implement readout of vrr.in_range.
+ * We need fastset support for setting the MSA ignore bit in DPCD,
+ * especially on the first real commit when clearing the inherited flag.
+ */
intel_dp_link_training_set_mode(intel_dp,
- crtc_state->port_clock, crtc_state->vrr.flipline);
+ crtc_state->port_clock, crtc_state->vrr.in_range);
}
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
@@ -1110,7 +1125,10 @@ intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- intel_dp->link_trained = true;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp->link.active = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
@@ -1120,6 +1138,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
}
+
+ intel_hpd_unblock(encoder);
+
+ if (!display->hotplug.ignore_long_hpd &&
+ intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) {
+ int delay_ms = intel_dp->link.seq_train_failures ? 0 : 2000;
+
+ intel_encoder_link_check_queue_work(encoder, delay_ms);
+ }
}
static bool
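
The hunks above and below restructure the retry policy: a successful training queues a link re-check after 2 s, a failed one retries immediately, and once MAX_SEQ_TRAIN_FAILURES consecutive failures accumulate no further check is queued so the fallback path can take over. A standalone sketch of that policy (function name hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SEQ_TRAIN_FAILURES 2

/* Returns whether a link re-check should be queued, and with what delay. */
static bool queue_link_check(int seq_train_failures, int *delay_ms)
{
	if (seq_train_failures >= MAX_SEQ_TRAIN_FAILURES)
		return false;	/* give up; fallback handles it */

	/* re-check a freshly trained link after 2 s, retry failures at once */
	*delay_ms = seq_train_failures ? 0 : 2000;
	return true;
}

int main(void)
{
	for (int failures = 0; failures <= 2; failures++) {
		int delay = -1;
		bool queued = queue_link_check(failures, &delay);

		printf("failures=%d queued=%d delay_ms=%d\n",
		       failures, queued, delay);
	}
	return 0;
}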
@@ -1602,7 +1629,11 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
* non-transparent mode. During an earlier LTTPR detection this
* could've been prevented by an active link.
*/
- int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+ int lttpr_count;
+
+ intel_hpd_block(encoder);
+
+ lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
if (lttpr_count < 0)
/* Still continue with enabling the port and link training. */
@@ -1620,7 +1651,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n");
} else if (passed) {
intel_dp->link.seq_train_failures = 0;
- intel_encoder_link_check_queue_work(encoder, 2000);
return;
}
@@ -1643,10 +1673,8 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
return;
}
- if (intel_dp->link.seq_train_failures < 2) {
- intel_encoder_link_check_queue_work(encoder, 0);
+ if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES)
return;
- }
if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state))
return;
@@ -1693,7 +1721,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_rate = intel_dp->link_rate;
force_rate = intel_dp->link.force_rate;
@@ -1791,7 +1819,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_lane_count = intel_dp->lane_count;
force_lane_count = intel_dp->link.force_lane_count;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 02f95108c637..74497c9a0554 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -23,14 +23,17 @@
*
*/
+#include <linux/log2.h>
+#include <linux/math.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -38,6 +41,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
@@ -51,7 +55,9 @@
#include "intel_link_bw.h"
#include "intel_pfit.h"
#include "intel_psr.h"
+#include "intel_step.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
/*
@@ -104,6 +110,35 @@ static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
return &dig_port->dp;
}
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp)
+{
+ return intel_dp->mst.active_streams;
+}
+
+static bool intel_dp_mst_dec_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams - 1);
+
+ if (drm_WARN_ON(display->drm, intel_dp->mst.active_streams == 0))
+ return true;
+
+ return --intel_dp->mst.active_streams == 0;
+}
+
+static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams + 1);
+
+ return intel_dp->mst.active_streams++ == 0;
+}
+
+/* TODO: return a bpp_x16 value */
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
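
intel_dp_mst_inc_active_streams()/intel_dp_mst_dec_active_streams() above replace the open-coded mst.active_links bookkeeping with helpers whose return value flags the 0 -> 1 and 1 -> 0 transitions, so the enable/disable paths can hang one-time setup and teardown off them. A standalone sketch of the idiom (an assert stands in for the drm_WARN_ON() underflow check):

#include <assert.h>
#include <stdbool.h>

struct stream_counter { int active; };

static bool inc_streams(struct stream_counter *c)
{
	return c->active++ == 0;	/* true only for the first stream */
}

static bool dec_streams(struct stream_counter *c)
{
	assert(c->active > 0);		/* underflow would be a driver bug */
	return --c->active == 0;	/* true only for the last stream */
}

int main(void)
{
	struct stream_counter c = { 0 };

	assert(inc_streams(&c));	/* first stream: power up the port */
	assert(!inc_streams(&c));
	assert(!dec_streams(&c));
	assert(dec_streams(&c));	/* last stream: tear everything down */
	return 0;
}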
@@ -210,24 +245,13 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
-static void intel_dp_mst_compute_min_hblank(struct intel_crtc_state *crtc_state,
- int bpp_x16)
+static void mst_stream_update_slots(const struct intel_crtc_state *crtc_state,
+ struct drm_dp_mst_topology_state *topology_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
- int hblank;
-
- if (DISPLAY_VER(display) < 20)
- return;
-
- /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
- hblank = DIV_ROUND_UP((DIV_ROUND_UP
- (adjusted_mode->htotal - adjusted_mode->hdisplay, 4) * bpp_x16),
- symbol_size);
+ u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
+ DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
- crtc_state->min_hblank = hblank;
+ drm_dp_mst_update_slots(topology_state, link_coding_cap);
}
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
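
intel_dp_mtp_tu_compute_config() and its callers work in the driver's .4 binary fixed-point bpp format (the _x16 suffix): the stored value is bpp * 16, so the low four bits carry the fraction. A standalone sketch with stand-ins for the kernel's fxp_q4_* helpers, re-implemented here purely for illustration:

#include <assert.h>

static int q4_from_int(int v)        { return v << 4; }
static int q4_to_int(int v)          { return v >> 4; }	/* truncates */
static int q4_to_int_roundup(int v)  { return (v + 15) >> 4; }

int main(void)
{
	int bpp_x16 = q4_from_int(8) + 8;	/* 8.5 bpp is stored as 136 */

	assert(bpp_x16 == 136);
	assert(q4_to_int(bpp_x16) == 8);
	assert(q4_to_int_roundup(bpp_x16) == 9);
	return 0;
}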
@@ -242,7 +266,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool is_mst = intel_dp->is_mst;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int bpp_x16, slots = -EINVAL;
int dsc_slice_count = 0;
int max_dpt_bpp_x16;
@@ -252,6 +276,12 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
fxp_q4_to_frac(max_bpp_x16) ||
fxp_q4_to_frac(bpp_step_x16)));
+ if (!bpp_step_x16) {
+ /* A zero step is allowed only to indicate a single try at the given bpp. */
+ drm_WARN_ON(display->drm, min_bpp_x16 != max_bpp_x16);
+ bpp_step_x16 = 1;
+ }
+
if (is_mst) {
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst.mgr);
if (IS_ERR(mst_state))
@@ -259,6 +289,8 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
crtc_state->lane_count);
+
+ mst_stream_update_slots(crtc_state, mst_state);
}
if (dsc) {
@@ -287,12 +319,20 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
}
}
+ drm_WARN_ON(display->drm, min_bpp_x16 % bpp_step_x16 || max_bpp_x16 % bpp_step_x16);
+
for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
int local_bw_overhead;
int link_bpp_x16;
drm_dbg_kms(display->drm, "Trying bpp " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_x16));
+ if (dsc && !intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16)) {
+ /* SST must have validated the single bpp tried here already earlier. */
+ drm_WARN_ON(display->drm, !is_mst);
+ continue;
+ }
+
link_bpp_x16 = dsc ? bpp_x16 :
fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
fxp_q4_to_int(bpp_x16)));
@@ -300,8 +340,6 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
false, dsc_slice_count, link_bpp_x16);
- intel_dp_mst_compute_min_hblank(crtc_state, link_bpp_x16);
-
intel_dp_mst_compute_m_n(crtc_state,
local_bw_overhead,
link_bpp_x16,
@@ -358,6 +396,10 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst.mgr,
connector->mst.port,
dfixed_trunc(pbn));
+
+ /* TODO: Check this already in drm_dp_atomic_find_time_slots(). */
+ if (slots > mst_state->total_avail_slots)
+ slots = -EINVAL;
} else {
/* Same as above for remote_tu */
crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
@@ -377,10 +419,6 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
break;
}
-
- /* Allow using zero step to indicate one try */
- if (!bpp_step_x16)
- break;
}
if (slots < 0) {
@@ -428,7 +466,8 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
int num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
- int min_compressed_bpp, max_compressed_bpp;
+ int min_compressed_bpp_x16, max_compressed_bpp_x16;
+ int bpp_step_x16;
max_bpp = limits->pipe.max_bpp;
min_bpp = limits->pipe.min_bpp;
@@ -453,46 +492,28 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
crtc_state->pipe_bpp = max_bpp;
- max_compressed_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
- min_compressed_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
+ min_compressed_bpp_x16 = limits->link.min_bpp_x16;
+ max_compressed_bpp_x16 = limits->link.max_bpp_x16;
- drm_dbg_kms(display->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
- min_compressed_bpp, max_compressed_bpp);
+ drm_dbg_kms(display->drm,
+ "DSC Sink supported compressed min bpp " FXP_Q4_FMT " compressed max bpp " FXP_Q4_FMT "\n",
+ FXP_Q4_ARGS(min_compressed_bpp_x16), FXP_Q4_ARGS(max_compressed_bpp_x16));
- /* Align compressed bpps according to our own constraints */
- max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, max_compressed_bpp,
- crtc_state->pipe_bpp);
- min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
- crtc_state->pipe_bpp);
+ bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);
+
+ max_compressed_bpp_x16 = min(max_compressed_bpp_x16, fxp_q4_from_int(crtc_state->pipe_bpp) - bpp_step_x16);
+
+ drm_WARN_ON(display->drm, !is_power_of_2(bpp_step_x16));
+ min_compressed_bpp_x16 = round_up(min_compressed_bpp_x16, bpp_step_x16);
+ max_compressed_bpp_x16 = round_down(max_compressed_bpp_x16, bpp_step_x16);
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;
return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
- fxp_q4_from_int(min_compressed_bpp),
- fxp_q4_from_int(max_compressed_bpp),
- fxp_q4_from_int(1), true);
-}
-
-static int mst_stream_update_slots(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
- struct drm_dp_mst_topology_state *topology_state;
- u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
- DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
-
- topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
- if (IS_ERR(topology_state)) {
- drm_dbg_kms(display->drm, "slot update failed\n");
- return PTR_ERR(topology_state);
- }
-
- drm_dp_mst_update_slots(topology_state, link_coding_cap);
-
- return 0;
+ min_compressed_bpp_x16,
+ max_compressed_bpp_x16,
+ bpp_step_x16, true);
}
static int mode_hblank_period_ns(const struct drm_display_mode *mode)
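
The rewritten code above keeps the compressed-bpp limits in x16 form and aligns them to the sink's granularity with round_up()/round_down(); those kernel macros use mask arithmetic, which is why bpp_step_x16 is asserted to be a power of two. A standalone worked example with made-up numbers:

#include <assert.h>

/* Power-of-two-only rounding, mirroring the kernel macro trick. */
#define ROUND_UP(x, step)	(((x) + (step) - 1) & ~((step) - 1))
#define ROUND_DOWN(x, step)	((x) & ~((step) - 1))

int main(void)
{
	int step_x16 = 16;	/* hypothetical 1 bpp DSC granularity */
	int min_x16 = 137;	/* 8.5625 bpp */
	int max_x16 = 310;	/* 19.375 bpp */

	assert(ROUND_UP(min_x16, step_x16) == 144);	/* up to 9 bpp */
	assert(ROUND_DOWN(max_x16, step_x16) == 304);	/* down to 19 bpp */
	return 0;
}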
@@ -590,12 +611,13 @@ adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- if (!intel_dp_compute_config_limits(intel_dp, crtc_state, false, dsc,
+ if (!intel_dp_compute_config_limits(intel_dp, connector,
+ crtc_state, false, dsc,
limits))
return false;
@@ -699,10 +721,6 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = mst_stream_update_slots(intel_dp, pipe_config, conn_state);
- if (ret)
- return ret;
-
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
@@ -710,6 +728,12 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -990,25 +1014,17 @@ static void mst_stream_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- enum transcoder trans = old_crtc_state->cpu_transcoder;
-
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
- if (intel_dp->mst.active_links == 1)
- intel_dp->link_trained = false;
+ if (intel_dp_mst_active_streams(intel_dp) == 1)
+ intel_dp->link.active = false;
intel_hdcp_disable(intel_mst->connector);
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
-
- if (DISPLAY_VER(display) >= 20)
- intel_de_write(display, DP_MIN_HBLANK_CTL(trans), 0);
}
static void mst_stream_post_disable(struct intel_atomic_state *state,
@@ -1034,8 +1050,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
bool last_mst_stream;
int i;
- intel_dp->mst.active_links--;
- last_mst_stream = intel_dp->mst.active_links == 0;
+ last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);
+
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
@@ -1062,6 +1078,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
old_payload, new_payload);
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -1104,8 +1122,6 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
primary_encoder->post_disable(state, primary_encoder,
old_crtc_state, NULL);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
}
static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
@@ -1116,7 +1132,7 @@ static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0 &&
+ if (intel_dp_mst_active_streams(intel_dp) == 0 &&
primary_encoder->post_pll_disable)
primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
@@ -1129,7 +1145,7 @@ static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0)
+ if (intel_dp_mst_active_streams(intel_dp) == 0)
primary_encoder->pre_pll_enable(state, primary_encoder,
pipe_config, NULL);
else
@@ -1189,13 +1205,11 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
*/
connector->encoder = encoder;
intel_mst->connector = connector;
- first_mst_stream = intel_dp->mst.active_links == 0;
+
+ first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp);
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
if (first_mst_stream)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -1210,8 +1224,6 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
- intel_dp->mst.active_links++;
-
ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->mst.port));
if (ret < 0)
@@ -1279,9 +1291,9 @@ static void mst_stream_enable(struct intel_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
- bool first_mst_stream = intel_dp->mst.active_links == 1;
+ bool first_mst_stream = intel_dp_mst_active_streams(intel_dp) == 1;
struct intel_crtc *pipe_crtc;
- int ret, i, min_hblank;
+ int ret, i;
drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);
@@ -1296,41 +1308,17 @@ static void mst_stream_enable(struct intel_atomic_state *state,
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
- if (DISPLAY_VER(display) >= 20) {
- /*
- * adjust the BlankingStart/BlankingEnd framing control from
- * the calculated value
- */
- min_hblank = pipe_config->min_hblank - 2;
-
- /* Maximum value to be programmed is limited to 0x10 */
- min_hblank = min(0x10, min_hblank);
-
- /*
- * Minimum hblank accepted for 128b/132b would be 5 and for
- * 8b/10b would be 3 symbol count
- */
- if (intel_dp_is_uhbr(pipe_config))
- min_hblank = max(min_hblank, 5);
- else
- min_hblank = max(min_hblank, 3);
-
- intel_de_write(display, DP_MIN_HBLANK_CTL(trans),
- min_hblank);
- }
-
enable_bs_jitter_was(pipe_config);
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ intel_vrr_transcoder_enable(pipe_config);
+
intel_ddi_clear_act_sent(encoder, pipe_config);
intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
intel_ddi_wait_for_act_sent(encoder, pipe_config);
drm_dp_check_act_status(&intel_dp->mst.mgr);
@@ -1348,8 +1336,6 @@ static void mst_stream_enable(struct intel_atomic_state *state,
FECSTALL_DIS_DPTSTREAM_DPTTG,
pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
- intel_audio_sdp_split_update(pipe_config);
-
intel_enable_transcoder(pipe_config);
for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) {
@@ -1870,12 +1856,6 @@ mst_stream_encoders_create(struct intel_digital_port *dig_port)
}
int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
-{
- return dig_port->dp.mst.active_links;
-}
-
-int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
struct intel_display *display = to_intel_display(dig_port);
@@ -2101,7 +2081,7 @@ void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
u8 rate_select;
u8 link_bw;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
return;
if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index c1bbfeb02ca9..ab09b487c6bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -18,7 +18,7 @@ struct intel_link_bw_limits;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
-int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index bd61f3c3ec91..6ed5012c5fac 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -10,9 +10,9 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 429f89543789..3f77ad92c156 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,13 +21,15 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_print.h>
+
#include "bxt_dpio_phy_regs.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -222,9 +224,7 @@ static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
static const struct bxt_dpio_phy_info *
bxt_get_phy_list(struct intel_display *display, int *count)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
*count = ARRAY_SIZE(glk_dpio_phy_info);
return glk_dpio_phy_info;
} else {
@@ -428,7 +428,7 @@ static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
* use 1ms due to occasional timeouts observed with that.
*/
if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy),
- PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
+ PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1, NULL))
drm_err(display->drm, "timeout during PHY%d power on\n",
phy);
@@ -717,53 +717,53 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
int i;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Clear calc init */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW10(ch), val);
}
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW9(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW9(ch), val);
}
/* Program swing deemph */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
+ val = vlv_dpio_read(display->drm, phy, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value);
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW4(ch, i), val);
}
/* Program swing margin */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
+ val = vlv_dpio_read(display->drm, phy, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= DPIO_SWING_MARGIN000(margin_reg_value);
@@ -776,7 +776,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val &= ~DPIO_UNIQ_TRANS_SCALE_MASK;
val |= DPIO_UNIQ_TRANS_SCALE(0x9a);
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW2(ch, i), val);
}
/*
@@ -786,79 +786,89 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
* 27 for ch0 and ch1.
*/
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
+ val = vlv_dpio_read(display->drm, phy, CHV_TX_DW3(ch, i));
if (uniq_trans_scale)
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
else
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW10(ch), val);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
-void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- bool reset)
+static void __chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW0(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW0(ch), val);
}
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW1(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW1(ch), val);
}
}
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ vlv_dpio_get(display->drm);
+ __chv_data_lane_soft_reset(encoder, crtc_state, reset);
+ vlv_dpio_put(display->drm);
+}
+
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
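
chv_data_lane_soft_reset() is split above into a locked public wrapper and a double-underscore helper for callers such as chv_phy_pre_pll_enable() that already hold the DPIO lock, avoiding a recursive vlv_dpio_get(). A standalone sketch of the convention, with a pthread mutex standing in for the real lock:

#include <pthread.h>

static pthread_mutex_t dpio_lock = PTHREAD_MUTEX_INITIALIZER;

/* Double underscore: caller must already hold dpio_lock. */
static void __lane_soft_reset(int reset)
{
	(void)reset;	/* register writes would go here */
}

/* Public entry point: takes and drops the lock itself. */
static void lane_soft_reset(int reset)
{
	pthread_mutex_lock(&dpio_lock);
	__lane_soft_reset(reset);
	pthread_mutex_unlock(&dpio_lock);
}

int main(void)
{
	lane_soft_reset(1);	/* assert lane reset */
	lane_soft_reset(0);	/* deassert */
	return 0;
}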
@@ -877,47 +887,47 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_powergate_lanes(encoder, true, lane_mask);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, crtc_state, true);
+ __chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW8(ch));
val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe == PIPE_B)
val |= DPIO_PCS_USEDCLKCHANNEL;
else
val &= ~DPIO_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW8(ch));
val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe == PIPE_B)
val |= DPIO_PCS_USEDCLKCHANNEL;
else
val &= ~DPIO_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW8(ch), val);
}
/*
@@ -925,38 +935,38 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW19(ch));
if (pipe == PIPE_B)
val |= CHV_CMN_USEDCLKCHANNEL;
else
val &= ~CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW19(ch), val);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
int data, i, stagger;
u32 val;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting */
@@ -966,7 +976,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
data = 0;
else
data = (i == 1) ? 0 : DPIO_UPAR;
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW14(ch, i), data);
}
/* Data lane stagger programming */
@@ -981,17 +991,17 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
else
stagger = 0x2;
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW11(ch), val);
}
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -999,7 +1009,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
DPIO_TX2_STAGGER_MULT(0));
if (crtc_state->lane_count > 2) {
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -1008,9 +1018,9 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
}
/* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, crtc_state, false);
+ __chv_data_lane_soft_reset(encoder, crtc_state, false);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
@@ -1027,25 +1037,25 @@ void chv_phy_release_cl2_override(struct intel_encoder *encoder)
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
u32 val;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW1_CH1, val);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
/*
* Leave the power down bit cleared for at least one
@@ -1064,97 +1074,97 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch),
- uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW2_GRP(ch),
+ uniqtranscale_reg_value);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
if (tx3_demph)
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW4(ch, 3), tx3_demph);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
/* Program Tx lane resets to default */
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW0_GRP(ch),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW1_GRP(ch),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
DPIO_PCS_CLK_DATAWIDTH_8_10 |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
enum pipe pipe = crtc->pipe;
u32 val;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable clock channels for this port */
val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe == PIPE_B)
val |= DPIO_PCS_USEDCLKCHANNEL;
val |= 0xc4;
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW8_GRP(ch), val);
/* Program lane clock */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
- vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
+ vlv_dpio_put(display->drm);
}
void vlv_wait_port_ready(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 08a30e5aafce..f969c5399a51 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -6,13 +6,14 @@
#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
@@ -24,11 +25,11 @@
#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
-struct intel_dpll_funcs {
+struct intel_dpll_global_funcs {
int (*crtc_compute_clock)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
- int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+ int (*crtc_get_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
};
struct intel_limit {
@@ -373,14 +374,14 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return i915->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(i915))
+ return display->vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(display))
return 120000;
- else if (DISPLAY_VER(i915) != 2)
+ else if (DISPLAY_VER(display) != 2)
return 96000;
else
return 48000;
@@ -389,27 +390,27 @@ static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
struct intel_dpll_hw_state *dpll_hw_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
u32 tmp;
/* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
+ if (display->platform.cherryview && crtc->pipe != PIPE_A)
+ tmp = display->state.chv_dpll_md[crtc->pipe];
else
- tmp = intel_de_read(dev_priv,
- DPLL_MD(dev_priv, crtc->pipe));
+ tmp = intel_de_read(display,
+ DPLL_MD(display, crtc->pipe));
hw_state->dpll_md = tmp;
}
- hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe));
+ hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
- hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
+ if (!display->platform.valleyview && !display->platform.cherryview) {
+ hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
+ hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
hw_state->dpll &= ~(DPLL_LOCK_VLV |
@@ -421,8 +422,8 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
u32 dpll = hw_state->dpll;
u32 fp;
@@ -436,7 +437,7 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
fp = hw_state->fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
@@ -444,8 +445,8 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (DISPLAY_VER(dev_priv) != 2) {
- if (IS_PINEVIEW(dev_priv))
+ if (DISPLAY_VER(display) != 2) {
+ if (display->platform.pineview)
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
@@ -462,23 +463,23 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
7 : 14;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return;
}
- if (IS_PINEVIEW(dev_priv))
+ if (display->platform.pineview)
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
enum pipe lvds_pipe;
- if (IS_I85X(dev_priv) &&
- intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ if (display->platform.i85x &&
+ intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
lvds_pipe == crtc->pipe) {
- u32 lvds = intel_de_read(dev_priv, LVDS);
+ u32 lvds = intel_de_read(display, LVDS);
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -513,8 +514,8 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -526,9 +527,9 @@ void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
- vlv_dpio_get(dev_priv);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW3(ch));
+ vlv_dpio_put(display->drm);
clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
@@ -541,8 +542,8 @@ void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -554,13 +555,13 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
- vlv_dpio_get(dev_priv);
- cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
- pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
- pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
- pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
- pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ cmn_dw13 = vlv_dpio_read(display->drm, phy, CHV_CMN_DW13(ch));
+ pll_dw0 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW0(ch));
+ pll_dw1 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW1(ch));
+ pll_dw2 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW2(ch));
+ pll_dw3 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
+ vlv_dpio_put(display->drm);
clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
@@ -577,7 +578,7 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct intel_display *display,
const struct intel_limit *limit,
const struct dpll *clock)
{
@@ -590,14 +591,14 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
return false;
- if (!IS_PINEVIEW(dev_priv) &&
- !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ if (!display->platform.pineview &&
+ !display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake)
if (clock->m1 <= clock->m2)
return false;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+ if (!display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -620,7 +621,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -628,7 +629,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
return limit->p2.p2_fast;
else
return limit->p2.p2_slow;
@@ -656,7 +657,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -677,7 +678,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -714,7 +715,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -733,7 +734,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -770,7 +771,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int max_n;
bool found = false;
@@ -794,7 +795,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -817,7 +818,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
* Check if the calculated PLL configuration is more optimal compared to the
* best configuration and error found so far. Return the calculated error.
*/
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
const struct dpll *calculated_clock,
const struct dpll *best_clock,
unsigned int best_error_ppm,
@@ -827,13 +828,13 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
- if (IS_CHERRYVIEW(to_i915(dev))) {
+ if (display->platform.cherryview) {
*error_ppm = 0;
return calculated_clock->p > best_clock->p;
}
- if (drm_WARN_ON_ONCE(dev, !target_freq))
+ if (drm_WARN_ON_ONCE(display->drm, !target_freq))
return false;
*error_ppm = div_u64(1000000ULL *
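
vlv_PLL_is_optimal() ranks candidate dividers by their frequency error in parts per million, error_ppm = 10^6 * |calculated - target| / target, except on CHV where the error is ignored and a larger P wins. A standalone check of that arithmetic with made-up clocks:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static unsigned int error_ppm(int target_khz, int calculated_khz)
{
	return (unsigned int)(1000000ULL *
			      (uint64_t)abs(calculated_khz - target_khz) /
			      (uint64_t)target_khz);
}

int main(void)
{
	/* a 270000 kHz target hit at 270027 kHz is 100 ppm off */
	assert(error_ppm(270000, 270027) == 100);
	assert(error_ppm(270000, 270000) == 0);
	return 0;
}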
@@ -864,8 +865,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
@@ -889,12 +889,12 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target,
+ if (!vlv_PLL_is_optimal(display, target,
&clock,
best_clock,
bestppm, &ppm))
@@ -922,8 +922,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
unsigned int best_error_ppm;
struct dpll clock;
u64 m2;
@@ -958,10 +957,10 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ if (!intel_pll_is_valid(display, limit, &clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
best_error_ppm, &error_ppm))
continue;
@@ -1005,8 +1004,6 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1016,8 +1013,8 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ if (display->platform.i945g || display->platform.i945gm ||
+ display->platform.g33 || display->platform.pineview) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -1030,10 +1027,10 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_G4X(dev_priv)) {
+ if (display->platform.g4x) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- } else if (IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.pineview) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
WARN_ON(reduced_clock->p1 != clock->p1);
} else {
@@ -1057,7 +1054,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
}
WARN_ON(reduced_clock->p2 != clock->p2);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
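
The "compute bitmask from p1 value" lines in i9xx_dpll() above encode the P1 post divider as a one-hot bit, bit (p1 - 1), shifted into the register field; the platforms differ only in the field's shift. A standalone sketch with a hypothetical shift value:

#include <assert.h>
#include <stdint.h>

#define P1_POST_DIV_SHIFT 16	/* placeholder, not the real field position */

static uint32_t encode_p1(int p1)
{
	return (uint32_t)(1u << (p1 - 1)) << P1_POST_DIV_SHIFT;
}

int main(void)
{
	assert(encode_p1(1) == 0x00010000);	/* divider 1 -> field bit 0 */
	assert(encode_p1(3) == 0x00040000);	/* divider 3 -> field bit 2 */
	return 0;
}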
@@ -1075,11 +1072,10 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
hw_state->fp0 = pnv_dpll_compute_fp(clock);
hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
} else {
@@ -1089,7 +1085,7 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
hw_state->dpll_md = i965_dpll_md(crtc_state);
}
@@ -1098,8 +1094,6 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1129,7 +1123,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
* both DPLLs. The spec says we should disable the DVO 2X clock
* when not needed, but this seems to work fine in practice.
*/
- if (IS_I830(dev_priv) ||
+ if (display->platform.i830 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
@@ -1157,18 +1151,18 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
int ret;
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- ret = intel_compute_shared_dplls(state, crtc, encoder);
+ ret = intel_dpll_compute(state, crtc, encoder);
if (ret)
return ret;
@@ -1183,20 +1177,20 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
-static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int hsw_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- return intel_reserve_shared_dplls(state, crtc, encoder);
+ return intel_dpll_reserve(state, crtc, encoder);
}
static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1230,7 +1224,7 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
if (ret)
return ret;
- /* TODO: Do the readback via intel_compute_shared_dplls() */
+ /* TODO: Do the readback via intel_dpll_compute() */
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
@@ -1241,12 +1235,10 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
+ ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(display) && intel_is_dual_link_lvds(display))))
return 25;
if (crtc_state->sdvo_tv_clock)
@@ -1276,8 +1268,6 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE;
@@ -1311,7 +1301,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
* clear if it's a win or loss power wise. No point in doing
* this on ILK at all since it has a fixed DPLL<->pipe mapping.
*/
- if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ if (INTEL_NUM_PIPES(display) == 3 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -1362,7 +1352,6 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1375,13 +1364,13 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
- dev_priv->display.vbt.lvds_ssc_freq);
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ display->vbt.lvds_ssc_freq);
+ refclk = display->vbt.lvds_ssc_freq;
}
- if (intel_is_dual_link_lvds(dev_priv)) {
+ if (intel_is_dual_link_lvds(display)) {
if (refclk == 100000)
limit = &ilk_limits_dual_lvds_100m;
else
@@ -1406,7 +1395,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- ret = intel_compute_shared_dplls(state, crtc, NULL);
+ ret = intel_dpll_compute(state, crtc, NULL);
if (ret)
return ret;
@@ -1416,8 +1405,8 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
return ret;
}
-static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int ilk_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -1426,7 +1415,7 @@ static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
if (!crtc_state->has_pch_encoder)
return 0;
- return intel_reserve_shared_dplls(state, crtc, NULL);
+ return intel_dpll_reserve(state, crtc, NULL);
}
static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
@@ -1539,7 +1528,6 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1547,13 +1535,13 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
@@ -1589,7 +1577,6 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1597,8 +1584,8 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1628,7 +1615,6 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1636,8 +1622,8 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1669,7 +1655,6 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1677,8 +1662,8 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1706,57 +1691,57 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
-static const struct intel_dpll_funcs mtl_dpll_funcs = {
+static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
.crtc_compute_clock = mtl_crtc_compute_clock,
};
-static const struct intel_dpll_funcs dg2_dpll_funcs = {
+static const struct intel_dpll_global_funcs dg2_dpll_funcs = {
.crtc_compute_clock = dg2_crtc_compute_clock,
};
-static const struct intel_dpll_funcs hsw_dpll_funcs = {
+static const struct intel_dpll_global_funcs hsw_dpll_funcs = {
.crtc_compute_clock = hsw_crtc_compute_clock,
- .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
+ .crtc_get_dpll = hsw_crtc_get_dpll,
};
-static const struct intel_dpll_funcs ilk_dpll_funcs = {
+static const struct intel_dpll_global_funcs ilk_dpll_funcs = {
.crtc_compute_clock = ilk_crtc_compute_clock,
- .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
+ .crtc_get_dpll = ilk_crtc_get_dpll,
};
-static const struct intel_dpll_funcs chv_dpll_funcs = {
+static const struct intel_dpll_global_funcs chv_dpll_funcs = {
.crtc_compute_clock = chv_crtc_compute_clock,
};
-static const struct intel_dpll_funcs vlv_dpll_funcs = {
+static const struct intel_dpll_global_funcs vlv_dpll_funcs = {
.crtc_compute_clock = vlv_crtc_compute_clock,
};
-static const struct intel_dpll_funcs g4x_dpll_funcs = {
+static const struct intel_dpll_global_funcs g4x_dpll_funcs = {
.crtc_compute_clock = g4x_crtc_compute_clock,
};
-static const struct intel_dpll_funcs pnv_dpll_funcs = {
+static const struct intel_dpll_global_funcs pnv_dpll_funcs = {
.crtc_compute_clock = pnv_crtc_compute_clock,
};
-static const struct intel_dpll_funcs i9xx_dpll_funcs = {
+static const struct intel_dpll_global_funcs i9xx_dpll_funcs = {
.crtc_compute_clock = i9xx_crtc_compute_clock,
};
-static const struct intel_dpll_funcs i8xx_dpll_funcs = {
+static const struct intel_dpll_global_funcs i8xx_dpll_funcs = {
.crtc_compute_clock = i8xx_crtc_compute_clock,
};
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -1764,9 +1749,9 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->hw.enable)
return 0;
- ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
+ ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1774,26 +1759,26 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
-int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
- drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->intel_dpll);
- if (!crtc_state->hw.enable || crtc_state->shared_dpll)
+ if (!crtc_state->hw.enable || crtc_state->intel_dpll)
return 0;
- if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
+ if (!display->funcs.dpll->crtc_get_dpll)
return 0;
- ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
+ ret = display->funcs.dpll->crtc_get_dpll(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1802,43 +1787,42 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
}
void
-intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 14)
- dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
- else if (IS_DG2(dev_priv))
- dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
- else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
- else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &chv_dpll_funcs;
- else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
- else if (IS_G4X(dev_priv))
- dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
- else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
+intel_dpll_init_clock_hook(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 14)
+ display->funcs.dpll = &mtl_dpll_funcs;
+ else if (display->platform.dg2)
+ display->funcs.dpll = &dg2_dpll_funcs;
+ else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
+ display->funcs.dpll = &hsw_dpll_funcs;
+ else if (HAS_PCH_SPLIT(display))
+ display->funcs.dpll = &ilk_dpll_funcs;
+ else if (display->platform.cherryview)
+ display->funcs.dpll = &chv_dpll_funcs;
+ else if (display->platform.valleyview)
+ display->funcs.dpll = &vlv_dpll_funcs;
+ else if (display->platform.g4x)
+ display->funcs.dpll = &g4x_dpll_funcs;
+ else if (display->platform.pineview)
+ display->funcs.dpll = &pnv_dpll_funcs;
+ else if (DISPLAY_VER(display) != 2)
+ display->funcs.dpll = &i9xx_dpll_funcs;
else
- dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
+ display->funcs.dpll = &i8xx_dpll_funcs;
}
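The hunk above keeps the existing pattern of choosing a per-platform ops table once in intel_dpll_init_clock_hook() and dispatching through display->funcs.dpll everywhere else; only the container type changes. That pattern reduces to a small self-contained sketch (illustrative only, not part of this patch; all names here are invented):

    /* Sketch of init-time ops-table selection and later dispatch. */
    #include <stdio.h>

    struct dpll_funcs {
            int (*crtc_compute_clock)(void *crtc);
    };

    static int new_compute(void *crtc) { (void)crtc; puts("new platform path"); return 0; }
    static int old_compute(void *crtc) { (void)crtc; puts("old platform path"); return 0; }

    static const struct dpll_funcs new_funcs = { .crtc_compute_clock = new_compute };
    static const struct dpll_funcs old_funcs = { .crtc_compute_clock = old_compute };

    struct display {
            int ver;
            const struct dpll_funcs *dpll;  /* chosen once, like display->funcs.dpll */
    };

    static void init_clock_hook(struct display *d)
    {
            /* Checks run newest platform first, so each one takes the first match. */
            if (d->ver >= 14)
                    d->dpll = &new_funcs;
            else
                    d->dpll = &old_funcs;
    }

    int main(void)
    {
            struct display d = { .ver = 14, .dpll = NULL };

            init_clock_hook(&d);
            return d.dpll->crtc_compute_clock(NULL);  /* single dispatch site */
    }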
-static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pps(struct intel_display *display)
{
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
return false;
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+ return display->platform.pineview || display->platform.mobile;
}
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
int i;
@@ -1846,27 +1830,27 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- if (i9xx_has_pps(dev_priv))
+ if (i9xx_has_pps(display))
assert_pps_unlocked(display, pipe);
- intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
- intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
+ intel_de_write(display, FP0(pipe), hw_state->fp0);
+ intel_de_write(display, FP1(pipe), hw_state->fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ if (DISPLAY_VER(display) >= 4) {
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
} else {
/* The pixel multiplier can only be updated once the
@@ -1874,18 +1858,18 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
*
* So write it again.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150); /* wait for warmup */
}
}
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+static void vlv_pllb_recal_opamp(struct intel_display *display,
enum dpio_phy phy, enum dpio_channel ch)
{
u32 tmp;
@@ -1894,54 +1878,54 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
tmp &= 0xffffff00;
tmp |= 0x00000030;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
tmp &= 0x00ffffff;
tmp |= 0x8c000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
+ vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
tmp &= 0xffffff00;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
tmp &= 0x00ffffff;
tmp |= 0xb0000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
+ vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
}
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
u32 tmp, coreclk;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* See eDP HDMI DPIO driver vbios notes doc */
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, phy, ch);
+ vlv_pllb_recal_opamp(display, phy, ch);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW16(ch));
tmp &= 0x00ffffff;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW16(ch), tmp);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
+ vlv_dpio_write(display->drm, phy, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
tmp = DPIO_M1_DIV(clock->m1) |
@@ -1957,70 +1941,63 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
* Note: don't use the DAC post divider as it seems unstable.
*/
tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
tmp |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
/* Set HBR and RBR LPF coefficients */
if (crtc_state->port_clock == 162000 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
- 0x009f0003);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x009f0003);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
- 0x00d0000f);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x00d0000f);
if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df40000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df70000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df70000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df40000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
+ coreclk = vlv_dpio_read(display->drm, phy, VLV_PLL_DW7(ch));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW7(ch), coreclk);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW19(ch), 0x87871000);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
}
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2030,7 +2007,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2038,14 +2015,14 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
_vlv_enable_pll(crtc_state);
}
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
@@ -2054,44 +2031,44 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
m2_frac = clock->m2 & 0x3fffff;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* p1 and p2 divider */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW13(ch),
DPIO_CHV_S1_DIV(5) |
DPIO_CHV_P1_DIV(clock->p1) |
DPIO_CHV_P2_DIV(clock->p2) |
DPIO_CHV_K_DIV(1));
/* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW0(ch),
DPIO_CHV_M2_DIV(clock->m2 >> 22));
/* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW1(ch),
DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
DPIO_CHV_N_DIV(1));
/* M2 fraction division */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW2(ch),
DPIO_CHV_M2_FRAC_DIV(m2_frac));
/* M2 fraction division enable */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
if (m2_frac)
tmp |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW3(ch), tmp);
/* Program digital lock detect threshold */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW9(ch));
tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
if (!m2_frac)
tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW9(ch), tmp);
/* Loop filter */
if (clock->vco == 5400000) {
@@ -2116,39 +2093,39 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0;
}
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW6(ch), loopfilter);
- tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW8(ch));
tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW8(ch), tmp);
/* AFC Recal */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
- vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch),
+ vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch)) |
DPIO_AFC_RECAL);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
u32 tmp;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), tmp);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
/*
* Need to wait > 100ns between dclkp clock enable bit and PLL enable.
@@ -2156,18 +2133,17 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
udelay(1);
/* Enable PLL */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Check PLL is locked */
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "PLL %d failed to lock\n", pipe);
}
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2177,7 +2153,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk and SSC */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VCO_ENABLE);
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2192,29 +2168,29 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B),
+ intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(display, DPLL_MD(display, PIPE_B),
hw_state->dpll_md);
- intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
+ intel_de_write(display, CBR4_VLV, 0);
+ display->state.chv_dpll_md[pipe] = hw_state->dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
* We should always have it disabled.
*/
- drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) &
+ drm_WARN_ON(display->drm,
+ (intel_de_read(display, DPLL(display, PIPE_B)) &
DPLL_VGA_MODE_DIS) == 0);
} else {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
}
/**
* vlv_force_pll_on - forcibly enable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to enable
* @dpll: PLL configuration
*
@@ -2222,10 +2198,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_crtc_state *crtc_state;
@@ -2238,7 +2213,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
crtc_state->dpll = *dpll;
crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
chv_compute_dpll(crtc_state);
chv_enable_pll(crtc_state);
} else {
@@ -2251,9 +2226,8 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
return 0;
}
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
/* Make sure the pipe isn't still relying on us */
@@ -2268,9 +2242,8 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_posting_read(display, DPLL(display, pipe));
}
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void chv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2286,14 +2259,14 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_write(display, DPLL(display, pipe), val);
intel_de_posting_read(display, DPLL(display, pipe));
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), val);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
@@ -2316,18 +2289,18 @@ void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
/**
* vlv_force_pll_off - forcibly disable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to disable
*
* Disable the PLL for @pipe. To be used in cases where the PLL was
* forcibly enabled even though @pipe is not going to be enabled.
*/
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
{
- if (IS_CHERRYVIEW(dev_priv))
- chv_disable_pll(dev_priv, pipe);
+ if (display->platform.cherryview)
+ chv_disable_pll(display, pipe);
else
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
}
/* Only for pre-ILK configs */
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 21d06cbd2ce7..3444a2dd3166 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -8,20 +8,19 @@
#include <linux/types.h>
+enum pipe;
struct dpll;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dpll_hw_state;
-enum pipe;
-void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+void intel_dpll_init_clock_hook(struct intel_display *display);
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
@@ -29,14 +28,14 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
void chv_compute_dpll(struct intel_crtc_state *crtc_state);
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe);
void chv_enable_pll(const struct intel_crtc_state *crtc_state);
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void chv_disable_pll(struct intel_display *display, enum pipe pipe);
void vlv_enable_pll(const struct intel_crtc_state *crtc_state);
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe);
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state);
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index c825a507b905..33e0398120c8 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -24,11 +24,13 @@
#include <linux/math.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "bxt_dpio_phy_regs.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -38,6 +40,7 @@
#include "intel_hti.h"
#include "intel_mg_phy_regs.h"
#include "intel_pch_refclk.h"
+#include "intel_step.h"
#include "intel_tc.h"
/**
@@ -49,34 +52,34 @@
* share a PLL if their configurations match.
*
* This file provides an abstraction over display PLLs. The function
- * intel_shared_dpll_init() initializes the PLLs for the given platform. The
+ * intel_dpll_init() initializes the PLLs for the given platform. The
* users of a PLL are tracked and that tracking is integrated with the atomic
* modset interface. During an atomic operation, required PLLs can be reserved
* for a given CRTC and encoder configuration by calling
- * intel_reserve_shared_dplls() and previously reserved PLLs can be released
- * with intel_release_shared_dplls().
+ * intel_dpll_reserve() and previously reserved PLLs can be released
+ * with intel_dpll_release().
* Changes to the users are first staged in the atomic state, and then made
- * effective by calling intel_shared_dpll_swap_state() during the atomic
+ * effective by calling intel_dpll_swap_state() during the atomic
* commit phase.
*/
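To make the lifecycle in the comment above concrete — references are taken while checking the atomic state, and the staged PLL state only becomes effective when it is swapped in at commit time — here is a hedged, self-contained sketch; the types and helpers are invented for illustration and are not the driver's:

    /* Sketch of the check/commit ordering described in the comment above. */
    #include <stdbool.h>
    #include <stdio.h>

    struct pll_state { unsigned int pipe_mask; };

    struct pll {
            struct pll_state state;          /* current, committed state */
    };

    struct atomic_state {
            struct pll_state staged;         /* copy modified during check */
            bool dpll_set;
    };

    static void reserve(struct atomic_state *s, int pipe)
    {
            s->staged.pipe_mask |= 1u << pipe;  /* check phase: stage the user */
            s->dpll_set = true;
    }

    static void swap_state(struct atomic_state *s, struct pll *pll)
    {
            /* Commit phase: staged state becomes effective, and the old
             * state ends up back in the atomic state, as noted above. */
            struct pll_state tmp = pll->state;

            if (!s->dpll_set)
                    return;
            pll->state = s->staged;
            s->staged = tmp;
    }

    int main(void)
    {
            struct pll pll = { { 0 } };
            struct atomic_state state = { { 0 }, false };

            reserve(&state, 0);              /* atomic check */
            swap_state(&state, &pll);        /* atomic commit */
            printf("pipe_mask=0x%x\n", pll.state.pipe_mask);
            return 0;
    }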
/* platform specific hooks for managing DPLLs */
-struct intel_shared_dpll_funcs {
+struct intel_dpll_funcs {
/*
- * Hook for enabling the pll, called from intel_enable_shared_dpll() if
+ * Hook for enabling the pll, called from intel_enable_dpll() if
* the pll is not already enabled.
*/
void (*enable)(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
/*
- * Hook for disabling the pll, called from intel_disable_shared_dpll()
+ * Hook for disabling the pll, called from intel_disable_dpll()
* only when it is safe to disable the pll, i.e., there are no more
* tracked users for it.
*/
void (*disable)(struct intel_display *display,
- struct intel_shared_dpll *pll);
+ struct intel_dpll *pll);
/*
* Hook for reading the values currently programmed to the DPLL
@@ -84,7 +87,7 @@ struct intel_shared_dpll_funcs {
* verification after a mode set.
*/
bool (*get_hw_state)(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state);
/*
@@ -92,7 +95,7 @@ struct intel_shared_dpll_funcs {
* in state.
*/
int (*get_freq)(struct intel_display *i915,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
};
@@ -119,18 +122,18 @@ struct intel_dpll_mgr {
static void
intel_atomic_duplicate_dpll_state(struct intel_display *display,
- struct intel_shared_dpll_state *shared_dpll)
+ struct intel_dpll_state *dpll_state)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- /* Copy shared dpll state */
- for_each_shared_dpll(display, pll, i)
- shared_dpll[pll->index] = pll->state;
+ /* Copy dpll state */
+ for_each_dpll(display, pll, i)
+ dpll_state[pll->index] = pll->state;
}
-static struct intel_shared_dpll_state *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+static struct intel_dpll_state *
+intel_atomic_get_dpll_state(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
struct intel_display *display = to_intel_display(state);
@@ -141,28 +144,28 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
state->dpll_set = true;
intel_atomic_duplicate_dpll_state(display,
- state->shared_dpll);
+ state->dpll_state);
}
- return state->shared_dpll;
+ return state->dpll_state;
}
/**
- * intel_get_shared_dpll_by_id - get a DPLL given its id
+ * intel_get_dpll_by_id - get a DPLL given its id
* @display: intel_display device instance
* @id: pll id
*
* Returns:
* A pointer to the DPLL with @id
*/
-struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct intel_display *display,
- enum intel_dpll_id id)
+struct intel_dpll *
+intel_get_dpll_by_id(struct intel_display *display,
+ enum intel_dpll_id id)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
if (pll->info->id == id)
return pll;
}
@@ -172,9 +175,9 @@ intel_get_shared_dpll_by_id(struct intel_display *display,
}
/* For ILK+ */
-void assert_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll,
- bool state)
+void assert_dpll(struct intel_display *display,
+ struct intel_dpll *pll,
+ bool state)
{
bool cur_state;
struct intel_dpll_hw_state hw_state;
@@ -202,7 +205,7 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
static i915_reg_t
intel_combo_pll_enable_reg(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
if (display->platform.dg1)
return DG1_DPLL_ENABLE(pll->info->id);
@@ -215,7 +218,7 @@ intel_combo_pll_enable_reg(struct intel_display *display,
static i915_reg_t
intel_tc_pll_enable_reg(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
@@ -227,7 +230,7 @@ intel_tc_pll_enable_reg(struct intel_display *display,
}
static void _intel_enable_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
if (pll->info->power_domain)
pll->wakeref = intel_display_power_get(display, pll->info->power_domain);
@@ -237,7 +240,7 @@ static void _intel_enable_shared_dpll(struct intel_display *display,
}
static void _intel_disable_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
pll->info->funcs->disable(display, pll);
pll->on = false;
@@ -247,17 +250,17 @@ static void _intel_disable_shared_dpll(struct intel_display *display,
}
/**
- * intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc_state: CRTC, and its state, which has a shared DPLL
+ * intel_dpll_enable - enable a CRTC's DPLL
+ * @crtc_state: CRTC, and its state, which has a DPLL
*
- * Enable the shared DPLL used by @crtc.
+ * Enable the DPLL used by @crtc.
*/
-void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
+void intel_dpll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ struct intel_dpll *pll = crtc_state->intel_dpll;
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
unsigned int old_mask;
if (drm_WARN_ON(display->drm, !pll))
@@ -279,7 +282,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (old_mask) {
drm_WARN_ON(display->drm, !pll->on);
- assert_shared_dpll_enabled(display, pll);
+ assert_dpll_enabled(display, pll);
goto out;
}
drm_WARN_ON(display->drm, pll->on);
@@ -293,17 +296,17 @@ out:
}
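The pipe_mask accounting above follows a first-user-enables, last-user-disables discipline: the hardware enable runs only when the mask goes from empty to non-empty, and the disable counterpart (below) only when it drains back to zero. A small standalone sketch of that discipline, with invented names rather than driver code:

    /* Sketch of first-user-enables / last-user-disables refcounting. */
    #include <assert.h>
    #include <stdio.h>

    struct pll {
            unsigned int active_mask;   /* one bit per pipe using the PLL */
            int on;
    };

    static void pll_enable(struct pll *pll, int pipe)
    {
            unsigned int old_mask = pll->active_mask;

            pll->active_mask |= 1u << pipe;
            if (old_mask) {
                    assert(pll->on);    /* already enabled by an earlier user */
                    return;
            }
            pll->on = 1;                /* hardware enable happens only here */
            puts("hw enable");
    }

    static void pll_disable(struct pll *pll, int pipe)
    {
            assert(pll->on);
            pll->active_mask &= ~(1u << pipe);
            if (pll->active_mask)
                    return;             /* other pipes still rely on the PLL */
            pll->on = 0;                /* hardware disable only for the last user */
            puts("hw disable");
    }

    int main(void)
    {
            struct pll pll = { 0, 0 };

            pll_enable(&pll, 0);
            pll_enable(&pll, 1);        /* shared: no second hw enable */
            pll_disable(&pll, 0);
            pll_disable(&pll, 1);       /* last user: hw disable */
            return 0;
    }

Note the asserts: a non-empty mask must imply the PLL is already on, which mirrors the drm_WARN_ON() checks in the functions above and below.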
/**
- * intel_disable_shared_dpll - disable a CRTC's shared DPLL
+ * intel_dpll_disable - disable a CRTC's shared DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
*
- * Disable the shared DPLL used by @crtc.
+ * Disable the DPLL used by @crtc.
*/
-void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
+void intel_dpll_disable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ struct intel_dpll *pll = crtc_state->intel_dpll;
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
/* PCH only available on ILK+ */
if (DISPLAY_VER(display) < 5)
@@ -323,7 +326,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->info->name, pll->active_mask, pll->on,
crtc->base.base.id, crtc->base.name);
- assert_shared_dpll_enabled(display, pll);
+ assert_dpll_enabled(display, pll);
drm_WARN_ON(display->drm, !pll->on);
pll->active_mask &= ~pipe_mask;
@@ -341,11 +344,11 @@ out:
static unsigned long
intel_dpll_mask_all(struct intel_display *display)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
unsigned long dpll_mask = 0;
int i;
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
dpll_mask |= BIT(pll->info->id);
@@ -354,44 +357,44 @@ intel_dpll_mask_all(struct intel_display *display)
return dpll_mask;
}
-static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_atomic_state *state,
- const struct intel_crtc *crtc,
- const struct intel_dpll_hw_state *dpll_hw_state,
- unsigned long dpll_mask)
+static struct intel_dpll *
+intel_find_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll_hw_state *dpll_hw_state,
+ unsigned long dpll_mask)
{
struct intel_display *display = to_intel_display(crtc);
unsigned long dpll_mask_all = intel_dpll_mask_all(display);
- struct intel_shared_dpll_state *shared_dpll;
- struct intel_shared_dpll *unused_pll = NULL;
+ struct intel_dpll_state *dpll_state;
+ struct intel_dpll *unused_pll = NULL;
enum intel_dpll_id id;
- shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ dpll_state = intel_atomic_get_dpll_state(&state->base);
drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);
for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
- pll = intel_get_shared_dpll_by_id(display, id);
+ pll = intel_get_dpll_by_id(display, id);
if (!pll)
continue;
/* Only want to check enabled timings first */
- if (shared_dpll[pll->index].pipe_mask == 0) {
+ if (dpll_state[pll->index].pipe_mask == 0) {
if (!unused_pll)
unused_pll = pll;
continue;
}
if (memcmp(dpll_hw_state,
- &shared_dpll[pll->index].hw_state,
+ &dpll_state[pll->index].hw_state,
sizeof(*dpll_hw_state)) == 0) {
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
crtc->base.base.id, crtc->base.name,
pll->info->name,
- shared_dpll[pll->index].pipe_mask,
+ dpll_state[pll->index].pipe_mask,
pll->active_mask);
return pll;
}
@@ -409,76 +412,76 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
}
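The function renamed above is a match-or-fallback search: a PLL already programmed (in the staged state) with a byte-identical hw state can simply be shared, and only failing that is an unused PLL taken. The same search in miniature, with hypothetical types and values:

    /* Sketch of the share-if-identical, else-take-unused PLL search. */
    #include <stdio.h>
    #include <string.h>

    struct hw_state { int p1, p2; };

    struct pll_state {
            unsigned int pipe_mask;          /* 0 means currently unused */
            struct hw_state hw;
    };

    static struct pll_state *find_pll(struct pll_state *plls, int n,
                                      const struct hw_state *want)
    {
            struct pll_state *unused = NULL;
            int i;

            for (i = 0; i < n; i++) {
                    if (plls[i].pipe_mask == 0) {
                            if (!unused)
                                    unused = &plls[i];
                            continue;
                    }
                    /* An active PLL with identical settings can be shared. */
                    if (memcmp(want, &plls[i].hw, sizeof(*want)) == 0)
                            return &plls[i];
            }
            return unused;                   /* may be NULL: no PLL available */
    }

    int main(void)
    {
            struct pll_state plls[2] = {
                    { .pipe_mask = 0x1, .hw = { 2, 10 } },
                    { .pipe_mask = 0x0, .hw = { 0, 0 } },
            };
            struct hw_state want = { 2, 10 };
            struct pll_state *pll = find_pll(plls, 2, &want);

            printf("%s\n", pll == &plls[0] ? "shared existing" : "took unused");
            return 0;
    }

Taking an unused PLL only as a fallback is what lets two CRTCs with identical clock configurations end up on a single PLL.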
/**
- * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
+ * intel_dpll_crtc_get - Get a DPLL reference for a CRTC
* @crtc: CRTC on which behalf the reference is taken
* @pll: DPLL for which the reference is taken
- * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
+ * @dpll_state: the DPLL atomic state in which the reference is tracked
*
* Take a reference for @pll tracking the use of it by @crtc.
*/
static void
-intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- struct intel_shared_dpll_state *shared_dpll_state)
+intel_dpll_crtc_get(const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ struct intel_dpll_state *dpll_state)
{
struct intel_display *display = to_intel_display(crtc);
- drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
+ drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
- shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
+ dpll_state->pipe_mask |= BIT(crtc->pipe);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
}
static void
-intel_reference_shared_dpll(struct intel_atomic_state *state,
- const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *dpll_hw_state)
+intel_reference_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- struct intel_shared_dpll_state *shared_dpll;
+ struct intel_dpll_state *dpll_state;
- shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ dpll_state = intel_atomic_get_dpll_state(&state->base);
- if (shared_dpll[pll->index].pipe_mask == 0)
- shared_dpll[pll->index].hw_state = *dpll_hw_state;
+ if (dpll_state[pll->index].pipe_mask == 0)
+ dpll_state[pll->index].hw_state = *dpll_hw_state;
- intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
+ intel_dpll_crtc_get(crtc, pll, &dpll_state[pll->index]);
}
/**
- * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
+ * intel_dpll_crtc_put - Drop a DPLL reference for a CRTC
* @crtc: CRTC on which behalf the reference is dropped
* @pll: DPLL for which the reference is dropped
- * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
+ * @dpll_state: the DPLL atomic state in which the reference is tracked
*
* Drop a reference for @pll tracking the end of use of it by @crtc.
*/
void
-intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- struct intel_shared_dpll_state *shared_dpll_state)
+intel_dpll_crtc_put(const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ struct intel_dpll_state *dpll_state)
{
struct intel_display *display = to_intel_display(crtc);
- drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
+ drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
- shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
+ dpll_state->pipe_mask &= ~BIT(crtc->pipe);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
}
-static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
- const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll)
+static void intel_unreference_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll *pll)
{
- struct intel_shared_dpll_state *shared_dpll;
+ struct intel_dpll_state *dpll_state;
- shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ dpll_state = intel_atomic_get_dpll_state(&state->base);
- intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
+ intel_dpll_crtc_put(crtc, pll, &dpll_state[pll->index]);
}
static void intel_put_dpll(struct intel_atomic_state *state,
@@ -489,16 +492,16 @@ static void intel_put_dpll(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- new_crtc_state->shared_dpll = NULL;
+ new_crtc_state->intel_dpll = NULL;
- if (!old_crtc_state->shared_dpll)
+ if (!old_crtc_state->intel_dpll)
return;
- intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
+ intel_unreference_dpll(state, crtc, old_crtc_state->intel_dpll);
}
/**
- * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
+ * intel_dpll_swap_state - make atomic DPLL configuration effective
* @state: atomic state
*
* This is the dpll version of drm_atomic_helper_swap_state() since the
@@ -508,22 +511,22 @@ static void intel_put_dpll(struct intel_atomic_state *state,
* i.e. it also puts the current state into @state, even though there is no
* need for that at this moment.
*/
-void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
+void intel_dpll_swap_state(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
- struct intel_shared_dpll *pll;
+ struct intel_dpll_state *dpll_state = state->dpll_state;
+ struct intel_dpll *pll;
int i;
if (!state->dpll_set)
return;
- for_each_shared_dpll(display, pll, i)
- swap(pll->state, shared_dpll[pll->index]);
+ for_each_dpll(display, pll, i)
+ swap(pll->state, dpll_state[pll->index]);
}
static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
@@ -559,7 +562,7 @@ static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
}
static void ibx_pch_dpll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
@@ -588,7 +591,7 @@ static void ibx_pch_dpll_enable(struct intel_display *display,
}
static void ibx_pch_dpll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
@@ -609,36 +612,35 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum intel_dpll_id id;
- if (HAS_PCH_IBX(i915)) {
+ if (HAS_PCH_IBX(display)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
id = (enum intel_dpll_id) crtc->pipe;
- pll = intel_get_shared_dpll_by_id(display, id);
+ pll = intel_get_dpll_by_id(display, id);
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name,
pll->info->name);
} else {
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_PCH_PLL_B) |
- BIT(DPLL_ID_PCH_PLL_A));
+ pll = intel_find_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_PCH_PLL_B) |
+ BIT(DPLL_ID_PCH_PLL_A));
}
if (!pll)
return -EINVAL;
/* reference the pll */
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
@@ -668,7 +670,7 @@ static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->fp1 == b->fp1;
}
-static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
+static const struct intel_dpll_funcs ibx_pch_dpll_funcs = {
.enable = ibx_pch_dpll_enable,
.disable = ibx_pch_dpll_disable,
.get_hw_state = ibx_pch_dpll_get_hw_state,
@@ -690,7 +692,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
};
static void hsw_ddi_wrpll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -702,7 +704,7 @@ static void hsw_ddi_wrpll_enable(struct intel_display *display,
}
static void hsw_ddi_spll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -713,9 +715,8 @@ static void hsw_ddi_spll_enable(struct intel_display *display,
}
static void hsw_ddi_wrpll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
@@ -726,13 +727,12 @@ static void hsw_ddi_wrpll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static void hsw_ddi_spll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
@@ -743,11 +743,11 @@ static void hsw_ddi_spll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -769,7 +769,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
}
static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -996,7 +996,7 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
}
static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -1059,14 +1059,14 @@ hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
return 0;
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- return intel_find_shared_dpll(state, crtc,
+ return intel_find_dpll(state, crtc,
&crtc_state->dpll_hw_state,
BIT(DPLL_ID_WRPLL2) |
BIT(DPLL_ID_WRPLL1));
@@ -1090,11 +1090,11 @@ hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
}
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum intel_dpll_id pll_id;
int clock = crtc_state->port_clock;
@@ -1113,7 +1113,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
return NULL;
}
- pll = intel_get_shared_dpll_by_id(display, pll_id);
+ pll = intel_get_dpll_by_id(display, pll_id);
if (!pll)
return NULL;
@@ -1122,7 +1122,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
}
static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
int link_clock = 0;
@@ -1162,19 +1162,19 @@ hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
return 0;
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+ return intel_find_dpll(state, crtc, &crtc_state->dpll_hw_state,
BIT(DPLL_ID_SPLL));
}
static int hsw_ddi_spll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -1221,7 +1221,7 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll = NULL;
+ struct intel_dpll *pll = NULL;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
pll = hsw_ddi_wrpll_get_dpll(state, crtc);
@@ -1233,10 +1233,10 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
if (!pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
@@ -1270,14 +1270,14 @@ static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->spll == b->spll;
}
-static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
+static const struct intel_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
.get_hw_state = hsw_ddi_wrpll_get_hw_state,
.get_freq = hsw_ddi_wrpll_get_freq,
};
-static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
+static const struct intel_dpll_funcs hsw_ddi_spll_funcs = {
.enable = hsw_ddi_spll_enable,
.disable = hsw_ddi_spll_disable,
.get_hw_state = hsw_ddi_spll_get_hw_state,
@@ -1285,24 +1285,24 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
};
static void hsw_ddi_lcpll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *hw_state)
{
}
static void hsw_ddi_lcpll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
}
static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return true;
}
-static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
+static const struct intel_dpll_funcs hsw_ddi_lcpll_funcs = {
.enable = hsw_ddi_lcpll_enable,
.disable = hsw_ddi_lcpll_disable,
.get_hw_state = hsw_ddi_lcpll_get_hw_state,
@@ -1364,7 +1364,7 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
};
static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct skl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
@@ -1378,7 +1378,7 @@ static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
}
static void skl_ddi_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1400,7 +1400,7 @@ static void skl_ddi_pll_enable(struct intel_display *display,
}
static void skl_ddi_dpll0_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1409,7 +1409,7 @@ static void skl_ddi_dpll0_enable(struct intel_display *display,
}
static void skl_ddi_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
@@ -1420,12 +1420,12 @@ static void skl_ddi_pll_disable(struct intel_display *display,
}
static void skl_ddi_dpll0_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
}
static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1463,7 +1463,7 @@ out:
}
static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1736,7 +1736,7 @@ skip_remaining_dividers:
}
static int skl_ddi_wrpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1884,7 +1884,7 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
}
static int skl_ddi_lcpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1939,31 +1939,31 @@ static int skl_get_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SKL_DPLL0));
+ pll = intel_find_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL0));
else
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SKL_DPLL3) |
- BIT(DPLL_ID_SKL_DPLL2) |
- BIT(DPLL_ID_SKL_DPLL1));
+ pll = intel_find_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL3) |
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1));
if (!pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
static int skl_ddi_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -2004,14 +2004,14 @@ static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->cfgcr2 == b->cfgcr2;
}
-static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
+static const struct intel_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
.get_hw_state = skl_ddi_pll_get_hw_state,
.get_freq = skl_ddi_pll_get_freq,
};
-static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
+static const struct intel_dpll_funcs skl_ddi_dpll0_funcs = {
.enable = skl_ddi_dpll0_enable,
.disable = skl_ddi_dpll0_disable,
.get_hw_state = skl_ddi_dpll0_get_hw_state,
@@ -2038,7 +2038,7 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
};
static void bxt_ddi_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
@@ -2141,7 +2141,7 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
}
static void bxt_ddi_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
@@ -2160,7 +2160,7 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
}
static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
@@ -2360,7 +2360,7 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
}
static int bxt_ddi_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
@@ -2429,20 +2429,20 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum intel_dpll_id id;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
- pll = intel_get_shared_dpll_by_id(display, id);
+ pll = intel_get_dpll_by_id(display, id);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
@@ -2486,7 +2486,7 @@ static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->pcsdw12 == b->pcsdw12;
}
-static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
+static const struct intel_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
.get_hw_state = bxt_ddi_pll_get_hw_state,
@@ -2606,10 +2606,8 @@ ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
{
return ((display->platform.elkhartlake &&
IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
- display->platform.tigerlake ||
- display->platform.alderlake_s ||
- display->platform.alderlake_p) &&
- display->dpll.ref_clks.nssc == 38400;
+ DISPLAY_VER(display) >= 12) &&
+ display->dpll.ref_clks.nssc == 38400;
}
struct icl_combo_pll_params {
@@ -2757,7 +2755,7 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
}
static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
/*
@@ -2828,7 +2826,7 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
}
static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3201,7 +3199,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3287,7 +3285,7 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[port_dpll_id];
- crtc_state->shared_dpll = port_dpll->pll;
+ crtc_state->intel_dpll = port_dpll->pll;
crtc_state->dpll_hw_state = port_dpll->hw_state;
}
@@ -3390,14 +3388,14 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
/* Eliminate DPLLs from consideration if reserved by HTI */
dpll_mask &= ~intel_hti_dpll_mask(display);
- port_dpll->pll = intel_find_shared_dpll(state, crtc,
- &port_dpll->hw_state,
- dpll_mask);
+ port_dpll->pll = intel_find_dpll(state, crtc,
+ &port_dpll->hw_state,
+ dpll_mask);
if (!port_dpll->pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- port_dpll->pll, &port_dpll->hw_state);
+ intel_reference_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
icl_update_active_dpll(state, crtc, encoder);
@@ -3430,8 +3428,8 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
return ret;
/* this is mainly for the fastset check */
- if (old_crtc_state->shared_dpll &&
- old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
+ if (old_crtc_state->intel_dpll &&
+ old_crtc_state->intel_dpll->info->id == DPLL_ID_ICL_TBTPLL)
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
else
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
@@ -3454,26 +3452,25 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
int ret;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- port_dpll->pll = intel_find_shared_dpll(state, crtc,
- &port_dpll->hw_state,
- BIT(DPLL_ID_ICL_TBTPLL));
+ port_dpll->pll = intel_find_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- port_dpll->pll, &port_dpll->hw_state);
-
+ intel_reference_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
- port_dpll->pll = intel_find_shared_dpll(state, crtc,
- &port_dpll->hw_state,
- BIT(dpll_id));
+ port_dpll->pll = intel_find_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(dpll_id));
if (!port_dpll->pll) {
ret = -EINVAL;
goto err_unreference_tbt_pll;
}
- intel_reference_shared_dpll(state, crtc,
- port_dpll->pll, &port_dpll->hw_state);
+ intel_reference_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
icl_update_active_dpll(state, crtc, encoder);
@@ -3481,7 +3478,7 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
err_unreference_tbt_pll:
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
+ intel_unreference_dpll(state, crtc, port_dpll->pll);
return ret;
}
@@ -3523,7 +3520,7 @@ static void icl_put_dplls(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum icl_port_dpll_id id;
- new_crtc_state->shared_dpll = NULL;
+ new_crtc_state->intel_dpll = NULL;
for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
const struct icl_port_dpll *old_port_dpll =
@@ -3536,12 +3533,12 @@ static void icl_put_dplls(struct intel_atomic_state *state,
if (!old_port_dpll->pll)
continue;
- intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
+ intel_unreference_dpll(state, crtc, old_port_dpll->pll);
}
}
static bool mg_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3608,7 +3605,7 @@ out:
}
static bool dkl_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3680,7 +3677,7 @@ out:
}
static bool icl_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state,
i915_reg_t enable_reg)
{
@@ -3741,7 +3738,7 @@ out:
}
static bool combo_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
@@ -3750,14 +3747,14 @@ static bool combo_pll_get_hw_state(struct intel_display *display,
}
static bool tbt_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
}
static void icl_dpll_write(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
@@ -3799,7 +3796,7 @@ static void icl_dpll_write(struct intel_display *display,
}
static void icl_mg_pll_write(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
@@ -3842,7 +3839,7 @@ static void icl_mg_pll_write(struct intel_display *display,
}
static void dkl_pll_write(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
@@ -3907,7 +3904,7 @@ static void dkl_pll_write(struct intel_display *display,
}
static void icl_pll_power_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
i915_reg_t enable_reg)
{
intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);
@@ -3922,7 +3919,7 @@ static void icl_pll_power_enable(struct intel_display *display,
}
static void icl_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
i915_reg_t enable_reg)
{
intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
@@ -3932,7 +3929,7 @@ static void icl_pll_enable(struct intel_display *display,
drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
}
-static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_shared_dpll *pll)
+static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_dpll *pll)
{
u32 val;
@@ -3957,7 +3954,7 @@ static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct inte
}
static void combo_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3981,7 +3978,7 @@ static void combo_pll_enable(struct intel_display *display,
}
static void tbt_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -4002,7 +3999,7 @@ static void tbt_pll_enable(struct intel_display *display,
}
static void mg_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -4027,7 +4024,7 @@ static void mg_pll_enable(struct intel_display *display,
}
static void icl_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
i915_reg_t enable_reg)
{
/* The first steps are done by intel_ddi_post_disable(). */
@@ -4058,7 +4055,7 @@ static void icl_pll_disable(struct intel_display *display,
}
static void combo_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
@@ -4066,13 +4063,13 @@ static void combo_pll_disable(struct intel_display *display,
}
static void tbt_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
icl_pll_disable(display, pll, TBT_PLL_ENABLE);
}
static void mg_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
@@ -4131,21 +4128,21 @@ static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
}
-static const struct intel_shared_dpll_funcs combo_pll_funcs = {
+static const struct intel_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
.get_hw_state = combo_pll_get_hw_state,
.get_freq = icl_ddi_combo_pll_get_freq,
};
-static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
+static const struct intel_dpll_funcs tbt_pll_funcs = {
.enable = tbt_pll_enable,
.disable = tbt_pll_disable,
.get_hw_state = tbt_pll_get_hw_state,
.get_freq = icl_ddi_tbt_pll_get_freq,
};
-static const struct intel_shared_dpll_funcs mg_pll_funcs = {
+static const struct intel_dpll_funcs mg_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = mg_pll_get_hw_state,
@@ -4193,7 +4190,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.compare_hw_state = icl_compare_hw_state,
};
-static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
+static const struct intel_dpll_funcs dkl_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = dkl_pll_get_hw_state,
@@ -4302,14 +4299,13 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
};
/**
- * intel_shared_dpll_init - Initialize shared DPLLs
+ * intel_dpll_init - Initialize DPLLs
* @display: intel_display device
*
- * Initialize shared DPLLs for @display.
+ * Initialize DPLLs for @display.
*/
-void intel_shared_dpll_init(struct intel_display *display)
+void intel_dpll_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
int i;
@@ -4339,7 +4335,7 @@ void intel_shared_dpll_init(struct intel_display *display)
dpll_mgr = &skl_pll_mgr;
else if (HAS_DDI(display))
dpll_mgr = &hsw_pll_mgr;
- else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
+ else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr)
@@ -4349,23 +4345,23 @@ void intel_shared_dpll_init(struct intel_display *display)
for (i = 0; dpll_info[i].name; i++) {
if (drm_WARN_ON(display->drm,
- i >= ARRAY_SIZE(display->dpll.shared_dplls)))
+ i >= ARRAY_SIZE(display->dpll.dplls)))
break;
/* must fit into unsigned long bitmask on 32bit */
if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
break;
- display->dpll.shared_dplls[i].info = &dpll_info[i];
- display->dpll.shared_dplls[i].index = i;
+ display->dpll.dplls[i].info = &dpll_info[i];
+ display->dpll.dplls[i].index = i;
}
display->dpll.mgr = dpll_mgr;
- display->dpll.num_shared_dpll = i;
+ display->dpll.num_dpll = i;
}
/**
- * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
+ * intel_dpll_compute - compute DPLL state for a CRTC and encoder combination
* @state: atomic state
* @crtc: CRTC to compute DPLLs for
* @encoder: encoder
@@ -4373,14 +4369,14 @@ void intel_shared_dpll_init(struct intel_display *display)
* This function computes the DPLL state for the given CRTC and encoder.
*
* The new configuration in the atomic commit @state is made effective by
- * calling intel_shared_dpll_swap_state().
+ * calling intel_dpll_swap_state().
*
* Returns:
* 0 on success, negative error code on failure.
*/
-int intel_compute_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+int intel_dpll_compute(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
@@ -4392,7 +4388,7 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state,
}
/**
- * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
+ * intel_dpll_reserve - reserve DPLLs for a CRTC and encoder combination
* @state: atomic state
* @crtc: CRTC to reserve DPLLs for
* @encoder: encoder
@@ -4402,18 +4398,18 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state,
* state.
*
* The new configuration in the atomic commit @state is made effective by
- * calling intel_shared_dpll_swap_state().
+ * calling intel_dpll_swap_state().
*
* The reserved DPLLs should be released by calling
- * intel_release_shared_dplls().
+ * intel_dpll_release().
*
* Returns:
* 0 if all required DPLLs were successfully reserved,
* negative error code otherwise.
*/
-int intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+int intel_dpll_reserve(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
@@ -4425,18 +4421,18 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state,
}
/**
- * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
+ * intel_dpll_release - end use of DPLLs by CRTC in atomic state
* @state: atomic state
* @crtc: crtc from which the DPLLs are to be released
*
- * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
+ * This function releases all DPLLs reserved by intel_dpll_reserve()
* from the current atomic commit @state and the old @crtc atomic state.
*
* The new configuration in the atomic commit @state is made effective by
- * calling intel_shared_dpll_swap_state().
+ * calling intel_dpll_swap_state().
*/
-void intel_release_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_dpll_release(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
@@ -4444,7 +4440,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
/*
* FIXME: this function is called for every platform having a
* compute_clock hook, even though the platform doesn't yet support
- * the shared DPLL framework and intel_reserve_shared_dplls() is not
+ * the DPLL framework and intel_dpll_reserve() is not
* called on those.
*/
if (!dpll_mgr)
@@ -4454,16 +4450,16 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
}
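Taken together, the renamed entry points keep the same compute/reserve/swap/release lifecycle as before. Below is a minimal sketch of how a platform modeset path strings them together, using only the signatures shown in this patch; the example_* wrapper is hypothetical and error handling is trimmed.

/* Hedged sketch of the renamed DPLL lifecycle; example_* is hypothetical. */
static int example_crtc_compute_clocks(struct intel_atomic_state *state,
				       struct intel_crtc *crtc,
				       struct intel_encoder *encoder)
{
	int ret;

	/* compute dividers etc. into crtc_state->dpll_hw_state */
	ret = intel_dpll_compute(state, crtc, encoder);
	if (ret)
		return ret;

	/* select and reference a PLL; undone later via intel_dpll_release() */
	ret = intel_dpll_reserve(state, crtc, encoder);
	if (ret)
		return ret;

	/* intel_dpll_swap_state(state) makes the new state effective on commit */
	return 0;
}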
/**
- * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
+ * intel_dpll_update_active - update the active DPLL for a CRTC/encoder
* @state: atomic state
* @crtc: the CRTC for which to update the active DPLL
* @encoder: encoder determining the type of port DPLL
*
* Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
- * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
+ * from the port DPLLs reserved previously by intel_dpll_reserve(). The
* DPLL selected will be based on the current mode of the encoder's port.
*/
-void intel_update_active_dpll(struct intel_atomic_state *state,
+void intel_dpll_update_active(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
@@ -4485,7 +4481,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
* Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
*/
int intel_dpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
@@ -4503,14 +4499,14 @@ int intel_dpll_get_freq(struct intel_display *display,
* Read out @pll's hardware state into @dpll_hw_state.
*/
bool intel_dpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
}
static void readout_dpll_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
struct intel_crtc *crtc;
@@ -4524,8 +4520,8 @@ static void readout_dpll_hw_state(struct intel_display *display,
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
- intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
+ if (crtc_state->hw.active && crtc_state->intel_dpll == pll)
+ intel_dpll_crtc_get(crtc, pll, &pll->state);
}
pll->active_mask = pll->state.pipe_mask;
@@ -4542,15 +4538,15 @@ void intel_dpll_update_ref_clks(struct intel_display *display)
void intel_dpll_readout_hw_state(struct intel_display *display)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- for_each_shared_dpll(display, pll, i)
+ for_each_dpll(display, pll, i)
readout_dpll_hw_state(display, pll);
}
static void sanitize_dpll_state(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
if (!pll->on)
return;
@@ -4569,12 +4565,12 @@ static void sanitize_dpll_state(struct intel_display *display,
void intel_dpll_sanitize_state(struct intel_display *display)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
intel_cx0_pll_power_save_wa(display);
- for_each_shared_dpll(display, pll, i)
+ for_each_dpll(display, pll, i)
sanitize_dpll_state(display, pll);
}
@@ -4626,7 +4622,7 @@ bool intel_dpll_compare_hw_state(struct intel_display *display,
static void
verify_single_dpll_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
@@ -4679,15 +4675,15 @@ verify_single_dpll_state(struct intel_display *display,
pll->info->name);
}
-static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
- const struct intel_shared_dpll *new_pll)
+static bool has_alt_port_dpll(const struct intel_dpll *old_pll,
+ const struct intel_dpll *new_pll)
{
return old_pll && new_pll && old_pll != new_pll &&
(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}
-void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_dpll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
@@ -4695,34 +4691,34 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (new_crtc_state->shared_dpll)
- verify_single_dpll_state(display, new_crtc_state->shared_dpll,
+ if (new_crtc_state->intel_dpll)
+ verify_single_dpll_state(display, new_crtc_state->intel_dpll,
crtc, new_crtc_state);
- if (old_crtc_state->shared_dpll &&
- old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+ if (old_crtc_state->intel_dpll &&
+ old_crtc_state->intel_dpll != new_crtc_state->intel_dpll) {
u8 pipe_mask = BIT(crtc->pipe);
- struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
+ struct intel_dpll *pll = old_crtc_state->intel_dpll;
INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
"%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
- INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
- new_crtc_state->shared_dpll) &&
+ INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->intel_dpll,
+ new_crtc_state->intel_dpll) &&
pll->state.pipe_mask & pipe_mask,
"%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
-void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
+void intel_dpll_verify_disabled(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- for_each_shared_dpll(display, pll, i)
+ for_each_dpll(display, pll, i)
verify_single_dpll_state(display, pll, NULL, NULL);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index caffb084830c..f131bdd1c975 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -30,18 +30,18 @@
#include "intel_display_power.h"
#include "intel_wakeref.h"
-#define for_each_shared_dpll(__display, __pll, __i) \
- for ((__i) = 0; (__i) < (__display)->dpll.num_shared_dpll && \
- ((__pll) = &(__display)->dpll.shared_dplls[(__i)]) ; (__i)++)
+#define for_each_dpll(__display, __pll, __i) \
+ for ((__i) = 0; (__i) < (__display)->dpll.num_dpll && \
+ ((__pll) = &(__display)->dpll.dplls[(__i)]) ; (__i)++)
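The renamed iterator walks display->dpll.dplls up to num_dpll, exactly as its predecessor did. A minimal usage sketch, mirroring the readout and sanitize loops elsewhere in this patch; the example_* helper is hypothetical.

/* Hypothetical helper demonstrating the renamed iterator. */
static void example_dump_dplls(struct intel_display *display)
{
	struct intel_dpll *pll;
	int i;

	for_each_dpll(display, pll, i)
		drm_dbg_kms(display->drm, "%s: active_mask=0x%x\n",
			    pll->info->name, pll->active_mask);
}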
enum tc_port;
struct drm_printer;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dpll_funcs;
struct intel_encoder;
struct intel_shared_dpll;
-struct intel_shared_dpll_funcs;
/**
* enum intel_dpll_id - possible DPLL ids
@@ -280,7 +280,7 @@ struct intel_dpll_hw_state {
};
/**
- * struct intel_shared_dpll_state - hold the DPLL atomic state
+ * struct intel_dpll_state - hold the DPLL atomic state
*
* This structure holds an atomic state for the DPLL, that can represent
* either its current state (in struct &intel_shared_dpll) or a desired
@@ -289,7 +289,7 @@ struct intel_dpll_hw_state {
*
* See also intel_reserve_shared_dplls() and intel_release_shared_dplls().
*/
-struct intel_shared_dpll_state {
+struct intel_dpll_state {
/**
* @pipe_mask: mask of pipes using this DPLL, active or not
*/
@@ -314,7 +314,7 @@ struct dpll_info {
/**
* @funcs: platform specific hooks
*/
- const struct intel_shared_dpll_funcs *funcs;
+ const struct intel_dpll_funcs *funcs;
/**
* @id: unique identifier for this DPLL
@@ -344,16 +344,16 @@ struct dpll_info {
};
/**
- * struct intel_shared_dpll - display PLL with tracked state and users
+ * struct intel_dpll - display PLL with tracked state and users
*/
-struct intel_shared_dpll {
+struct intel_dpll {
/**
* @state:
*
* Store the state for the pll, including its hw state
* and CRTCs using it.
*/
- struct intel_shared_dpll_state state;
+ struct intel_dpll_state state;
/**
* @index: index for atomic state
@@ -387,41 +387,41 @@ struct intel_shared_dpll {
#define SKL_DPLL2 2
#define SKL_DPLL3 3
-/* shared dpll functions */
-struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct intel_display *display,
- enum intel_dpll_id id);
-void assert_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll,
- bool state);
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-int intel_compute_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
-int intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
-void intel_release_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- struct intel_shared_dpll_state *shared_dpll_state);
+/* dpll functions */
+struct intel_dpll *
+intel_get_dpll_by_id(struct intel_display *display,
+ enum intel_dpll_id id);
+void assert_dpll(struct intel_display *display,
+ struct intel_dpll *pll,
+ bool state);
+#define assert_dpll_enabled(d, p) assert_dpll(d, p, true)
+#define assert_dpll_disabled(d, p) assert_dpll(d, p, false)
+int intel_dpll_compute(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+int intel_dpll_reserve(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+void intel_dpll_release(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_dpll_crtc_put(const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ struct intel_dpll_state *shared_dpll_state);
void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
enum icl_port_dpll_id port_dpll_id);
-void intel_update_active_dpll(struct intel_atomic_state *state,
+void intel_dpll_update_active(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
int intel_dpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
bool intel_dpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state);
-void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
-void intel_shared_dpll_init(struct intel_display *display);
+void intel_dpll_enable(const struct intel_crtc_state *crtc_state);
+void intel_dpll_disable(const struct intel_crtc_state *crtc_state);
+void intel_dpll_swap_state(struct intel_atomic_state *state);
+void intel_dpll_init(struct intel_display *display);
void intel_dpll_update_ref_clks(struct intel_display *display);
void intel_dpll_readout_hw_state(struct intel_display *display);
void intel_dpll_sanitize_state(struct intel_display *display);
@@ -435,8 +435,8 @@ bool intel_dpll_compare_hw_state(struct intel_display *display,
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
-void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state);
+void intel_dpll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_dpll_verify_disabled(struct intel_atomic_state *state);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 0d8ebe38226e..aea249e2699f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,6 +9,8 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -125,9 +127,9 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
unsigned int alignment)
{
struct drm_i915_private *i915 = vm->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_vma *vma;
void __iomem *iomem;
struct i915_gem_ww_ctx ww;
@@ -137,7 +139,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
if (i915_gem_object_is_stolen(dpt->obj))
pin_flags |= PIN_MAPPABLE;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
for_i915_gem_ww(&ww, err, true) {
@@ -169,7 +171,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
dpt->obj->mm.dirty = true;
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return err ? ERR_PTR(err) : vma;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
index d2dede0a5229..5eb88d51dba1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -3,18 +3,17 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpt_common.h"
#include "skl_universal_plane_regs.h"
void intel_dpt_configure(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (DISPLAY_VER(i915) == 14) {
+ if (DISPLAY_VER(display) == 14) {
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
@@ -22,15 +21,15 @@ void intel_dpt_configure(struct intel_crtc *crtc)
if (plane_id == PLANE_CURSOR)
continue;
- intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
+ intel_de_rmw(display, PLANE_CHICKEN(pipe, plane_id),
PLANE_CHICKEN_DISABLE_DPT,
- i915->display.params.enable_dpt ? 0 :
+ display->params.enable_dpt ? 0 :
PLANE_CHICKEN_DISABLE_DPT);
}
- } else if (DISPLAY_VER(i915) == 13) {
- intel_de_rmw(i915, CHICKEN_MISC_2,
+ } else if (DISPLAY_VER(display) == 13) {
+ intel_de_rmw(display, CHICKEN_MISC_2,
CHICKEN_MISC_DISABLE_DPT,
- i915->display.params.enable_dpt ? 0 :
+ display->params.enable_dpt ? 0 :
CHICKEN_MISC_DISABLE_DPT);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 05cd0f6e6d71..0fdb32ef241c 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -5,10 +5,11 @@
#include <linux/debugfs.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_frontbuffer.h"
@@ -123,9 +124,9 @@ static void intel_drrs_set_state(struct intel_crtc *crtc,
static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
+ mod_delayed_work(display->wq.unordered, &crtc->drrs.work, msecs_to_jiffies(1000));
}
static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 9fc4003d1579..53d8ae3a70e9 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,13 +4,14 @@
*
*/
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
-#include "i915_irq.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_dsb_buffer.h"
@@ -92,6 +93,10 @@ struct intel_dsb {
/* see DSB_REG_VALUE_MASK */
#define DSB_OPCODE_POLL 0xA
/* see DSB_REG_VALUE_MASK */
+#define DSB_OPCODE_GOSUB 0xC /* ptl+ */
+#define DSB_GOSUB_HEAD_SHIFT 26
+#define DSB_GOSUB_TAIL_SHIFT 0
+#define DSB_GOSUB_CONVERT_ADDR(x) ((x) >> 6)
static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@@ -142,10 +147,10 @@ static int dsb_vtotal(struct intel_atomic_state *state,
static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- unsigned int latency = skl_watermark_max_latency(i915, 0);
+ unsigned int latency = skl_watermark_max_latency(display, 0);
return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
@@ -203,6 +208,15 @@ static bool assert_dsb_has_room(struct intel_dsb *dsb)
crtc->base.base.id, crtc->base.name, dsb->id);
}
+static bool assert_dsb_tail_is_aligned(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct intel_display *display = to_intel_display(crtc->base.dev);
+
+ return !drm_WARN_ON(display->drm,
+ !IS_ALIGNED(dsb->free_pos * 4, CACHELINE_BYTES));
+}
+
static void intel_dsb_dump(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
@@ -227,13 +241,40 @@ static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
}
+unsigned int intel_dsb_size(struct intel_dsb *dsb)
+{
+ return dsb->free_pos * 4;
+}
+
+unsigned int intel_dsb_head(struct intel_dsb *dsb)
+{
+ return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
+}
+
+static unsigned int intel_dsb_tail(struct intel_dsb *dsb)
+{
+ return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + intel_dsb_size(dsb);
+}
+
+static void intel_dsb_ins_align(struct intel_dsb *dsb)
+{
+ /*
+ * Every instruction should be 8 byte aligned.
+ *
+ * The only way to get unaligned free_pos is via
+ * intel_dsb_reg_write_indexed() which already
+ * makes sure the next dword is zeroed, so no need
+ * to clear it here.
+ */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
+}
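Since free_pos counts dwords, ALIGN(free_pos, 2) rounds the write position up to the next 8-byte instruction slot. A standalone arithmetic check, with a hypothetical position and a local power-of-two ALIGN():

/* Standalone check of the dword-granular 8-byte alignment (values hypothetical). */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* power-of-2 only */

int main(void)
{
	unsigned int free_pos = 5;	/* dword index, i.e. byte offset 20 */

	free_pos = ALIGN(free_pos, 2);	/* -> 6, i.e. byte offset 24 */
	printf("free_pos=%u byte=%u\n", free_pos, free_pos * 4);
	return 0;
}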
+
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
if (!assert_dsb_has_room(dsb))
return;
- /* Every instruction should be 8 byte aligned. */
- dsb->free_pos = ALIGN(dsb->free_pos, 2);
+ intel_dsb_ins_align(dsb);
dsb->ins_start_offset = dsb->free_pos;
dsb->ins[0] = ldw;
@@ -491,6 +532,8 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
u32 aligned_tail, tail;
+ intel_dsb_ins_align(dsb);
+
tail = dsb->free_pos * 4;
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
@@ -501,20 +544,90 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
dsb->free_pos = aligned_tail / 4;
}
-void intel_dsb_finish(struct intel_dsb *dsb)
+static void intel_dsb_gosub_align(struct intel_dsb *dsb)
+{
+ u32 aligned_tail, tail;
+
+ intel_dsb_ins_align(dsb);
+
+ tail = dsb->free_pos * 4;
+ aligned_tail = ALIGN(tail, CACHELINE_BYTES);
+
+ /*
+ * Wa_16024917128
+ * "Ensure GOSUB is not placed in cacheline QW slot 6 or 7 (numbered 0-7)"
+ */
+ if (aligned_tail - tail <= 2 * 8)
+ intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
+ aligned_tail - tail);
+
+ dsb->free_pos = aligned_tail / 4;
+}
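Here tail and aligned_tail are byte offsets, so aligned_tail - tail <= 2 * 8 means the next 8-byte instruction would land in quadword slot 6 or 7 of the current 64-byte cacheline, which the workaround forbids for GOSUB; the buffer is then padded with NOPs (zeros) up to the next cacheline. A standalone check of the slot arithmetic, with a hypothetical offset:

/* Which cacheline QW slot would an 8-byte GOSUB start in? (offset hypothetical) */
#include <stdio.h>

int main(void)
{
	unsigned int tail = 0x138;		/* byte offset, 8-byte aligned */
	unsigned int slot = (tail % 64) / 8;	/* QW slots 0..7 in the cacheline */

	/* Wa_16024917128: slots 6 and 7 are forbidden, pad to the next cacheline */
	printf("slot=%u forbidden=%d\n", slot, slot >= 6);
	return 0;
}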
+
+void intel_dsb_gosub(struct intel_dsb *dsb,
+ struct intel_dsb *sub_dsb)
{
struct intel_crtc *crtc = dsb->crtc;
+ struct intel_display *display = to_intel_display(crtc->base.dev);
+ unsigned int head, tail;
+ u64 head_tail;
+
+ if (drm_WARN_ON(display->drm, dsb->id != sub_dsb->id))
+ return;
+
+ if (!assert_dsb_tail_is_aligned(sub_dsb))
+ return;
+
+ intel_dsb_gosub_align(dsb);
+
+ head = intel_dsb_head(sub_dsb);
+ tail = intel_dsb_tail(sub_dsb);
/*
- * DSB_FORCE_DEWAKE remains active even after DSB is
- * disabled, so make sure to clear it (if set during
- * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
- * well for good measure.
+ * The GOSUB instruction has the following memory layout.
+ *
+ * +------------------------------------------------------------+
+ * | Opcode | Rsvd | Head Ptr | Tail Ptr |
+ * | 0x0c | | | |
+ * +------------------------------------------------------------+
+ * |<- 8bits->|<- 4bits ->|<-- 26bits -->|<-- 26bits -->|
+ *
+ * We have only 26 bits each to represent the head and tail
+ * pointers, even though the addresses themselves are 32 bits wide.
+ * This is not a problem because the addresses are 64-byte aligned,
+ * so their low 6 bits are always zero. We therefore right-shift each
+ * address by 6 before embedding it into the GOSUB instruction.
*/
- intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
- intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
- DSB_FORCE_DEWAKE, 0);
+ head_tail = ((u64)(DSB_GOSUB_CONVERT_ADDR(head)) << DSB_GOSUB_HEAD_SHIFT) |
+ ((u64)(DSB_GOSUB_CONVERT_ADDR(tail)) << DSB_GOSUB_TAIL_SHIFT);
+
+ intel_dsb_emit(dsb, lower_32_bits(head_tail),
+ (DSB_OPCODE_GOSUB << DSB_OPCODE_SHIFT) |
+ upper_32_bits(head_tail));
+
+ /*
+ * "NOTE: the instructions within the cacheline
+ * FOLLOWING the GOSUB instruction must be NOPs."
+ */
+ intel_dsb_align_tail(dsb);
+}
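The encoding above can be reproduced outside the driver. A standalone sketch of the head/tail packing, reusing the macros from this patch; the GGTT offsets are hypothetical but cacheline-aligned, as the driver asserts:

/* Standalone sketch of the GOSUB head/tail packing (offsets hypothetical). */
#include <stdint.h>
#include <stdio.h>

#define DSB_GOSUB_HEAD_SHIFT		26
#define DSB_GOSUB_TAIL_SHIFT		0
#define DSB_GOSUB_CONVERT_ADDR(x)	((x) >> 6)

int main(void)
{
	uint32_t head = 0x10000;	/* 64-byte aligned GGTT offset */
	uint32_t tail = 0x10080;	/* one cacheline past head */
	uint64_t head_tail =
		((uint64_t)DSB_GOSUB_CONVERT_ADDR(head) << DSB_GOSUB_HEAD_SHIFT) |
		((uint64_t)DSB_GOSUB_CONVERT_ADDR(tail) << DSB_GOSUB_TAIL_SHIFT);

	/* the low dword becomes ldw, the opcode is OR'ed into the high dword */
	printf("ldw=0x%08x udw=0x%08x\n",
	       (uint32_t)head_tail, (uint32_t)(head_tail >> 32));
	return 0;
}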
+
+void intel_dsb_gosub_finish(struct intel_dsb *dsb)
+{
+ intel_dsb_align_tail(dsb);
+
+ /*
+ * Wa_16024917128
+ * "Ensure that all subroutines called by GOSUB end with a cacheline of NOPs"
+ */
+ intel_dsb_noop(dsb, 8);
+
+ intel_dsb_buffer_flush_map(&dsb->dsb_buf);
+}
+
+void intel_dsb_finish(struct intel_dsb *dsb)
+{
intel_dsb_align_tail(dsb);
intel_dsb_buffer_flush_map(&dsb->dsb_buf);
@@ -537,6 +650,9 @@ static u32 dsb_error_int_status(struct intel_display *display)
if (DISPLAY_VER(display) >= 14)
errors |= DSB_ATS_FAULT_INT_STATUS;
+ if (DISPLAY_VER(display) >= 30)
+ errors |= DSB_GOSUB_INT_STATUS;
+
return errors;
}
@@ -551,17 +667,46 @@ static u32 dsb_error_int_en(struct intel_display *display)
if (DISPLAY_VER(display) >= 14)
errors |= DSB_ATS_FAULT_INT_EN;
+ /*
+ * Wa_16024917128
+ * "Disable nested GOSUB interrupt (DSB_INTERRUPT bit 21)"
+ */
+ if (0 && DISPLAY_VER(display) >= 30)
+ errors |= DSB_GOSUB_INT_EN;
+
return errors;
}
+/*
+ * FIXME calibrate these sensibly, ideally compute based on
+ * the number of registers to be written. But that requires
+ * measuring the actual DSB execution speed on each platform
+ * (and the speed also depends on CDCLK and memory clock)...
+ */
+static int intel_dsb_noarm_exec_time_us(void)
+{
+ return 80;
+}
+
+static int intel_dsb_arm_exec_time_us(void)
+{
+ return 20;
+}
+
+int intel_dsb_exec_time_us(void)
+{
+ return intel_dsb_noarm_exec_time_us() +
+ intel_dsb_arm_exec_time_us();
+}
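These budgets feed intel_dsb_vblank_evade() below through intel_usecs_to_scanlines(). A standalone sketch of that conversion, assuming the usual DIV_ROUND_UP(usecs * crtc_clock, 1000 * htotal) formula and a hypothetical 1080p-like timing:

/* Rough usec -> scanline conversion (formula assumed, timing hypothetical). */
#include <stdio.h>

int main(void)
{
	int htotal = 2200, clock_khz = 148500;	/* ~148.5 MHz pixel clock */
	int usecs = 20;				/* the arm exec budget above */
	int scanlines = (usecs * clock_khz + htotal * 1000 - 1) / (htotal * 1000);

	printf("%d us ~ %d scanlines\n", usecs, scanlines);	/* prints 2 */
	return 0;
}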
+
void intel_dsb_vblank_evade(struct intel_atomic_state *state,
struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- /* FIXME calibrate sensibly */
- int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
+ int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
+ intel_dsb_arm_exec_time_us());
int start, end;
/*
@@ -603,13 +748,11 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state->base.dev);
struct intel_crtc *crtc = dsb->crtc;
enum pipe pipe = crtc->pipe;
- u32 tail;
if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
return;
- tail = chained_dsb->free_pos * 4;
- if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
+ if (!assert_dsb_tail_is_aligned(chained_dsb))
return;
intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
@@ -629,13 +772,15 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
DSB_ENABLE_DEWAKE |
DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
+ } else {
+ intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id), 0);
}
intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
- intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));
+ intel_dsb_head(chained_dsb));
intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
- intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);
+ intel_dsb_tail(chained_dsb));
if (ctrl & DSB_WAIT_FOR_VBLANK) {
/*
@@ -650,6 +795,13 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
intel_dsb_wait_scanline_out(state, dsb,
dsb_dewake_scanline_start(state, crtc),
dsb_dewake_scanline_end(state, crtc));
+
+ /*
+ * DSB_FORCE_DEWAKE remains active even after DSB is
+ * disabled, so make sure to clear it.
+ */
+ intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
+ DSB_FORCE_DEWAKE, 0);
}
}
@@ -674,16 +826,19 @@ void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
intel_dsb_wait_usec(dsb, usecs);
}
-static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
- int hw_dewake_scanline)
+/**
+ * intel_dsb_commit() - Trigger workload execution of DSB.
+ * @dsb: DSB context
+ *
+ * This function is used to do the actual writes to the hardware using the DSB.
+ */
+void intel_dsb_commit(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 tail;
- tail = dsb->free_pos * 4;
- if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
+ if (!assert_dsb_tail_is_aligned(dsb))
return;
if (is_dsb_busy(display, pipe, dsb->id)) {
@@ -693,7 +848,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
}
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
- ctrl | DSB_ENABLE);
+ DSB_ENABLE);
intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
dsb->chicken);
@@ -702,45 +857,13 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
dsb_error_int_en(display) | DSB_PROG_INT_EN);
- intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
- intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
-
- if (hw_dewake_scanline >= 0) {
- int diff, position;
+ intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id), 0);
- intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
- DSB_ENABLE_DEWAKE |
- DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
-
- /*
- * Force DEwake immediately if we're already past
- * or close to racing past the target scanline.
- */
- position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
-
- diff = hw_dewake_scanline - position;
- intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
- (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
- DSB_BLOCK_DEWAKE_EXTENSION);
- }
+ intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
+ intel_dsb_head(dsb));
intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
- intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
-}
-
-/**
- * intel_dsb_commit() - Trigger workload execution of DSB.
- * @dsb: DSB context
- * @wait_for_vblank: wait for vblank before executing
- *
- * This function is used to do actual write to hardware using DSB.
- */
-void intel_dsb_commit(struct intel_dsb *dsb,
- bool wait_for_vblank)
-{
- _intel_dsb_commit(dsb,
- wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
- wait_for_vblank ? dsb->hw_dewake_scanline : -1);
+ intel_dsb_tail(dsb));
}
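With the wait_for_vblank parameter gone, a caller-side sketch of the simplified flow looks roughly as follows, using only entry points visible in this patch; the example_* wrapper and its command budget are hypothetical and error handling is trimmed.

/* Hedged sketch of the simplified DSB flow; example_* is hypothetical. */
static void example_dsb_flush(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_dsb *dsb;

	dsb = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 128);
	if (!dsb)
		return;		/* caller falls back to MMIO writes */

	/* ... queue register writes via intel_dsb_reg_write() ... */

	intel_dsb_finish(dsb);
	intel_dsb_commit(dsb);	/* no wait_for_vblank argument anymore */
	intel_dsb_wait(dsb);
	intel_dsb_cleanup(dsb);
}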
void intel_dsb_wait(struct intel_dsb *dsb)
@@ -795,22 +918,22 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
enum intel_dsb_id dsb_id,
unsigned int max_cmds)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- intel_wakeref_t wakeref;
+ struct intel_display *display = to_intel_display(state);
+ struct ref_tracker *wakeref;
struct intel_dsb *dsb;
unsigned int size;
- if (!HAS_DSB(i915))
+ if (!HAS_DSB(display))
return NULL;
- if (!i915->display.params.enable_dsb)
+ if (!display->params.enable_dsb)
return NULL;
dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
@@ -818,7 +941,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
goto out_put_rpm;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
dsb->id = dsb_id;
dsb->crtc = crtc;
@@ -831,10 +954,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
return dsb;
out_put_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
kfree(dsb);
out:
- drm_info_once(&i915->drm,
+ drm_info_once(display->drm,
"[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
crtc->base.base.id, crtc->base.name, dsb_id);
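The runtime-PM conversion seen here recurs across the series: intel_runtime_pm_get()/intel_runtime_pm_put() on the i915 wakeref become display-level helpers that hand back a struct ref_tracker pointer. The pattern, minimally:

        struct ref_tracker *wakeref;

        wakeref = intel_display_rpm_get(display);
        /* ... access hardware that needs the device awake ... */
        intel_display_rpm_put(display, wakeref);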
@@ -893,4 +1016,7 @@ void intel_dsb_irq_handler(struct intel_display *display,
if (errors & DSB_POLL_ERR_INT_STATUS)
drm_err(display->drm, "[CRTC:%d:%s] DSB %d poll error\n",
crtc->base.base.id, crtc->base.name, dsb_id);
+ if (errors & DSB_GOSUB_INT_STATUS)
+ drm_err(display->drm, "[CRTC:%d:%s] DSB %d GOSUB programming error\n",
+ crtc->base.base.id, crtc->base.name, dsb_id);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index e843c52bf97c..c8f4499916eb 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -26,12 +26,16 @@ enum intel_dsb_id {
I915_MAX_DSBS,
};
+unsigned int intel_dsb_size(struct intel_dsb *dsb);
+unsigned int intel_dsb_head(struct intel_dsb *dsb);
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
struct intel_crtc *crtc,
enum intel_dsb_id dsb_id,
unsigned int max_cmds);
void intel_dsb_finish(struct intel_dsb *dsb);
+void intel_dsb_gosub_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);
+int intel_dsb_exec_time_us(void);
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
@@ -57,13 +61,14 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
void intel_dsb_poll(struct intel_dsb *dsb,
i915_reg_t reg, u32 mask, u32 val,
int wait_us, int count);
+void intel_dsb_gosub(struct intel_dsb *dsb,
+ struct intel_dsb *sub_dsb);
void intel_dsb_chain(struct intel_atomic_state *state,
struct intel_dsb *dsb,
struct intel_dsb *chained_dsb,
bool wait_for_vblank);
-void intel_dsb_commit(struct intel_dsb *dsb,
- bool wait_for_vblank);
+void intel_dsb_commit(struct intel_dsb *dsb);
void intel_dsb_wait(struct intel_dsb *dsb);
void intel_dsb_irq_handler(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_regs.h b/drivers/gpu/drm/i915/display/intel_dsb_regs.h
index cb6e0e5624a6..230104f36145 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb_regs.h
@@ -51,11 +51,13 @@
#define DSB_RESET_SM_STATE_MASK REG_GENMASK(5, 4)
#define DSB_RUN_SM_STATE_MASK REG_GENMASK(2, 0)
#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28)
+#define DSB_GOSUB_INT_EN REG_BIT(21) /* ptl+ */
#define DSB_ATS_FAULT_INT_EN REG_BIT(20) /* mtl+ */
#define DSB_GTT_FAULT_INT_EN REG_BIT(19)
#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18)
#define DSB_POLL_ERR_INT_EN REG_BIT(17)
#define DSB_PROG_INT_EN REG_BIT(16)
+#define DSB_GOSUB_INT_STATUS REG_BIT(5) /* ptl+ */
#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4) /* mtl+ */
#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3)
#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2)
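Each interrupt source in DSB_INTERRUPT pairs an enable bit (bits 16-21) with a status bit (bits 0-5). Judging by how intel_dsb_commit() above writes the status and enable bits together, the status bits appear to be write-1-to-clear, so arming the new GOSUB source would presumably look like:

        intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
                          DSB_GOSUB_INT_STATUS | DSB_GOSUB_INT_EN);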
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 403151175a87..a8f012119165 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -4,8 +4,9 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_dsi.h"
#include "intel_panel.h"
@@ -116,14 +117,14 @@ struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
enum drm_panel_orientation orientation;
orientation = connector->panel.vbt.dsi.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
- orientation = dev_priv->display.vbt.orientation;
+ orientation = display->vbt.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 049443245310..b3c453bf7d5c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -24,9 +24,10 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_dcs_backlight.h"
@@ -162,7 +163,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
if (panel->vbt.backlight.brightness_precision_bits > 8)
@@ -172,7 +173,7 @@ static int dcs_setup_backlight(struct intel_connector *connector,
panel->backlight.level = panel->backlight.max;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using DCS for backlight control\n",
connector->base.base.id, connector->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 7b2ffd14ae6e..e6a851d276f8 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -31,17 +31,16 @@
#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
-
#include <linux/unaligned.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
@@ -102,13 +101,13 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
flags = *data++;
type = *data++;
@@ -120,12 +119,12 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
port = intel_dsi_seq_port_to_port(intel_dsi, seq_port);
- if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port]))
+ if (drm_WARN_ON(display->drm, !intel_dsi->dsi_hosts[port]))
goto out;
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
- drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+ drm_dbg_kms(display->drm, "no dsi device for port %c\n",
port_name(port));
goto out;
}
@@ -150,8 +149,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- drm_dbg(&dev_priv->drm,
- "Generic Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
mipi_dsi_generic_write(dsi_device, data, len);
@@ -163,15 +161,14 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- drm_dbg(&dev_priv->drm,
- "DCS Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -182,10 +179,10 @@ out:
static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 delay = *((const u32 *) data);
- drm_dbg_kms(&i915->drm, "%d usecs\n", delay);
+ drm_dbg_kms(display->drm, "%d usecs\n", delay);
usleep_range(delay, delay + 10);
data += 4;
@@ -196,7 +193,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
const char *con_id, u8 idx, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this table is a quick ugly hack. */
static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
@@ -204,10 +201,10 @@ static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
if (gpio_desc) {
gpiod_set_value(gpio_desc, value);
} else {
- gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
+ gpio_desc = devm_gpiod_get_index(display->drm->dev, con_id, idx,
value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
if (IS_ERR(gpio_desc)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"GPIO index %u request failed (%pe)\n",
gpio_index, gpio_desc);
return;
@@ -242,16 +239,16 @@ static void soc_opaque_gpio_set_value(struct intel_connector *connector,
static void vlv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
if (connector->panel.vbt.dsi.seq_version < 3) {
if (gpio_source == 1) {
- drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
+ drm_dbg_kms(display->drm, "SC gpio not supported\n");
return;
}
if (gpio_source > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
@@ -264,7 +261,7 @@ static void vlv_gpio_set_value(struct intel_connector *connector,
static void chv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
@@ -284,13 +281,13 @@ static void chv_gpio_set_value(struct intel_connector *connector,
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
if (gpio_index >= CHV_GPIO_IDX_START_E) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"invalid gpio index %u for GPIO N\n",
gpio_index);
return;
@@ -320,13 +317,12 @@ enum {
MIPI_VIO_EN_2,
};
-static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+static void icl_native_gpio_set_value(struct intel_display *display,
int gpio, bool value)
{
- struct intel_display *display = &dev_priv->display;
int index;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) == 11 && gpio >= MIPI_RESET_2))
return;
switch (gpio) {
@@ -343,25 +339,25 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
* The locking protects against concurrent SHOTPLUG_CTL_DDI
* modifications in irq setup and handling.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+ spin_lock_irq(&display->irq.lock);
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
break;
case MIPI_AVDD_EN_1:
case MIPI_AVDD_EN_2:
index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), PANEL_POWER_ON,
+ intel_de_rmw(display, PP_CONTROL(display, index), PANEL_POWER_ON,
value ? PANEL_POWER_ON : 0);
break;
case MIPI_BKLT_EN_1:
case MIPI_BKLT_EN_2:
index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), EDP_BLC_ENABLE,
+ intel_de_rmw(display, PP_CONTROL(display, index), EDP_BLC_ENABLE,
value ? EDP_BLC_ENABLE : 0);
break;
case MIPI_AVEE_EN_1:
@@ -389,13 +385,12 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source = 0, gpio_index = 0, gpio_number;
bool value;
int size;
- bool native = DISPLAY_VER(i915) >= 11;
+ bool native = DISPLAY_VER(display) >= 11;
if (connector->panel.vbt.dsi.seq_version >= 3) {
size = 3;
@@ -416,16 +411,16 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
gpio_source = (data[1] >> 1) & 3;
}
- drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ drm_dbg_kms(display->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
if (native)
- icl_native_gpio_set_value(i915, gpio_number, value);
- else if (DISPLAY_VER(i915) >= 9)
+ icl_native_gpio_set_value(display, gpio_number, value);
+ else if (DISPLAY_VER(display) >= 9)
bxt_gpio_set_value(connector, gpio_index, value);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
- else if (IS_CHERRYVIEW(i915))
+ else if (display->platform.cherryview)
chv_gpio_set_value(connector, gpio_source, gpio_number, value);
return data + size;
@@ -463,8 +458,8 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
const u16 target_addr)
{
- struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct acpi_device *adev = ACPI_COMPANION(display->drm->dev);
struct i2c_adapter_lookup lookup = {
.target_addr = target_addr,
.intel_dsi = intel_dsi,
@@ -484,7 +479,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
@@ -494,7 +489,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
u8 payload_size = *(data + 6);
u8 *payload_data;
- drm_dbg_kms(&i915->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
+ drm_dbg_kms(display->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7);
if (intel_dsi->i2c_bus_num < 0) {
@@ -504,7 +499,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
if (!adapter) {
- drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
+ drm_err(display->drm, "Cannot find a valid i2c bus for xfer\n");
goto err_bus;
}
@@ -522,7 +517,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
ret = i2c_transfer(adapter, &msg, 1);
if (ret < 0)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to xfer payload of size (%u) to reg (%u)\n",
payload_size, reg_offset);
@@ -535,16 +530,16 @@ err_bus:
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
- drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
+ drm_dbg_kms(display->drm, "Skipping SPI element execution\n");
return data + *(data + 5) + 6;
}
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
#ifdef CONFIG_PMIC_OPREGION
u32 value, mask, reg_address;
u16 i2c_address;
@@ -560,9 +555,9 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
reg_address,
value, mask);
if (ret)
- drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
+ drm_err(display->drm, "%s failed, error: %d\n", __func__, ret);
#else
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
#endif
@@ -612,12 +607,12 @@ static const char *sequence_name(enum mipi_seq seq_id)
static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
return;
@@ -625,9 +620,9 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
if (!data)
return;
- drm_WARN_ON(&dev_priv->drm, *data != seq_id);
+ drm_WARN_ON(display->drm, *data != seq_id);
- drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+ drm_dbg_kms(display->drm, "Starting MIPI sequence %d - %s\n",
seq_id, sequence_name(seq_id));
/* Skip Sequence Byte. */
@@ -657,19 +652,19 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
/* Consistency check if we have size. */
if (operation_size && data != next) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Inconsistent operation size\n");
return;
}
} else if (operation_size) {
/* We have size, skip. */
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
return;
@@ -695,54 +690,44 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
-
- drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
- drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
- intel_dsi->pixel_overlap);
- drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
- drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- drm_dbg_kms(&i915->drm, "Video mode format %s\n",
- intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode == BURST_MODE ?
- "burst" : "<unknown>");
- drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
- intel_dsi->burst_mode_ratio);
- drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
- drm_dbg_kms(&i915->drm, "Eot %s\n",
- str_enabled_disabled(intel_dsi->eotp_pkt));
- drm_dbg_kms(&i915->drm, "Clockstop %s\n",
- str_enabled_disabled(!intel_dsi->clock_stop));
- drm_dbg_kms(&i915->drm, "Mode %s\n",
- intel_dsi->operation_mode ? "command" : "video");
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
+ "DSI parameters:");
+
+ drm_printf(&p, "Pclk %d\n", intel_dsi->pclk);
+ drm_printf(&p, "Pixel overlap %d\n", intel_dsi->pixel_overlap);
+ drm_printf(&p, "Lane count %d\n", intel_dsi->lane_count);
+ drm_printf(&p, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ drm_printf(&p, "Video mode format %s\n",
+ intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode == BURST_MODE ?
+ "burst" : "<unknown>");
+ drm_printf(&p, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
+ drm_printf(&p, "Reset timer %d\n", intel_dsi->rst_timer_val);
+ drm_printf(&p, "Eot %s\n", str_enabled_disabled(intel_dsi->eotp_pkt));
+ drm_printf(&p, "Clockstop %s\n", str_enabled_disabled(!intel_dsi->clock_stop));
+ drm_printf(&p, "Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
- drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
- drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
- drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
- drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
- intel_dsi->lp_rx_timeout);
- drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
- intel_dsi->turn_arnd_val);
- drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
- drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
- intel_dsi->hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
- intel_dsi->clk_lp_to_hs_count);
- drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
- intel_dsi->clk_hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "BTA %s\n",
- str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+ drm_printf(&p, "Dual link: NONE\n");
+ drm_printf(&p, "Pixel Format %d\n", intel_dsi->pixel_format);
+ drm_printf(&p, "TLPX %d\n", intel_dsi->escape_clk_div);
+ drm_printf(&p, "LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+ drm_printf(&p, "Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+ drm_printf(&p, "Init Count 0x%x\n", intel_dsi->init_count);
+ drm_printf(&p, "HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+ drm_printf(&p, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ drm_printf(&p, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ drm_printf(&p, "LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+ drm_printf(&p, "HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+ drm_printf(&p, "BTA %s\n",
+ str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
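drm_dbg_printer() gives all the drm_printf() calls above one shared device/category prefix, replacing a drm_dbg_kms() per line. The pattern in isolation, with placeholder names:

        struct drm_printer p = drm_dbg_printer(drm, DRM_UT_KMS, "prefix:");

        drm_printf(&p, "field %d\n", value);    /* emitted as KMS debug */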
static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
@@ -764,8 +749,7 @@ static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
@@ -773,7 +757,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u16 burst_mode_ratio;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
@@ -819,7 +803,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u32 bitrate;
if (mipi_config->target_burst_mode_freq == 0) {
- drm_err(&dev_priv->drm, "Burst mode target is not set\n");
+ drm_err(display->drm, "Burst mode target is not set\n");
return false;
}
@@ -836,7 +820,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
mipi_config->target_burst_mode_freq = bitrate;
if (mipi_config->target_burst_mode_freq < bitrate) {
- drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
+ drm_err(display->drm, "Burst mode freq is less than computed\n");
return false;
}
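For context, the burst mode ratio validated above is derived from the VBT target frequency against the computed link bitrate. The driver's arithmetic is roughly the following — the exact rounding is an assumption, not quoted from this patch:

        /* assumed form: ratio in units of 1/100th, rounded up */
        burst_mode_ratio = DIV_ROUND_UP(
                mipi_config->target_burst_mode_freq * 100, bitrate);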
@@ -900,8 +884,7 @@ static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
@@ -911,13 +894,13 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
struct pinctrl *pinctrl;
int ret;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
mipi_config->pwm_blc == PPS_BLC_PMIC) {
gpiod_lookup_table = &pmic_panel_gpio_table;
want_panel_gpio = true;
}
- if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ if (display->platform.valleyview && mipi_config->pwm_blc == PPS_BLC_SOC) {
gpiod_lookup_table = &soc_panel_gpio_table;
want_panel_gpio = true;
want_backlight_gpio = true;
@@ -926,12 +909,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
ARRAY_SIZE(soc_pwm_pinctrl_map));
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to register pwm0 pinmux mapping\n");
- pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+ pinctrl = devm_pinctrl_get_select(display->drm->dev, "soc_pwm0");
if (IS_ERR(pinctrl))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to set pinmux to PWM\n");
}
@@ -939,9 +922,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
gpiod_add_lookup_table(gpiod_lookup_table);
if (want_panel_gpio) {
- intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags);
+ intel_dsi->gpio_panel = devm_gpiod_get(display->drm->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for panel control\n");
intel_dsi->gpio_panel = NULL;
}
@@ -949,9 +932,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if (want_backlight_gpio) {
intel_dsi->gpio_backlight =
- devm_gpiod_get(dev->dev, "backlight", flags);
+ devm_gpiod_get(display->drm->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c16fb34b737d..08b48e36aca6 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -31,13 +31,14 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
@@ -129,13 +130,13 @@ static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (!(tmp & DVO_ENABLE))
return false;
@@ -146,11 +147,11 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
*pipe = REG_FIELD_GET(DVO_PIPE_SEL_MASK, tmp);
@@ -160,13 +161,13 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -186,14 +187,14 @@ static void intel_disable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- intel_de_rmw(i915, DVO(port), DVO_ENABLE, 0);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), DVO_ENABLE, 0);
+ intel_de_posting_read(display, DVO(port));
}
static void intel_enable_dvo(struct intel_atomic_state *state,
@@ -201,7 +202,7 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
@@ -209,8 +210,8 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- intel_de_rmw(i915, DVO(port), 0, DVO_ENABLE);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), 0, DVO_ENABLE);
+ intel_de_posting_read(display, DVO(port));
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -288,7 +289,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port = encoder->port;
@@ -296,7 +297,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
u32 dvo_val;
/* Save the active data order, since I don't know what it should be set to. */
- dvo_val = intel_de_read(i915, DVO(port)) &
+ dvo_val = intel_de_read(display, DVO(port)) &
(DVO_DEDICATED_INT_ENABLE |
DVO_PRESERVE_MASK | DVO_ACT_DATA_ORDER_MASK);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
@@ -309,10 +310,10 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- intel_de_write(i915, DVO_SRCDIM(port),
+ intel_de_write(display, DVO_SRCDIM(port),
DVO_SRCDIM_HORIZONTAL(adjusted_mode->crtc_hdisplay) |
DVO_SRCDIM_VERTICAL(adjusted_mode->crtc_vdisplay));
- intel_de_write(i915, DVO(port), dvo_val);
+ intel_de_write(display, DVO(port), dvo_val);
}
static enum drm_connector_status
@@ -320,10 +321,9 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
{
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (!intel_display_device_enabled(display))
@@ -414,11 +414,10 @@ static int intel_dvo_connector_type(const struct intel_dvo_device *dvo)
}
}
-static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
+static bool intel_dvo_init_dev(struct intel_display *display,
struct intel_dvo *intel_dvo,
const struct intel_dvo_device *dvo)
{
- struct intel_display *display = &dev_priv->display;
struct i2c_adapter *i2c;
u32 dpll[I915_MAX_PIPES];
enum pipe pipe;
@@ -458,15 +457,15 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* the clock enabled before we attempt to initialize
* the device.
*/
- for_each_pipe(dev_priv, pipe)
- dpll[pipe] = intel_de_rmw(dev_priv, DPLL(dev_priv, pipe), 0,
+ for_each_pipe(display, pipe)
+ dpll[pipe] = intel_de_rmw(display, DPLL(display, pipe), 0,
DPLL_DVO_2X_MODE);
ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
/* restore the DVO 2x clock state to original */
- for_each_pipe(dev_priv, pipe) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll[pipe]);
+ for_each_pipe(display, pipe) {
+ intel_de_write(display, DPLL(display, pipe), dpll[pipe]);
}
intel_gmbus_force_bit(i2c, false);
@@ -474,14 +473,14 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
return ret;
}
-static bool intel_dvo_probe(struct drm_i915_private *i915,
+static bool intel_dvo_probe(struct intel_display *display,
struct intel_dvo *intel_dvo)
{
int i;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- if (intel_dvo_init_dev(i915, intel_dvo,
+ if (intel_dvo_init_dev(display, intel_dvo,
&intel_dvo_devices[i]))
return true;
}
@@ -489,9 +488,8 @@ static bool intel_dvo_probe(struct drm_i915_private *i915,
return false;
}
-void intel_dvo_init(struct drm_i915_private *i915)
+void intel_dvo_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_connector *connector;
struct intel_encoder *encoder;
struct intel_dvo *intel_dvo;
@@ -518,7 +516,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->pre_enable = intel_dvo_pre_enable;
connector->get_hw_state = intel_dvo_connector_get_hw_state;
- if (!intel_dvo_probe(i915, intel_dvo)) {
+ if (!intel_dvo_probe(display, intel_dvo)) {
kfree(intel_dvo);
intel_connector_free(connector);
return;
@@ -535,12 +533,12 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
BIT(INTEL_OUTPUT_DVO);
- drm_encoder_init(&i915->drm, &encoder->base,
+ drm_encoder_init(display->drm, &encoder->base,
&intel_dvo_enc_funcs,
intel_dvo_encoder_type(&intel_dvo->dev),
"DVO %c", port_name(encoder->port));
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] detected %s\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] detected %s\n",
encoder->base.base.id, encoder->base.name,
intel_dvo->dev.name);
@@ -549,7 +547,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
DRM_CONNECTOR_POLL_DISCONNECT;
connector->base.polled = connector->polled;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_dvo_connector_funcs,
intel_dvo_connector_type(&intel_dvo->dev),
intel_gmbus_get_adapter(display, GMBUS_PIN_DPC));
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h
index bf7a356422ab..83776552fc87 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo.h
@@ -6,12 +6,12 @@
#ifndef __INTEL_DVO_H__
#define __INTEL_DVO_H__
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-void intel_dvo_init(struct drm_i915_private *dev_priv);
+void intel_dvo_init(struct intel_display *display);
#else
-static inline void intel_dvo_init(struct drm_i915_private *dev_priv)
+static inline void intel_dvo_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_encoder.c b/drivers/gpu/drm/i915/display/intel_encoder.c
index 21d638535497..0b7bd26f4339 100644
--- a/drivers/gpu/drm/i915/display/intel_encoder.c
+++ b/drivers/gpu/drm/i915/display/intel_encoder.c
@@ -5,8 +5,7 @@
#include <linux/workqueue.h>
-#include "i915_drv.h"
-
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_encoder.h"
@@ -32,9 +31,9 @@ void intel_encoder_link_check_flush_work(struct intel_encoder *encoder)
void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int delay_ms)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- mod_delayed_work(i915->unordered_wq,
+ mod_delayed_work(display->wq.unordered,
&encoder->link_check_work, msecs_to_jiffies(delay_ms));
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 2b0e0f220442..0da842bd2f2f 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -11,14 +11,15 @@
#include <drm/drm_modeset_helper.h>
#include "i915_drv.h"
-#include "intel_atomic_plane.h"
#include "intel_bo.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a))
@@ -421,21 +422,22 @@ unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
/**
* intel_fb_get_format_info: Get a modifier specific format information
- * @cmd: FB add command structure
+ * @pixel_format: pixel format
+ * @modifier: modifier
*
* Returns:
- * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0],
+ * Returns the format information for @pixel_format specific to @modifier,
* or %NULL if the modifier doesn't override the format.
*/
const struct drm_format_info *
-intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+intel_fb_get_format_info(u32 pixel_format, u64 modifier)
{
- const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]);
+ const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier);
if (!md || !md->formats)
return NULL;
- return lookup_format_info(md->formats, md->format_count, cmd->pixel_format);
+ return lookup_format_info(md->formats, md->format_count, pixel_format);
}
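Call sites adapt to the new signature by passing the two fields explicitly instead of the whole command struct; presumably:

        const struct drm_format_info *info =
                intel_fb_get_format_info(mode_cmd->pixel_format,
                                         mode_cmd->modifier[0]);

Per the kernel-doc above, a NULL return simply means the modifier does not override the format, so the caller falls back to the core's generic format lookup.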
static bool plane_caps_contain_any(u8 caps, u8 mask)
@@ -560,11 +562,11 @@ static bool plane_has_modifier(struct intel_display *display,
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
- (GRAPHICS_VER(i915) < 20 || !IS_DGFX(i915)))
+ (GRAPHICS_VER(i915) < 20 || !display->platform.dgfx))
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS &&
- (GRAPHICS_VER(i915) < 20 || IS_DGFX(i915)))
+ (GRAPHICS_VER(i915) < 20 || display->platform.dgfx))
return false;
return true;
@@ -1285,10 +1287,10 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- return DISPLAY_VER(dev_priv) < 4 ||
+ return DISPLAY_VER(display) < 4 ||
(plane->fbc && !plane_state->no_fbc_reason &&
plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
@@ -2205,6 +2207,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct intel_display *display = to_intel_display(obj->dev);
@@ -2252,7 +2255,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err_frontbuffer_put;
}
- drm_helper_mode_fill_fb_struct(display->drm, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(display->drm, fb, info, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++) {
unsigned int stride_alignment;
@@ -2322,6 +2325,7 @@ err:
struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
struct drm_framebuffer *fb;
@@ -2332,7 +2336,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (IS_ERR(obj))
return ERR_CAST(obj);
- fb = intel_framebuffer_create(obj, &mode_cmd);
+ fb = intel_framebuffer_create(obj, info, &mode_cmd);
drm_gem_object_put(obj);
return fb;
@@ -2340,16 +2344,17 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_bo_alloc_framebuffer();
if (!intel_fb)
return ERR_PTR(-ENOMEM);
- ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
+ ret = intel_framebuffer_init(intel_fb, obj, info, mode_cmd);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index bdd76b372957..403b8b63721a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -47,7 +47,7 @@ u64 *intel_fb_plane_get_modifiers(struct intel_display *display,
bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier);
const struct drm_format_info *
-intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+intel_fb_get_format_info(u32 pixel_format, u64 modifier);
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
@@ -102,13 +102,16 @@ void intel_add_fb_offsets(int *x, int *y,
int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *user_mode_cmd);
bool intel_fb_modifier_uses_dpt(struct intel_display *display, u64 modifier);
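These prototypes thread a single resolved const struct drm_format_info * from the .fb_create entry point down through creation and init, instead of re-deriving it from the mode_cmd at each layer. A hedged sketch of the forwarding chain — the wrapper name is hypothetical:

static struct drm_framebuffer *
example_fb_create(struct drm_device *dev, struct drm_file *filp,
                  const struct drm_format_info *info,
                  const struct drm_mode_fb_cmd2 *cmd)
{
        /* info is resolved once by the core, then passed straight down */
        return intel_user_framebuffer_create(dev, filp, info, cmd);
}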
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
index 3d338a728354..b0e8b89f7ce8 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 30ac9b089ad6..5a0151775a3a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -11,11 +11,13 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
-#include "intel_atomic_plane.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_plane.h"
static struct i915_vma *
intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
@@ -117,7 +119,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
unsigned int pinctl;
@@ -136,7 +138,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
@@ -215,7 +217,7 @@ err:
vma = ERR_PTR(ret);
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return vma;
}
@@ -332,3 +334,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
intel_dpt_unpin_from_ggtt(fb->dpt_vm);
}
}
+
+void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
+{
+ iosys_map_set_vaddr_iomem(map, i915_vma_get_iomap(vma));
+}
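The new intel_fb_get_map() helper packages the pinned vma's I/O mapping into an iosys_map, so callers can use the generic iosys accessors instead of raw pointers. A hedged usage sketch, with size as a placeholder:

        struct iosys_map map;

        intel_fb_get_map(vma, &map);
        iosys_map_memset(&map, 0, 0, size);     /* e.g. clear the fb */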
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
index 01770dbba2e0..81ab79da1af7 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -12,6 +12,7 @@ struct drm_framebuffer;
struct i915_vma;
struct intel_plane_state;
struct i915_gtt_view;
+struct iosys_map;
struct i915_vma *
intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
@@ -27,5 +28,6 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags);
int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
const struct intel_plane_state *old_plane_state);
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b6978135e8ad..685ac98bd001 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -45,9 +45,10 @@
#include <drm/drm_fourcc.h>
#include "gem/i915_gem_stolen.h"
+
#include "gt/intel_gt_types.h"
+
#include "i915_drv.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "i915_vma.h"
@@ -55,6 +56,8 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
@@ -251,9 +254,12 @@ static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_s
* Gen9 hw miscalculates cfb stride for linear as
* PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
* we always need to use the override there.
+ *
+ * wa_14022269668: for BMG, always program FBC_STRIDE before enabling FBC
*/
if (stride != stride_aligned ||
- (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
+ (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR) ||
+ display->platform.battlemage)
return stride_aligned * 4 / 64;
return 0;
@@ -519,6 +525,20 @@ static void ilk_fbc_activate(struct intel_fbc *fbc)
DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
+static void fbc_compressor_clkgate_disable_wa(struct intel_fbc *fbc,
+ bool disable)
+{
+ struct intel_display *display = fbc->display;
+
+ if (display->platform.dg2)
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_4, DG2_DPFC_GATING_DIS,
+ disable ? DG2_DPFC_GATING_DIS : 0);
+ else if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPE_CLKGATE_DIS2(fbc->id),
+ MTL_DPFC_GATING_DIS,
+ disable ? MTL_DPFC_GATING_DIS : 0);
+}
+
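intel_de_rmw(display, reg, clear, set) reads the register, clears the bits in clear, ORs in set and writes the result back, so the helper above flips only the gating-disable bit. Open-coded equivalent of the DG2 branch, for illustration:

        u32 tmp = intel_de_read(display, GEN9_CLKGATE_DIS_4);

        tmp &= ~DG2_DPFC_GATING_DIS;
        if (disable)
                tmp |= DG2_DPFC_GATING_DIS;
        intel_de_write(display, GEN9_CLKGATE_DIS_4, tmp);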
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
@@ -921,6 +941,10 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+
+ /* wa_18038517565 Disable DPFC clock gating before FBC enable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, true);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
@@ -1436,7 +1460,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_needs_wa_16023588340(i915)) {
+ if (intel_display_needs_wa_16023588340(display)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
@@ -1464,14 +1488,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*
- * In Xe3, PSR2 selective fetch and FBC dirty rect feature cannot
- * coexist. So if PSR2 selective fetch is supported then mark that
- * FBC is not supported.
- * TODO: Need a logic to decide between PSR2 and FBC Dirty rect
+ * TODO: Implement logic to select between PSR2 selective fetch and
+ * FBC based on Bspec: 68881 on xe2lpd onwards.
+ *
+ * As we still see some strange underruns on those platforms while
+ * disabling PSR2, keep FBC disabled when selective update is on,
+ * until the selection logic is implemented.
*/
- if ((IS_DISPLAY_VER(display, 12, 14) || HAS_FBC_DIRTY_RECT(display)) &&
- crtc_state->has_sel_update && !crtc_state->has_panel_replay) {
- plane_state->no_fbc_reason = "PSR2 enabled";
+ if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update) {
+ plane_state->no_fbc_reason = "Selective update enabled";
return 0;
}
@@ -1547,7 +1572,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (IS_ERR(cdclk_state))
return PTR_ERR(cdclk_state);
- if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
+ if (crtc_state->pixel_rate >= intel_cdclk_logical(cdclk_state) * 95 / 100) {
plane_state->no_fbc_reason = "pixel rate too high";
return 0;
}
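The guard keeps FBC off once the pipe consumes 95% or more of the logical CDCLK. For example, with a logical CDCLK of 480000 kHz (value illustrative, not from the patch):

        int cdclk = 480000;                     /* kHz, illustrative */
        int limit = cdclk * 95 / 100;           /* 456000 kHz */

        /* pixel_rate >= limit -> "pixel rate too high", FBC stays off */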
@@ -1681,6 +1706,10 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
__intel_fbc_cleanup_cfb(fbc);
+ /* wa_18038517565 Enable DPFC clock gating after FBC disable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, false);
+
fbc->state.plane = NULL;
fbc->flip_pending = false;
fbc->busy_bits = 0;
@@ -1982,7 +2011,7 @@ void intel_fbc_reset_underrun(struct intel_display *display)
static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = to_i915(fbc->display->drm);
+ struct intel_display *display = fbc->display;
/*
* There's no guarantee that underrun_detected won't be set to true
@@ -1995,7 +2024,7 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
if (READ_ONCE(fbc->underrun_detected))
return;
- queue_work(i915->unordered_wq, &fbc->underrun_work);
+ queue_work(display->wq.unordered, &fbc->underrun_work);
}
/**
@@ -2120,13 +2149,12 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_fbc *fbc = m->private;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_plane *plane;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
drm_modeset_lock_all(display->drm);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&fbc->lock);
if (fbc->active) {
@@ -2151,7 +2179,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
drm_modeset_unlock_all(display->drm);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index adc19d5607de..7c4709d58aa3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -47,9 +47,10 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_bo.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
@@ -65,9 +66,9 @@ struct intel_fbdev {
static struct intel_fbdev *to_intel_fbdev(struct drm_fb_helper *fb_helper)
{
- struct drm_i915_private *i915 = to_i915(fb_helper->client.dev);
+ struct intel_display *display = to_intel_display(fb_helper->client.dev);
- return i915->display.fbdev.fbdev;
+ return display->fbdev.fbdev;
}
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
@@ -209,11 +210,10 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_display *display = to_intel_display(helper->dev);
struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct intel_framebuffer *fb = ifbdev->fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
@@ -226,7 +226,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
if (fb &&
(sizes->fb_width > fb->base.width ||
sizes->fb_height > fb->base.height)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BIOS fb too small (%dx%d), we require (%dx%d),"
" releasing it\n",
fb->base.width, fb->base.height,
@@ -234,20 +234,20 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
drm_framebuffer_put(&fb->base);
fb = NULL;
}
- if (!fb || drm_WARN_ON(dev, !intel_fb_bo(&fb->base))) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) {
+ drm_dbg_kms(display->drm,
"no BIOS fb, allocating a new one\n");
fb = intel_fbdev_fb_alloc(helper, sizes);
if (IS_ERR(fb))
return PTR_ERR(fb);
} else {
- drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
+ drm_dbg_kms(display->drm, "re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = fb->base.width;
sizes->fb_height = fb->base.height;
}
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -265,7 +265,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
- drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
+ drm_err(display->drm, "Failed to allocate fb_info (%pe)\n", info);
ret = PTR_ERR(info);
goto out_unpin;
}
@@ -277,11 +277,11 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
obj = intel_fb_bo(&fb->base);
- ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
+ ret = intel_fbdev_fb_fill_info(display, info, obj, vma);
if (ret)
goto out_unpin;
- drm_fb_helper_fill_info(info, dev->fb_helper, sizes);
+ drm_fb_helper_fill_info(info, display->drm->fb_helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
@@ -292,21 +292,22 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
+ drm_dbg_kms(display->drm, "allocated %dx%d fb: 0x%08x\n",
fb->base.width, fb->base.height,
i915_ggtt_offset(vma));
ifbdev->fb = fb;
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
out_unpin:
intel_fb_unpin_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
+
return ret;
}
@@ -319,16 +320,15 @@ out_unlock:
* Note we only support a single fb shared across pipes for boot (mostly for
* fbcon), so we just find the biggest and use that.
*/
-static bool intel_fbdev_init_bios(struct drm_device *dev,
+static bool intel_fbdev_init_bios(struct intel_display *display,
struct intel_fbdev *ifbdev)
{
- struct drm_i915_private *i915 = to_i915(dev);
struct intel_framebuffer *fb = NULL;
struct intel_crtc *crtc;
unsigned int max_size = 0;
/* Find the largest fb */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -338,21 +338,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct drm_gem_object *obj = intel_fb_bo(plane_state->uapi.fb);
if (!crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active, skipping\n",
crtc->base.base.id, crtc->base.name);
continue;
}
if (!obj) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] no fb, skipping\n",
plane->base.base.id, plane->base.name);
continue;
}
if (obj->size > max_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"found possible fb from [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fb = to_intel_framebuffer(plane_state->uapi.fb);
@@ -361,13 +361,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
if (!fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"no active fbs found, not using BIOS config\n");
goto out;
}
/* Now make sure all the pipes will fit into it */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -375,13 +375,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
unsigned int cur_size;
if (!crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active, skipping\n",
crtc->base.base.id, crtc->base.name);
continue;
}
- drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
+ drm_dbg_kms(display->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
plane->base.base.id, plane->base.name);
/*
@@ -392,7 +392,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
plane->base.base.id, plane->base.name,
cur_size, fb->base.pitches[0]);
@@ -403,7 +403,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
crtc->base.base.id, crtc->base.name,
crtc_state->uapi.adjusted_mode.crtc_hdisplay,
@@ -412,7 +412,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size);
if (cur_size > max_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
plane->base.base.id, plane->base.name,
cur_size, max_size);
@@ -420,14 +420,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb big enough [PLANE:%d:%s] (%d >= %d)\n",
plane->base.base.id, plane->base.name,
max_size, cur_size);
}
if (!fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS fb not suitable for all pipes, not using\n");
goto out;
}
@@ -437,7 +437,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
drm_framebuffer_get(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -448,13 +448,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
if (!crtc_state->uapi.active)
continue;
- drm_WARN(dev, !plane_state->uapi.fb,
+ drm_WARN(display->drm, !plane_state->uapi.fb,
"re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
}
- drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
+ drm_dbg_kms(display->drm, "using BIOS fb for initial console\n");
return true;
out:
@@ -479,26 +479,25 @@ static unsigned int intel_fbdev_color_mode(const struct drm_format_info *info)
}
}
-void intel_fbdev_setup(struct drm_i915_private *i915)
+void intel_fbdev_setup(struct intel_display *display)
{
- struct drm_device *dev = &i915->drm;
struct intel_fbdev *ifbdev;
unsigned int preferred_bpp = 0;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- ifbdev = drmm_kzalloc(dev, sizeof(*ifbdev), GFP_KERNEL);
+ ifbdev = drmm_kzalloc(display->drm, sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return;
- i915->display.fbdev.fbdev = ifbdev;
- if (intel_fbdev_init_bios(dev, ifbdev))
+ display->fbdev.fbdev = ifbdev;
+ if (intel_fbdev_init_bios(display, ifbdev))
preferred_bpp = intel_fbdev_color_mode(ifbdev->fb->base.format);
if (!preferred_bpp)
preferred_bpp = 32;
- drm_client_setup_with_color_mode(dev, preferred_bpp);
+ drm_client_setup_with_color_mode(display->drm, preferred_bpp);
}
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
@@ -513,3 +512,8 @@ struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev)
{
return fbdev ? fbdev->vma : NULL;
}
+
+void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map)
+{
+ intel_fb_get_map(fbdev->vma, map);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 89bad3a2b01a..150cc5f45bb3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -10,23 +10,24 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
-struct drm_i915_private;
+struct intel_display;
struct intel_fbdev;
struct intel_framebuffer;
+struct iosys_map;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = intel_fbdev_driver_fbdev_probe
-void intel_fbdev_setup(struct drm_i915_private *dev_priv);
+void intel_fbdev_setup(struct intel_display *display);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev);
-
+void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map);
#else
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = NULL
-static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
+static inline void intel_fbdev_setup(struct intel_display *display)
{
}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
@@ -39,6 +40,9 @@ static inline struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev
return NULL;
}
+static inline void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map)
+{
+}
#endif
#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 4991c35a2632..210aee9ae88b 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbdev_fb.h"
@@ -15,9 +16,9 @@
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_display *display = to_intel_display(helper->dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size;
@@ -50,26 +51,31 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
*
* Also skip stolen on MTL as Wa_22018444074 mitigation.
*/
- if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
+ if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
}
if (IS_ERR(obj)) {
- drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ drm_err(display->drm, "failed to allocate framebuffer (%pe)\n", obj);
return ERR_PTR(-ENOMEM);
}
- fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), &mode_cmd);
+ fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj),
+ drm_get_format_info(display->drm,
+ mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd);
i915_gem_object_put(obj);
return to_intel_framebuffer(fb);
}
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct i915_gem_ww_ctx ww;
void __iomem *vaddr;
@@ -101,7 +107,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
continue;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index e502ae375fc0..cb7957272715 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -9,13 +9,13 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_gem_object;
-struct drm_i915_private;
struct fb_info;
struct i915_vma;
+struct intel_display;
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
struct drm_gem_object *obj, struct i915_vma *vma);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 40deee0769ae..8039a84671cc 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -6,15 +6,17 @@
#include <linux/string_helpers.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
-#include "intel_dp.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_link_bw.h"
@@ -464,7 +466,6 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -483,7 +484,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
@@ -607,7 +608,6 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -647,7 +647,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -698,7 +698,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
} else {
@@ -911,7 +911,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
- drm_WARN_ON(display->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
+ drm_WARN_ON(display->drm, crtc_state->intel_dpll->info->id != DPLL_ID_SPLL);
intel_ddi_enable_clock(encoder, crtc_state);
/* Start the training iterating through available voltages and emphasis,
@@ -1077,7 +1077,6 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc)
void ilk_fdi_disable(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -1096,7 +1095,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR);
@@ -1106,7 +1105,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 7a8fbff39be0..c2ce8461ac9e 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -25,10 +25,12 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
@@ -57,11 +59,10 @@
static bool ivb_can_enable_err_int(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
enum pipe pipe;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
@@ -75,11 +76,10 @@ static bool ivb_can_enable_err_int(struct intel_display *display)
static bool cpt_can_enable_serr_int(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
struct intel_crtc *crtc;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
@@ -94,11 +94,10 @@ static bool cpt_can_enable_serr_int(struct intel_display *display)
static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(display, crtc->pipe);
u32 enable_mask;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((intel_de_read(display, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
@@ -115,10 +114,9 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t reg = PIPESTAT(display, pipe);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if (enable) {
u32 enable_mask = i915_pipestat_enable_mask(display, pipe);
@@ -136,24 +134,22 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
static void ilk_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pipe == PIPE_A) ?
DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
if (enable)
- ilk_enable_display_irq(dev_priv, bit);
+ ilk_enable_display_irq(display, bit);
else
- ilk_disable_display_irq(dev_priv, bit);
+ ilk_disable_display_irq(display, bit);
}
static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 err_int = intel_de_read(display, GEN7_ERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
@@ -169,7 +165,6 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable,
bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
if (enable) {
intel_de_write(display, GEN7_ERR_INT,
ERR_INT_FIFO_UNDERRUN(pipe));
@@ -177,9 +172,9 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
if (!ivb_can_enable_err_int(display))
return;
- ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_enable_display_irq(display, DE_ERR_INT_IVB);
} else {
- ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_disable_display_irq(display, DE_ERR_INT_IVB);
if (old &&
intel_de_read(display, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -193,36 +188,32 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
static void bdw_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable)
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
else
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
}
static void ibx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pch_transcoder == PIPE_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
- ibx_enable_display_interrupt(dev_priv, bit);
+ ibx_enable_display_interrupt(display, bit);
else
- ibx_disable_display_interrupt(dev_priv, bit);
+ ibx_disable_display_interrupt(display, bit);
}
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
u32 serr_int = intel_de_read(display, SERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
@@ -240,8 +231,6 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable) {
intel_de_write(display, SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
@@ -249,9 +238,9 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
if (!cpt_can_enable_serr_int(display))
return;
- ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_enable_display_interrupt(display, SDE_ERROR_CPT);
} else {
- ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_disable_display_interrupt(display, SDE_ERROR_CPT);
if (old && intel_de_read(display, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
@@ -265,11 +254,10 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
bool old;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
@@ -305,13 +293,12 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *displa
bool intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned long flags;
bool ret;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
ret = __intel_set_cpu_fifo_underrun_reporting(display, pipe, enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return ret;
}
@@ -334,7 +321,6 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pch_transcoder);
unsigned long flags;
bool old;
@@ -348,12 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
* crtc on LPT won't cause issues.
*/
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
old = !crtc->pch_fifo_underrun_disabled;
crtc->pch_fifo_underrun_disabled = !enable;
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
ibx_set_fifo_underrun_reporting(display,
pch_transcoder,
enable);
@@ -362,7 +348,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
pch_transcoder,
enable, old);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return old;
}
@@ -429,10 +415,9 @@ void intel_pch_fifo_underrun_irq_handler(struct intel_display *display,
*/
void intel_check_cpu_fifo_underruns(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
for_each_intel_crtc(display->drm, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
@@ -444,7 +429,7 @@ void intel_check_cpu_fifo_underruns(struct intel_display *display)
ivb_check_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
/**
@@ -457,28 +442,25 @@ void intel_check_cpu_fifo_underruns(struct intel_display *display)
*/
void intel_check_pch_fifo_underruns(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
for_each_intel_crtc(display->drm, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
cpt_check_pch_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
void intel_init_fifo_underrun_reporting(struct intel_display *display,
struct intel_crtc *crtc,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
crtc->cpu_fifo_underrun_disabled = !enable;
/*
@@ -490,6 +472,6 @@ void intel_init_fifo_underrun_reporting(struct intel_display *display,
* PCH transcoders B and C would prevent enabling the south
* error interrupt (see cpt_can_enable_serr_int()).
*/
- if (intel_has_pch_trancoder(i915, crtc->pipe))
+ if (intel_has_pch_trancoder(display, crtc->pipe))
crtc->pch_fifo_underrun_disabled = !enable;
}
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c
new file mode 100644
index 000000000000..6ab2272ab2df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_flipq.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
+#include "intel_step.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "intel_flipq.h"
+#include "intel_dmc.h"
+#include "intel_dmc_regs.h"
+#include "intel_dsb.h"
+#include "intel_vblank.h"
+#include "intel_vrr.h"
+
+/**
+ * DOC: DMC Flip Queue
+ *
+ * A flip queue is a ring buffer implemented by the pipe DMC firmware.
+ * The driver inserts entries into the queues to be executed by the
+ * pipe DMC at a specified presentation timestamp (PTS).
+ *
+ * Each pipe DMC provides several queues:
+ *
+ * - 1 general queue (two DSB buffers executed per entry)
+ * - 3 plane queues (one DSB buffer executed per entry)
+ * - 1 fast queue (deprecated)
+ */
+
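+/*
+ * Editor's sketch (illustrative only, not driver code): conceptually an
+ * entry pairs a presentation timestamp with the DSB buffer(s) to run,
+ * and each queue behaves like a classic ring buffer in which the driver
+ * owns the tail and the firmware owns the head:
+ *
+ *   struct entry { u32 pts; u32 dsb_head; u32 dsb_ctl; };  // layout varies per platform
+ *   q[tail] = e; tail = (tail + 1) % size;                 // driver enqueues
+ *   if (head != tail) { exec(q[head]); head = (head + 1) % size; }  // DMC drains
+ */
+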
+#define for_each_flipq(flipq_id) \
+ for ((flipq_id) = INTEL_FLIPQ_PLANE_1; (flipq_id) < MAX_INTEL_FLIPQ; (flipq_id)++)
+
+static int intel_flipq_offset(enum intel_flipq_id flipq_id)
+{
+ switch (flipq_id) {
+ case INTEL_FLIPQ_PLANE_1:
+ return 0x008;
+ case INTEL_FLIPQ_PLANE_2:
+ return 0x108;
+ case INTEL_FLIPQ_PLANE_3:
+ return 0x208;
+ case INTEL_FLIPQ_GENERAL:
+ return 0x308;
+ case INTEL_FLIPQ_FAST:
+ return 0x3c8;
+ default:
+ MISSING_CASE(flipq_id);
+ return 0;
+ }
+}
+
+static int intel_flipq_size_dw(enum intel_flipq_id flipq_id)
+{
+ switch (flipq_id) {
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ return 64;
+ case INTEL_FLIPQ_GENERAL:
+ case INTEL_FLIPQ_FAST:
+ return 48;
+ default:
+ MISSING_CASE(flipq_id);
+ return 1;
+ }
+}
+
+static int intel_flipq_elem_size_dw(enum intel_flipq_id flipq_id)
+{
+ switch (flipq_id) {
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ return 4;
+ case INTEL_FLIPQ_GENERAL:
+ case INTEL_FLIPQ_FAST:
+ return 6;
+ default:
+ MISSING_CASE(flipq_id);
+ return 1;
+ }
+}
+
+static int intel_flipq_size_entries(enum intel_flipq_id flipq_id)
+{
+ return intel_flipq_size_dw(flipq_id) / intel_flipq_elem_size_dw(flipq_id);
+}
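+
+/*
+ * Editor's note: the sizes above work out to 16 entries per plane queue
+ * (64 / 4 dwords per entry) and 8 entries each for the general and fast
+ * queues (48 / 6 dwords per entry).
+ */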
+
+static void intel_flipq_crtc_init(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum intel_flipq_id flipq_id;
+
+ for_each_flipq(flipq_id) {
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+
+ flipq->start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc) + intel_flipq_offset(flipq_id);
+ flipq->flipq_id = flipq_id;
+
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] FQ %d: start 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ flipq_id, flipq->start_mmioaddr);
+ }
+}
+
+bool intel_flipq_supported(struct intel_display *display)
+{
+ if (!display->params.enable_flipq)
+ return false;
+
+ if (!display->dmc.dmc)
+ return false;
+
+ if (DISPLAY_VER(display) == 20)
+ return true;
+
+ /* DMC firmware expects VRR timing generator to be used */
+ return DISPLAY_VER(display) >= 30 && intel_vrr_always_use_vrr_tg(display);
+}
+
+void intel_flipq_init(struct intel_display *display)
+{
+ struct intel_crtc *crtc;
+
+ intel_dmc_wait_fw_load(display);
+
+ for_each_intel_crtc(display->drm, crtc)
+ intel_flipq_crtc_init(crtc);
+}
+
+static int cdclk_factor(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 30)
+ return 120;
+ else
+ return 280;
+}
+
+int intel_flipq_exec_time_us(struct intel_display *display)
+{
+ return intel_dsb_exec_time_us() +
+ DIV_ROUND_UP(display->cdclk.hw.cdclk * cdclk_factor(display), 540000) +
+ display->sagv.block_time_us;
+}
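+
+/*
+ * Editor's worked example (all numbers assumed): with a 100 us DSB
+ * execution estimate, a 480000 kHz cdclk on a version 30 display
+ * (factor 120) and a 20 us SAGV block time, this evaluates to
+ * 100 + DIV_ROUND_UP(480000 * 120, 540000) + 20 = 100 + 107 + 20 = 227 us.
+ */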
+
+static int intel_flipq_preempt_timeout_ms(struct intel_display *display)
+{
+ return DIV_ROUND_UP(intel_flipq_exec_time_us(display), 1000);
+}
+
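+/*
+ * Editor's note (assumption): preemption pauses flip queue execution so
+ * the queue contents and tail pointers can be updated safely; waiting
+ * for the BUSY bit to clear bounds how long an in-flight entry may take,
+ * hence the timeout derived from the worst-case execution time above.
+ */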
+static void intel_flipq_preempt(struct intel_crtc *crtc, bool preempt)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_de_rmw(display, PIPEDMC_FQ_CTRL(crtc->pipe),
+ PIPEDMC_FQ_CTRL_PREEMPT, preempt ? PIPEDMC_FQ_CTRL_PREEMPT : 0);
+
+ if (preempt &&
+ intel_de_wait_for_clear(display,
+ PIPEDMC_FQ_STATUS(crtc->pipe),
+ PIPEDMC_FQ_STATUS_BUSY,
+ intel_flipq_preempt_timeout_ms(display)))
+ drm_err(display->drm, "[CRTC:%d:%s] flip queue preempt timeout\n",
+ crtc->base.base.id, crtc->base.name);
+}
+
+static int intel_flipq_current_head(struct intel_crtc *crtc, enum intel_flipq_id flipq_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ return intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id));
+}
+
+static void intel_flipq_write_tail(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe),
+ PIPEDMC_FPQ_PLANEQ_3_TP(crtc->flipq[INTEL_FLIPQ_PLANE_3].tail) |
+ PIPEDMC_FPQ_PLANEQ_2_TP(crtc->flipq[INTEL_FLIPQ_PLANE_2].tail) |
+ PIPEDMC_FPQ_PLANEQ_1_TP(crtc->flipq[INTEL_FLIPQ_PLANE_1].tail) |
+ PIPEDMC_FPQ_FASTQ_TP(crtc->flipq[INTEL_FLIPQ_FAST].tail) |
+ PIPEDMC_FPQ_GENERALQ_TP(crtc->flipq[INTEL_FLIPQ_GENERAL].tail));
+}
+
+static void intel_flipq_sw_dmc_wake(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_de_write(display, PIPEDMC_FPQ_CTL1(crtc->pipe), PIPEDMC_SW_DMC_WAKE);
+}
+
+static int intel_flipq_exec_time_lines(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
+ intel_flipq_exec_time_us(display));
+}
+
+void intel_flipq_dump(struct intel_crtc *crtc,
+ enum intel_flipq_id flipq_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+ u32 tmp;
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] FQ %d @ 0x%x: ",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ flipq->start_mmioaddr);
+ for (int i = 0; i < intel_flipq_size_dw(flipq_id); i++) {
+ printk(KERN_CONT " 0x%08x",
+ intel_de_read(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, i)));
+ if (i % intel_flipq_elem_size_dw(flipq_id) == intel_flipq_elem_size_dw(flipq_id) - 1)
+ printk(KERN_CONT "\n");
+ }
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] FQ %d: chp=0x%x, hp=0x%x\n",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id)),
+ intel_de_read(display, PIPEDMC_FPQ_HP(crtc->pipe, flipq_id)));
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] FQ %d: current head %d\n",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ intel_flipq_current_head(crtc, flipq_id));
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] flip queue timestamp: 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe)));
+
+ tmp = intel_de_read(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe));
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] flip queue atomic tails: P3 %d, P2 %d, P1 %d, G %d, F %d\n",
+ crtc->base.base.id, crtc->base.name,
+ REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_3_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_2_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_1_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_GENERALQ_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_FASTQ_TP_MASK, tmp));
+}
+
+void intel_flipq_reset(struct intel_display *display, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
+ enum intel_flipq_id flipq_id;
+
+ intel_de_write(display, PIPEDMC_FQ_CTRL(pipe), 0);
+
+ intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(pipe), 0);
+ intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(pipe), 0);
+
+ for_each_flipq(flipq_id) {
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+
+ intel_de_write(display, PIPEDMC_FPQ_HP(pipe, flipq_id), 0);
+ intel_de_write(display, PIPEDMC_FPQ_CHP(pipe, flipq_id), 0);
+
+ flipq->tail = 0;
+ }
+
+ intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(pipe), 0);
+}
+
+static enum pipedmc_event_id flipq_event_id(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 30)
+ return PIPEDMC_EVENT_FULL_FQ_WAKE_TRIGGER;
+ else
+ return PIPEDMC_EVENT_SCANLINE_INRANGE_FQ_TRIGGER;
+}
+
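+/*
+ * Editor's note (assumption): the scanline compare window programmed
+ * below wakes the DMC shortly before vblank start; the upper bound is
+ * vblank start minus the worst-case execution time, and the lower bound
+ * sits two lines earlier to form a small in-range trigger window.
+ */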
+void intel_flipq_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ /* FIXME what to do with VRR? */
+ int scanline = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
+ intel_flipq_exec_time_lines(crtc_state);
+
+ if (DISPLAY_VER(display) >= 30) {
+ u32 start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc);
+
+ /* undocumented magic DMC variables */
+ intel_de_write(display, PTL_PIPEDMC_EXEC_TIME_LINES(start_mmioaddr),
+ intel_flipq_exec_time_lines(crtc_state));
+ intel_de_write(display, PTL_PIPEDMC_END_OF_EXEC_GB(start_mmioaddr),
+ 100);
+ }
+
+ intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe),
+ PIPEDMC_SCANLINE_UPPER(scanline));
+ intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe),
+ PIPEDMC_SCANLINEINRANGECMP_EN |
+ PIPEDMC_SCANLINE_LOWER(scanline - 2));
+
+ intel_pipedmc_enable_event(crtc, flipq_event_id(display));
+
+ intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), PIPEDMC_FQ_CTRL_ENABLE);
+}
+
+void intel_flipq_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ intel_flipq_preempt(crtc, true);
+
+ intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), 0);
+
+ intel_pipedmc_disable_event(crtc, flipq_event_id(display));
+
+ intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe), 0);
+ intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe), 0);
+}
+
+static bool assert_flipq_has_room(struct intel_crtc *crtc,
+ enum intel_flipq_id flipq_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+ int head, size = intel_flipq_size_entries(flipq_id);
+
+ head = intel_flipq_current_head(crtc, flipq_id);
+
+ return !drm_WARN(display->drm,
+ (flipq->tail + size - head) % size >= size - 1,
+ "[CRTC:%d:%s] FQ %d overflow (head %d, tail %d, size %d)\n",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ head, flipq->tail, size);
+}
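+
+/*
+ * Editor's note: the check above keeps one slot unused so that a full
+ * queue (occupancy == size - 1) remains distinguishable from an empty
+ * one (head == tail). E.g. for the 8-entry general queue, head == 3 and
+ * tail == 2 gives (2 + 8 - 3) % 8 == 7 == size - 1, i.e. no room left.
+ */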
+
+static void intel_flipq_write(struct intel_display *display,
+ struct intel_flipq *flipq, u32 data, int i)
+{
+ intel_de_write(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, flipq->tail *
+ intel_flipq_elem_size_dw(flipq->flipq_id) + i), data);
+}
+
+static void lnl_flipq_add(struct intel_display *display,
+ struct intel_flipq *flipq,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb)
+{
+ int i = 0;
+
+ switch (flipq->flipq_id) {
+ case INTEL_FLIPQ_GENERAL:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT |
+ LNL_FQ_DSB_ID(dsb_id) |
+ LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */
+ intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */
+ break;
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT |
+ LNL_FQ_DSB_ID(dsb_id) |
+ LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ break;
+ default:
+ MISSING_CASE(flipq->flipq_id);
+ return;
+ }
+}
+
+static void ptl_flipq_add(struct intel_display *display,
+ struct intel_flipq *flipq,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb)
+{
+ int i = 0;
+
+ switch (flipq->flipq_id) {
+ case INTEL_FLIPQ_GENERAL:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT |
+ PTL_FQ_DSB_ID(dsb_id) |
+ PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */
+ intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */
+ break;
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT |
+ PTL_FQ_DSB_ID(dsb_id) |
+ PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ break;
+ default:
+ MISSING_CASE(flipq->flipq_id);
+ return;
+ }
+}
+
+void intel_flipq_add(struct intel_crtc *crtc,
+ enum intel_flipq_id flipq_id,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+
+ if (!assert_flipq_has_room(crtc, flipq_id))
+ return;
+
+ pts += intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe));
+
+ intel_flipq_preempt(crtc, true);
+
+ if (DISPLAY_VER(display) >= 30)
+ ptl_flipq_add(display, flipq, pts, dsb_id, dsb);
+ else
+ lnl_flipq_add(display, flipq, pts, dsb_id, dsb);
+
+ flipq->tail = (flipq->tail + 1) % intel_flipq_size_entries(flipq->flipq_id);
+ intel_flipq_write_tail(crtc);
+
+ intel_flipq_preempt(crtc, false);
+
+ intel_flipq_sw_dmc_wake(crtc);
+}
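+
+/*
+ * Editor's usage sketch (hypothetical caller, units assumed): enqueue a
+ * prepared DSB on the general queue one 60 Hz frame from now, assuming
+ * the flip queue timestamp counter ticks in microseconds:
+ *
+ *   intel_flipq_add(crtc, INTEL_FLIPQ_GENERAL, 16667, INTEL_DSB_0, dsb);
+ *
+ * The pts argument is relative: the current PIPEDMC_FPQ_TS value is
+ * added before the entry is written.
+ */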
+
+/* Wa_18034343758 */
+static bool need_dmc_halt_wa(struct intel_display *display)
+{
+ return DISPLAY_VER(display) == 20 ||
+ (display->platform.pantherlake &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_B0));
+}
+
+void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (need_dmc_halt_wa(display))
+ intel_dsb_wait_usec(dsb, 2);
+}
+
+void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (need_dmc_halt_wa(display))
+ intel_dsb_reg_write(dsb, PIPEDMC_CTL(crtc->pipe), 0);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.h b/drivers/gpu/drm/i915/display/intel_flipq.h
new file mode 100644
index 000000000000..012e3e9a6bcb
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_flipq.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_FLIPQ_H__
+#define __INTEL_FLIPQ_H__
+
+#include <linux/types.h>
+
+enum intel_dsb_id;
+enum intel_flipq_id;
+enum pipe;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_display;
+struct intel_dsb;
+
+bool intel_flipq_supported(struct intel_display *display);
+void intel_flipq_init(struct intel_display *display);
+void intel_flipq_reset(struct intel_display *display, enum pipe pipe);
+
+void intel_flipq_enable(const struct intel_crtc_state *crtc_state);
+void intel_flipq_disable(const struct intel_crtc_state *old_crtc_state);
+
+void intel_flipq_add(struct intel_crtc *crtc,
+ enum intel_flipq_id flip_queue_id,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb);
+int intel_flipq_exec_time_us(struct intel_display *display);
+void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc);
+void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc);
+void intel_flipq_dump(struct intel_crtc *crtc,
+ enum intel_flipq_id flip_queue_id);
+
+#endif /* __INTEL_FLIPQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index ba2f88ca6117..43be5377ddc1 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -58,7 +58,6 @@
#include <drm/drm_gem.h>
#include "i915_active.h"
-#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_trace.h"
@@ -72,7 +71,7 @@
/**
* frontbuffer_flush - flush frontbuffer
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -82,16 +81,14 @@
*
* Can be called without any locks held.
*/
-static void frontbuffer_flush(struct drm_i915_private *i915,
+static void frontbuffer_flush(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct intel_display *display = &i915->display;
-
/* Delay flushing when rings are still busy. */
- spin_lock(&i915->display.fb_tracking.lock);
- frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
+ frontbuffer_bits &= ~display->fb_tracking.busy_bits;
+ spin_unlock(&display->fb_tracking.lock);
if (!frontbuffer_bits)
return;
@@ -107,7 +104,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. The actual
@@ -117,19 +114,19 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_prepare(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
- i915->display.fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&display->fb_tracking.lock);
+ display->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ display->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
}
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
@@ -137,22 +134,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_complete(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= i915->display.fb_tracking.flip_bits;
- i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ frontbuffer_bits &= display->fb_tracking.flip_bits;
+ display->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
if (frontbuffer_bits)
- frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
@@ -161,15 +158,15 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip(struct drm_i915_private *i915,
+void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ display->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
- frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
void __intel_fb_invalidate(struct intel_frontbuffer *front,
@@ -198,7 +195,6 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
unsigned int frontbuffer_bits)
{
struct intel_display *display = to_intel_display(front->obj->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -209,7 +205,7 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
}
if (frontbuffer_bits)
- frontbuffer_flush(i915, frontbuffer_bits, origin);
+ frontbuffer_flush(display, frontbuffer_bits, origin);
}
static void intel_frontbuffer_flush_work(struct work_struct *work)
@@ -280,7 +276,7 @@ static void frontbuffer_release(struct kref *ref)
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct intel_display *display = to_intel_display(obj->dev);
struct intel_frontbuffer *front, *cur;
front = intel_bo_get_frontbuffer(obj);
@@ -300,9 +296,9 @@ intel_frontbuffer_get(struct drm_gem_object *obj)
I915_ACTIVE_RETIRE_SLEEPS);
INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work);
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
cur = intel_bo_set_frontbuffer(obj, front);
- spin_unlock(&i915->display.fb_tracking.lock);
+ spin_unlock(&display->fb_tracking.lock);
if (cur != front)
kfree(front);
return cur;
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 6237780a9f68..2fee12eaf9b6 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -31,7 +31,7 @@
#include "i915_active_types.h"
struct drm_gem_object;
-struct drm_i915_private;
+struct intel_display;
enum fb_op_origin {
ORIGIN_CPU = 0,
@@ -68,11 +68,11 @@ struct intel_frontbuffer {
GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_prepare(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_complete(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *i915,
+void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits);
void intel_frontbuffer_put(struct intel_frontbuffer *front);
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 8a49e2bb37fa..000a898c9480 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -3,10 +3,13 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/pci.h>
#include <linux/string.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_global_state.h"
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index abf457e68ee9..0d73f32fe7f1 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -37,6 +37,7 @@
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
#include "intel_gmbus_regs.h"
@@ -152,32 +153,31 @@ static const struct gmbus_pin gmbus_pins_mtp[] = {
static const struct gmbus_pin *get_gmbus_pin(struct intel_display *display,
unsigned int pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct gmbus_pin *pins;
size_t size;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL) {
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL) {
pins = gmbus_pins_mtp;
size = ARRAY_SIZE(gmbus_pins_mtp);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG2) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG2) {
pins = gmbus_pins_dg2;
size = ARRAY_SIZE(gmbus_pins_dg2);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
- } else if (HAS_PCH_CNP(i915)) {
+ } else if (HAS_PCH_CNP(display)) {
pins = gmbus_pins_cnp;
size = ARRAY_SIZE(gmbus_pins_cnp);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
pins = gmbus_pins_bxt;
size = ARRAY_SIZE(gmbus_pins_bxt);
} else if (DISPLAY_VER(display) == 9) {
pins = gmbus_pins_skl;
size = ARRAY_SIZE(gmbus_pins_skl);
- } else if (IS_BROADWELL(i915)) {
+ } else if (display->platform.broadwell) {
pins = gmbus_pins_bdw;
size = ARRAY_SIZE(gmbus_pins_bdw);
} else {
@@ -240,11 +240,10 @@ static void bxt_gmbus_clock_gating(struct intel_display *display,
static u32 get_reserved(struct intel_gmbus *bus)
{
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(i915) && !IS_I845G(i915))
+ if (!display->platform.i830 && !display->platform.i845g)
reserved = intel_de_read_notrace(display, bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
@@ -314,11 +313,10 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_gmbus_reset(display);
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
pnv_gmbus_clock_gating(display, false);
set_data(bus, 1);
@@ -332,12 +330,11 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
set_data(bus, 1);
set_clock(bus, 1);
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
pnv_gmbus_clock_gating(display, true);
}
@@ -418,7 +415,7 @@ gmbus_wait_idle(struct intel_display *display)
add_wait_queue(&display->gmbus.wait_queue, &wait);
intel_de_write_fw(display, GMBUS4(display), irq_enable);
- ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10);
+ ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10, NULL);
intel_de_write_fw(display, GMBUS4(display), 0);
remove_wait_queue(&display->gmbus.wait_queue, &wait);
@@ -630,14 +627,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
int i = 0, inc, try = 0;
int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_gmbus_clock_gating(display, false);
- else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
pch_gmbus_clock_gating(display, false);
retry:
@@ -748,9 +744,9 @@ timeout:
out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_gmbus_clock_gating(display, true);
- else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
pch_gmbus_clock_gating(display, true);
return ret;
@@ -873,12 +869,11 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
*/
int intel_gmbus_setup(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
unsigned int pin;
int ret;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
display->gmbus.mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH(display))
/*
@@ -925,7 +920,7 @@ int intel_gmbus_setup(struct intel_display *display)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(i915))
+ if (display->platform.i830)
bus->force_bit = 1;
intel_gpio_setup(bus, GPIO(display, gmbus_pin->gpio));
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
index 59bad1dda6d6..ab750562566b 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_GMBUS_REGS_H__
#define __INTEL_GMBUS_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define __GMBUS_MMIO_BASE(__display) ((__display)->gmbus.mmio_base)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 1bf424a822f3..42202c8bb066 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -14,20 +14,28 @@
#include <linux/random.h>
#include <drm/display/drm_hdcp_helper.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dp_mst.h"
#include "intel_hdcp.h"
#include "intel_hdcp_gsc.h"
+#include "intel_hdcp_gsc_message.h"
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_pcode.h"
+#include "intel_step.h"
+
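+/* MTL and newer (display ver >= 14) route HDCP messages through the GSC */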
+#define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14)
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
@@ -136,7 +144,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
data->k++;
/* if there is only one active stream */
- if (dig_port->dp.mst.active_links <= 1)
+ if (intel_dp_mst_active_streams(&dig_port->dp) <= 1)
break;
}
drm_connector_list_iter_end(&conn_iter);
@@ -248,8 +256,8 @@ static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
return false;
/* If MTL+, make sure the GSC is loaded and the proxy is set up */
- if (intel_hdcp_gsc_cs_required(display)) {
- if (!intel_hdcp_gsc_check_status(display))
+ if (USE_HDCP_GSC(display)) {
+ if (!intel_hdcp_gsc_check_status(display->drm))
return false;
}
@@ -334,9 +342,7 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
static bool hdcp_key_loadable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum i915_power_well_id id;
- intel_wakeref_t wakeref;
bool enabled = false;
/*
@@ -349,7 +355,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
id = SKL_DISP_PW_1;
/* PG1 (power well #1) needs to be enabled */
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
enabled = intel_display_power_well_is_enabled(display, id);
/*
@@ -370,7 +376,6 @@ static void intel_hdcp_clear_keys(struct intel_display *display)
static int intel_hdcp_load_keys(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
u32 val;
@@ -395,7 +400,7 @@ static int intel_hdcp_load_keys(struct intel_display *display)
* Mailbox interface.
*/
if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
- ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ ret = intel_pcode_write(display->drm, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
drm_err(display->drm,
"Failed to initiate HDCP key load (%d)\n",
@@ -1085,7 +1090,6 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
u64 value, bool update_property)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1106,7 +1110,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
hdcp->value = value;
if (update_property) {
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ if (!queue_work(display->wq.unordered, &hdcp->prop_work))
drm_connector_put(&connector->base);
}
}
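
A note on the idiom above, since it recurs throughout this patch: queue_work() returns false when the work item is already pending, and on that path the connector reference taken for the worker must be dropped immediately. A minimal sketch of the pattern (illustrative only, not part of the diff):

        /* Take a reference for the worker; drop it if already queued. */
        drm_connector_get(&connector->base);
        if (!queue_work(display->wq.unordered, &hdcp->prop_work))
                drm_connector_put(&connector->base);

The mechanical change here is only the workqueue: i915->unordered_wq becomes display->wq.unordered, removing another use of drm_i915_private from the display code.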
@@ -2233,16 +2237,15 @@ static void intel_hdcp_check_work(struct work_struct *work)
check_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (drm_connector_is_unregistered(&connector->base))
return;
if (!intel_hdcp2_check_link(connector))
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work,
DRM_HDCP2_CHECK_PERIOD_MS);
else if (!intel_hdcp_check_link(connector))
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
}
@@ -2339,7 +2342,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
static bool is_hdcp2_supported(struct intel_display *display)
{
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
return true;
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
@@ -2363,7 +2366,7 @@ void intel_hdcp_component_init(struct intel_display *display)
display->hdcp.comp_added = true;
mutex_unlock(&display->hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
ret = intel_hdcp_gsc_init(display);
else
ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
@@ -2433,7 +2436,6 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2492,7 +2494,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
}
if (!ret) {
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work,
check_link_interval);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED,
@@ -2563,7 +2565,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
bool content_protection_type_changed, desired_and_not_enabled = false;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (!connector->hdcp.shim)
return;
@@ -2590,7 +2592,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ if (!queue_work(display->wq.unordered, &hdcp->prop_work))
drm_connector_put(&connector->base);
mutex_unlock(&hdcp->mutex);
}
@@ -2608,7 +2610,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
*/
if (!desired_and_not_enabled && !content_protection_type_changed) {
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ if (!queue_work(display->wq.unordered, &hdcp->prop_work))
drm_connector_put(&connector->base);
}
@@ -2638,7 +2640,7 @@ void intel_hdcp_component_fini(struct intel_display *display)
display->hdcp.comp_added = false;
mutex_unlock(&display->hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
intel_hdcp_gsc_fini(display);
else
component_del(display->drm->dev, &i915_hdcp_ops);
@@ -2732,7 +2734,6 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (!hdcp->shim)
return;
@@ -2740,7 +2741,7 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
atomic_inc(&connector->hdcp.cp_irq_count);
wake_up_all(&connector->hdcp.cp_irq_queue);
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work, 0);
}
static void __intel_hdcp_info(struct seq_file *m, struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 55965844d829..6a22862d6be1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -11,27 +11,22 @@
#include "i915_drv.h"
#include "i915_utils.h"
#include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+ struct drm_i915_private *i915;
struct i915_vma *vma;
void *hdcp_cmd_in;
void *hdcp_cmd_out;
};
-bool intel_hdcp_gsc_cs_required(struct intel_display *display)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
- return DISPLAY_VER(display) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct intel_display *display)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_i915_private *i915 = to_i915(drm);
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) {
- drm_dbg_kms(display->drm,
+ drm_dbg_kms(&i915->drm,
"GSC components required for HDCP2.2 are not ready\n");
return false;
}
@@ -41,7 +36,7 @@ bool intel_hdcp_gsc_check_status(struct intel_display *display)
/* This function allocates memory for the command that we will send to the GSC CS */
static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
- struct intel_hdcp_gsc_message *hdcp_message)
+ struct intel_hdcp_gsc_context *gsc_context)
{
struct intel_gt *gt = i915->media_gt;
struct drm_i915_gem_object *obj = NULL;
@@ -78,9 +73,10 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
memset(cmd_in, 0, obj->base.size);
- hdcp_message->hdcp_cmd_in = cmd_in;
- hdcp_message->hdcp_cmd_out = cmd_out;
- hdcp_message->vma = vma;
+ gsc_context->hdcp_cmd_in = cmd_in;
+ gsc_context->hdcp_cmd_out = cmd_out;
+ gsc_context->vma = vma;
+ gsc_context->i915 = i915;
return 0;
@@ -91,80 +87,37 @@ out_unpin:
return err;
}
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
- .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
- .verify_receiver_cert_prepare_km =
- intel_hdcp_gsc_verify_receiver_cert_prepare_km,
- .verify_hprime = intel_hdcp_gsc_verify_hprime,
- .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
- .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
- .verify_lprime = intel_hdcp_gsc_verify_lprime,
- .get_session_key = intel_hdcp_gsc_get_session_key,
- .repeater_check_flow_prepare_ack =
- intel_hdcp_gsc_repeater_check_flow_prepare_ack,
- .verify_mprime = intel_hdcp_gsc_verify_mprime,
- .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
- .close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_hdcp_gsc_context *gsc_context;
int ret;
- hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
- if (!hdcp_message)
- return -ENOMEM;
+ gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+ if (!gsc_context)
+ return ERR_PTR(-ENOMEM);
/*
* NOTE: No need to lock the comp mutex here, as it is already
* taken before this function is called
*/
- display->hdcp.hdcp_message = hdcp_message;
- ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message);
-
- if (ret)
- drm_err(display->drm, "Could not initialize hdcp_message\n");
-
- return ret;
-}
-
-static void intel_hdcp_gsc_free_message(struct intel_display *display)
-{
- struct intel_hdcp_gsc_message *hdcp_message =
- display->hdcp.hdcp_message;
+ ret = intel_hdcp_gsc_initialize_message(i915, gsc_context);
+ if (ret) {
+ drm_err(&i915->drm, "Could not initialize gsc_context\n");
+ kfree(gsc_context);
+ gsc_context = ERR_PTR(ret);
+ }
- hdcp_message->hdcp_cmd_in = NULL;
- hdcp_message->hdcp_cmd_out = NULL;
- i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
- kfree(hdcp_message);
+ return gsc_context;
}
-int intel_hdcp_gsc_init(struct intel_display *display)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
- struct i915_hdcp_arbiter *data;
- int ret;
-
- data = kzalloc(sizeof(struct i915_hdcp_arbiter), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (!gsc_context)
+ return;
- mutex_lock(&display->hdcp.hdcp_mutex);
- display->hdcp.arbiter = data;
- display->hdcp.arbiter->hdcp_dev = display->drm->dev;
- display->hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(display);
- mutex_unlock(&display->hdcp.hdcp_mutex);
-
- return ret;
-}
-
-void intel_hdcp_gsc_fini(struct intel_display *display)
-{
- intel_hdcp_gsc_free_message(display);
- kfree(display->hdcp.arbiter);
+ i915_vma_unpin_and_release(&gsc_context->vma, I915_VMA_RELEASE_MAP);
+ kfree(gsc_context);
}
static int intel_gsc_send_sync(struct drm_i915_private *i915,
@@ -211,18 +164,18 @@ static int intel_gsc_send_sync(struct drm_i915_private *i915,
/*
* This function can be used for sending requests, and it also handles
* receipt of the reply messages, so no separate message retrieval function
- * is required. We will initialize intel_hdcp_gsc_message structure then add
+ * is required. We will initialize intel_hdcp_gsc_context structure then add
* the GSC CS memory header, as stated in the specs, after which the normal
* HDCP payload will follow.
*/
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
{
+ struct drm_i915_private *i915 = gsc_context->i915;
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_mtl_header *header_in, *header_out;
const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in);
- struct intel_hdcp_gsc_message *hdcp_message;
u64 addr_in, addr_out, host_session_id;
u32 reply_size, msg_size_in, msg_size_out;
int ret, tries = 0;
@@ -235,10 +188,9 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
msg_size_in = msg_in_len + sizeof(*header_in);
msg_size_out = msg_out_len + sizeof(*header_out);
- hdcp_message = i915->display.hdcp.hdcp_message;
- header_in = hdcp_message->hdcp_cmd_in;
- header_out = hdcp_message->hdcp_cmd_out;
- addr_in = i915_ggtt_offset(hdcp_message->vma);
+ header_in = gsc_context->hdcp_cmd_in;
+ header_out = gsc_context->hdcp_cmd_out;
+ addr_in = i915_ggtt_offset(gsc_context->vma);
addr_out = addr_in + PAGE_SIZE;
memset(header_in, 0, msg_size_in);
@@ -246,7 +198,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
get_random_bytes(&host_session_id, sizeof(u64));
intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP,
msg_size_in, host_session_id);
- memcpy(hdcp_message->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
+ memcpy(gsc_context->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
/*
* Keep sending the request in case the pending bit is set; no need to add
@@ -280,7 +232,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
reply_size, (u32)msg_out_len);
}
- memcpy(msg_out, hdcp_message->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
+ memcpy(msg_out, gsc_context->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
err:
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
index 5695a5e4f609..9305c14aaffe 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -6,19 +6,17 @@
#ifndef __INTEL_HDCP_GSC_H__
#define __INTEL_HDCP_GSC_H__
-#include <linux/err.h>
#include <linux/types.h>
-struct drm_i915_private;
-struct intel_display;
-struct intel_hdcp_gsc_message;
+struct drm_device;
+struct intel_hdcp_gsc_context;
-bool intel_hdcp_gsc_cs_required(struct intel_display *display);
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len);
-int intel_hdcp_gsc_init(struct intel_display *display);
-void intel_hdcp_gsc_fini(struct intel_display *display);
-bool intel_hdcp_gsc_check_status(struct intel_display *display);
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len);
+bool intel_hdcp_gsc_check_status(struct drm_device *drm);
+
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm);
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context);
#endif /* __INTEL_HDCP_GSC_H__ */
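
Taken together, the header now exposes a self-contained context API. A hedged sketch of the intended lifecycle, using only the declarations above (cmd_in and cmd_out stand in for one of the wired_cmd_* request/reply pairs from intel_hdcp_gsc_message.c; error handling abbreviated):

        struct intel_hdcp_gsc_context *gsc_context;
        ssize_t byte;

        gsc_context = intel_hdcp_gsc_context_alloc(display->drm);
        if (IS_ERR(gsc_context))
                return PTR_ERR(gsc_context);

        /* Request and reply travel through the same pinned buffers. */
        byte = intel_hdcp_gsc_msg_send(gsc_context, &cmd_in, sizeof(cmd_in),
                                       &cmd_out, sizeof(cmd_out));

        intel_hdcp_gsc_context_free(gsc_context);

Note that intel_hdcp_gsc_msg_send() now takes void * for the message buffers, which is what lets the callers below drop their (u8 *) casts.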
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
index 129104fa9b16..98967bb148e3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
@@ -4,20 +4,23 @@
*/
#include <linux/err.h>
+
+#include <drm/drm_print.h>
#include <drm/intel/i915_hdcp_interface.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
-int
+static int
intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_init *ake_data)
{
struct wired_cmd_initiate_hdcp2_session_in session_init_in = {};
struct wired_cmd_initiate_hdcp2_session_out session_init_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ake_data)
@@ -28,7 +31,7 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
@@ -41,9 +44,9 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
session_init_in.protocol = data->protocol;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &session_init_in,
sizeof(session_init_in),
- (u8 *)&session_init_out,
+ &session_init_out,
sizeof(session_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -64,7 +67,7 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_cert *rx_cert,
@@ -75,8 +78,8 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
{
struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = {};
struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
@@ -87,7 +90,7 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
@@ -103,9 +106,9 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_rxcert_in,
sizeof(verify_rxcert_in),
- (u8 *)&verify_rxcert_out,
+ &verify_rxcert_out,
sizeof(verify_rxcert_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
@@ -134,14 +137,14 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_hprime *rx_hprime)
{
struct wired_cmd_ake_send_hprime_in send_hprime_in = {};
struct wired_cmd_ake_send_hprime_out send_hprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_hprime)
@@ -152,7 +155,7 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
@@ -166,9 +169,9 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &send_hprime_in,
sizeof(send_hprime_in),
- (u8 *)&send_hprime_out,
+ &send_hprime_out,
sizeof(send_hprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -184,14 +187,14 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int
+static int
intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
struct wired_cmd_ake_send_pairing_info_in pairing_info_in = {};
struct wired_cmd_ake_send_pairing_info_out pairing_info_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !pairing_info)
@@ -202,7 +205,7 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
@@ -217,9 +220,9 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &pairing_info_in,
sizeof(pairing_info_in),
- (u8 *)&pairing_info_out,
+ &pairing_info_out,
sizeof(pairing_info_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -236,15 +239,15 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
return 0;
}
-int
+static int
intel_hdcp_gsc_initiate_locality_check(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_init *lc_init_data)
{
struct wired_cmd_init_locality_check_in lc_init_in = {};
struct wired_cmd_init_locality_check_out lc_init_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !lc_init_data)
@@ -255,7 +258,7 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
@@ -266,8 +269,8 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in),
- (u8 *)&lc_init_out, sizeof(lc_init_out));
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &lc_init_in, sizeof(lc_init_in),
+ &lc_init_out, sizeof(lc_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@@ -285,14 +288,14 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_lc_send_lprime *rx_lprime)
{
struct wired_cmd_validate_locality_in verify_lprime_in = {};
struct wired_cmd_validate_locality_out verify_lprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_lprime)
@@ -303,7 +306,7 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
@@ -318,9 +321,9 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_lprime_in,
sizeof(verify_lprime_in),
- (u8 *)&verify_lprime_out,
+ &verify_lprime_out,
sizeof(verify_lprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -337,14 +340,15 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int intel_hdcp_gsc_get_session_key(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data)
+static int
+intel_hdcp_gsc_get_session_key(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data)
{
struct wired_cmd_get_session_key_in get_skey_in = {};
struct wired_cmd_get_session_key_out get_skey_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ske_data)
@@ -355,7 +359,7 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
@@ -366,8 +370,8 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in),
- (u8 *)&get_skey_out, sizeof(get_skey_out));
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &get_skey_in, sizeof(get_skey_in),
+ &get_skey_out, sizeof(get_skey_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@@ -387,7 +391,7 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_send_receiverid_list
@@ -397,8 +401,8 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
{
struct wired_cmd_verify_repeater_in verify_repeater_in = {};
struct wired_cmd_verify_repeater_out verify_repeater_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !rep_topology || !rep_send_ack || !data)
@@ -409,7 +413,7 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
@@ -430,9 +434,9 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
HDCP_2_2_RECEIVER_IDS_MAX_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_repeater_in,
sizeof(verify_repeater_in),
- (u8 *)&verify_repeater_out,
+ &verify_repeater_out,
sizeof(verify_repeater_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -453,14 +457,15 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
return 0;
}
-int intel_hdcp_gsc_verify_mprime(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready)
+static int
+intel_hdcp_gsc_verify_mprime(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready)
{
struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
size_t cmd_size;
@@ -472,7 +477,7 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
cmd_size = struct_size(verify_mprime_in, streams, data->k);
if (cmd_size == SIZE_MAX)
@@ -499,8 +504,8 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
verify_mprime_in->k = cpu_to_be16(data->k);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size,
- (u8 *)&verify_mprime_out,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, verify_mprime_in, cmd_size,
+ &verify_mprime_out,
sizeof(verify_mprime_out));
kfree(verify_mprime_in);
if (byte < 0) {
@@ -518,13 +523,13 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
return 0;
}
-int intel_hdcp_gsc_enable_authentication(struct device *dev,
- struct hdcp_port_data *data)
+static int intel_hdcp_gsc_enable_authentication(struct device *dev,
+ struct hdcp_port_data *data)
{
struct wired_cmd_enable_auth_in enable_auth_in = {};
struct wired_cmd_enable_auth_out enable_auth_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
@@ -535,7 +540,7 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
@@ -547,9 +552,9 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
enable_auth_in.stream_type = data->streams[0].stream_type;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &enable_auth_in,
sizeof(enable_auth_in),
- (u8 *)&enable_auth_out,
+ &enable_auth_out,
sizeof(enable_auth_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -565,13 +570,13 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
{
struct wired_cmd_close_session_in session_close_in = {};
struct wired_cmd_close_session_out session_close_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
@@ -582,7 +587,7 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
@@ -594,9 +599,9 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
session_close_in.port.physical_port = (u8)data->hdcp_ddi;
session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &session_close_in,
sizeof(session_close_in),
- (u8 *)&session_close_out,
+ &session_close_out,
sizeof(session_close_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -611,3 +616,57 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
return 0;
}
+
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+ .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ intel_hdcp_gsc_verify_receiver_cert_prepare_km,
+ .verify_hprime = intel_hdcp_gsc_verify_hprime,
+ .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
+ .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
+ .verify_lprime = intel_hdcp_gsc_verify_lprime,
+ .get_session_key = intel_hdcp_gsc_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ intel_hdcp_gsc_repeater_check_flow_prepare_ack,
+ .verify_mprime = intel_hdcp_gsc_verify_mprime,
+ .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
+ .close_hdcp_session = intel_hdcp_gsc_close_session,
+};
+
+int intel_hdcp_gsc_init(struct intel_display *display)
+{
+ struct intel_hdcp_gsc_context *gsc_context;
+ struct i915_hdcp_arbiter *arbiter;
+ int ret = 0;
+
+ arbiter = kzalloc(sizeof(*arbiter), GFP_KERNEL);
+ if (!arbiter)
+ return -ENOMEM;
+
+ mutex_lock(&display->hdcp.hdcp_mutex);
+
+ gsc_context = intel_hdcp_gsc_context_alloc(display->drm);
+ if (IS_ERR(gsc_context)) {
+ ret = PTR_ERR(gsc_context);
+ kfree(arbiter);
+ goto out;
+ }
+
+ display->hdcp.arbiter = arbiter;
+ display->hdcp.arbiter->hdcp_dev = display->drm->dev;
+ display->hdcp.arbiter->ops = &gsc_hdcp_ops;
+ display->hdcp.gsc_context = gsc_context;
+
+out:
+ mutex_unlock(&display->hdcp.hdcp_mutex);
+
+ return ret;
+}
+
+void intel_hdcp_gsc_fini(struct intel_display *display)
+{
+ intel_hdcp_gsc_context_free(display->hdcp.gsc_context);
+ display->hdcp.gsc_context = NULL;
+ kfree(display->hdcp.arbiter);
+ display->hdcp.arbiter = NULL;
+}
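
With the ops table and the init/fini entry points moved into this file, every gsc_hdcp_ops callback can be static: intel_hdcp.c reaches them only through the arbiter registered above. A sketch of that dispatch path, assuming the i915_hdcp_interface.h callback signatures shown in this file (locking and error handling omitted):

        struct i915_hdcp_arbiter *arbiter = display->hdcp.arbiter;
        int ret;

        /* e.g. kicking off an HDCP 2.x session from the intel_hdcp.c side */
        ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev,
                                                   data, &ake_data);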
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
index 2d597f27e931..9f54157a4a3e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
@@ -6,68 +6,9 @@
#ifndef __INTEL_HDCP_GSC_MESSAGE_H__
#define __INTEL_HDCP_GSC_MESSAGE_H__
-#include <linux/types.h>
-
-struct device;
-struct drm_i915_private;
-struct hdcp_port_data;
-struct hdcp2_ake_init;
-struct hdcp2_ake_send_cert;
-struct hdcp2_ake_no_stored_km;
-struct hdcp2_ake_send_hprime;
-struct hdcp2_ake_send_pairing_info;
-struct hdcp2_lc_init;
-struct hdcp2_lc_send_lprime;
-struct hdcp2_ske_send_eks;
-struct hdcp2_rep_send_receiverid_list;
-struct hdcp2_rep_send_ack;
-struct hdcp2_rep_stream_ready;
struct intel_display;
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len);
-bool intel_hdcp_gsc_check_status(struct intel_display *display);
-int
-intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_init *ake_data);
-int
-intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_cert *rx_cert,
- bool *km_stored,
- struct hdcp2_ake_no_stored_km
- *ek_pub_km,
- size_t *msg_sz);
-int
-intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_send_hprime *rx_hprime);
-int
-intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_send_pairing_info *pairing_info);
-int
-intel_hdcp_gsc_initiate_locality_check(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_lc_init *lc_init_data);
-int
-intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_lc_send_lprime *rx_lprime);
-int intel_hdcp_gsc_get_session_key(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data);
-int
-intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_send_receiverid_list
- *rep_topology,
- struct hdcp2_rep_send_ack
- *rep_send_ack);
-int intel_hdcp_gsc_verify_mprime(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready);
-int intel_hdcp_gsc_enable_authentication(struct device *dev,
- struct hdcp_port_data *data);
-int
-intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data);
+int intel_hdcp_gsc_init(struct intel_display *display);
+void intel_hdcp_gsc_fini(struct intel_display *display);
#endif /* __INTEL_HDCP_GSC_MESSAGE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
index f590d7f48ba7..112ce8c896d6 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -247,7 +247,7 @@
_TRANSA_HDCP2_STREAM_STATUS, \
_TRANSB_HDCP2_STREAM_STATUS)
#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
-#define STREAM_TYPE_STATUS REG_BIT(30)
+#define STREAM_TYPE_STATUS_MASK REG_GENMASK(30, 30)
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
@@ -263,7 +263,7 @@
#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
_TRANSA_HDCP2_AUTH_STREAM, \
_TRANSB_HDCP2_AUTH_STREAM)
-#define AUTH_STREAM_TYPE REG_BIT(31)
+#define AUTH_STREAM_TYPE_MASK REG_GENMASK(31, 31)
#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_AUTH_STREAM(trans) : \
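
Renaming the single-bit definitions to *_MASK and expressing them via REG_GENMASK(n, n) makes them usable with the i915 bitfield helpers. A sketch, assuming the standard REG_FIELD_PREP()/REG_FIELD_GET() macros from i915_reg_defs.h:

        /* Program the stream type as a field rather than an open-coded bit. */
        u32 val = REG_FIELD_PREP(AUTH_STREAM_TYPE_MASK, 1);

        /* Extract the field from a register readback. */
        u32 type = REG_FIELD_GET(STREAM_TYPE_STATUS_MASK,
                                 intel_de_read(display, reg));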
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 33b8d5229db0..9961ff259298 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -38,14 +38,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/intel/intel_lpe_audio.h>
-
#include <media/cec-notifier.h>
#include "g4x_hdmi.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -53,6 +52,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
@@ -64,6 +64,7 @@
#include "intel_panel.h"
#include "intel_pfit.h"
#include "intel_snps_phy.h"
+#include "intel_vrr.h"
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
@@ -714,7 +715,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
int ret;
if (!crtc_state->has_infoframe)
@@ -723,7 +724,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
- ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame, &connector->base,
adjusted_mode);
if (ret)
return false;
@@ -742,7 +743,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
- drm_hdmi_avi_infoframe_quant_range(frame, connector,
+ drm_hdmi_avi_infoframe_quant_range(frame, &connector->base,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
@@ -768,7 +769,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
int ret;
@@ -778,7 +779,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
- if (IS_DGFX(i915))
+ if (display->platform.dgfx)
ret = hdmi_spd_infoframe_init(frame, "Intel", "Discrete gfx");
else
ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
@@ -978,7 +979,6 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -988,9 +988,9 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
if (HAS_DDI(display))
reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.valleyview || display->platform.cherryview)
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return false;
@@ -1004,7 +1004,6 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -1014,9 +1013,9 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
if (HAS_DDI(display))
reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.valleyview || display->platform.cherryview)
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return;
@@ -1028,9 +1027,9 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+ if (display->platform.g4x || !crtc_state->has_infoframe)
return;
crtc_state->infoframes.enable |=
@@ -1538,7 +1537,6 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_connector *connector = hdmi->attached_connector;
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int ret;
if (!enable)
@@ -1557,7 +1555,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
* WA: To fix incorrect positioning of the window of
* opportunity and enc_en signalling in KABYLAKE.
*/
- if (IS_KABYLAKE(dev_priv) && enable)
+ if (display->platform.kabylake && enable)
return kbl_repositioning_enc_en_signal(connector,
cpu_transcoder);
@@ -1569,7 +1567,6 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
@@ -1582,15 +1579,15 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
if (ret)
return false;
- intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
+ intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ if (wait_for((intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
- port)));
+ intel_de_read(display, HDCP_STATUS(display, cpu_transcoder,
+ port)));
return false;
}
return true;
@@ -1813,14 +1810,13 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_tmds_clock, vbt_max_tmds_clock;
- if (DISPLAY_VER(display) >= 13 || IS_ALDERLAKE_S(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || display->platform.alderlake_s)
max_tmds_clock = 600000;
else if (DISPLAY_VER(display) >= 10)
max_tmds_clock = 594000;
- else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv))
+ else if (DISPLAY_VER(display) >= 8 || display->platform.haswell)
max_tmds_clock = 300000;
else if (DISPLAY_VER(display) >= 5)
max_tmds_clock = 225000;
@@ -1879,7 +1875,6 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
bool has_hdmi_sink)
{
struct intel_display *display = to_intel_display(hdmi);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
@@ -1889,16 +1884,16 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* GLK DPLL can't generate 446-480 MHz */
- if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+ if (display->platform.geminilake && clock > 446666 && clock < 480000)
return MODE_CLOCK_RANGE;
/* BXT/GLK DPLL can't generate 223-240 MHz */
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
- if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
+ if (display->platform.cherryview && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
/* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
@@ -1942,11 +1937,12 @@ static bool intel_hdmi_source_bpc_possible(struct intel_display *display, int bp
}
}
-static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
+static bool intel_hdmi_sink_bpc_possible(struct drm_connector *_connector,
int bpc, bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- const struct drm_display_info *info = &connector->display_info;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ const struct drm_display_info *info = &connector->base.display_info;
const struct drm_hdmi_info *hdmi = &info->hdmi;
switch (bpc) {
@@ -1975,12 +1971,13 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
}
static enum drm_mode_status
-intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
+intel_hdmi_mode_clock_valid(struct drm_connector *_connector, int clock,
bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
enum drm_mode_status status = MODE_OK;
int bpc;
@@ -1995,7 +1992,8 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
if (!intel_hdmi_source_bpc_possible(display, bpc))
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format))
+ if (!intel_hdmi_sink_bpc_possible(&connector->base, bpc, has_hdmi_sink,
+ sink_format))
continue;
status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink);
@@ -2010,15 +2008,16 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
}
static enum drm_mode_status
-intel_hdmi_mode_valid(struct drm_connector *connector,
+intel_hdmi_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *mode)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
enum drm_mode_status status;
int clock = mode->clock;
- int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
- bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
+ int max_dotclk = display->cdclk.max_dotclk_freq;
+ bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->base.state);
bool ycbcr_420_only;
enum intel_output_format sink_format;
@@ -2047,22 +2046,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
if (clock > 600000)
return MODE_CLOCK_HIGH;
- ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
+ ycbcr_420_only = drm_mode_is_420_only(&connector->base.display_info, mode);
if (ycbcr_420_only)
sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
else
sink_format = INTEL_OUTPUT_FORMAT_RGB;
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
+ status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink, sink_format);
if (status != MODE_OK) {
if (ycbcr_420_only ||
- !connector->ycbcr_420_allowed ||
- !drm_mode_is_420_also(&connector->display_info, mode))
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(&connector->base.display_info, mode))
return status;
sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
+ status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink,
+ sink_format);
if (status != MODE_OK)
return status;
}
@@ -2073,16 +2073,16 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
int bpc, bool has_hdmi_sink)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_connector_state *connector_state;
- struct drm_connector *connector;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_digital_connector_state *connector_state;
+ struct intel_connector *connector;
int i;
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->uapi.crtc)
+ for_each_new_intel_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->base.crtc != crtc_state->uapi.crtc)
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink,
+ if (!intel_hdmi_sink_bpc_possible(&connector->base, bpc, has_hdmi_sink,
crtc_state->sink_format))
return false;
}
@@ -2210,7 +2210,7 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_connector *connector = conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2218,7 +2218,7 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
return false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- return connector->display_info.has_audio;
+ return connector->base.display_info.has_audio;
else
return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
@@ -2322,14 +2322,14 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
- struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_scdc *scdc = &connector->base.display_info.hdmi.scdc;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (!connector->interlace_allowed &&
+ if (!connector->base.interlace_allowed &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return -EINVAL;
@@ -2384,6 +2384,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
conn_state);
@@ -2422,25 +2424,26 @@ void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder)
}
static void
-intel_hdmi_unset_edid(struct drm_connector *connector)
+intel_hdmi_unset_edid(struct drm_connector *_connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
- drm_edid_free(to_intel_connector(connector)->detect_edid);
- to_intel_connector(connector)->detect_edid = NULL;
+ drm_edid_free(connector->detect_edid);
+ connector->detect_edid = NULL;
}
static void
-intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
- struct i2c_adapter *ddc = connector->ddc;
+ struct i2c_adapter *ddc = connector->base.ddc;
enum drm_dp_dual_mode_type type;
type = drm_dp_dual_mode_detect(display->drm, ddc);
@@ -2455,7 +2458,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
* if the port is a dual mode capable DP port.
*/
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
- if (!connector->force &&
+ if (!connector->base.force &&
intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
drm_dbg_kms(display->drm,
"Assuming DP dual mode adaptor presence based on VBT\n");
@@ -2478,7 +2481,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
hdmi->dp_dual_mode.max_tmds_clock);
/* Older VBTs are often buggy and can't be trusted :( Play it safe. */
- if ((DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv)) &&
+ if ((DISPLAY_VER(display) >= 8 || display->platform.haswell) &&
!intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
drm_dbg_kms(display->drm,
"Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n");
@@ -2487,34 +2490,35 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
}
static bool
-intel_hdmi_set_edid(struct drm_connector *connector)
+intel_hdmi_set_edid(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct i2c_adapter *ddc = connector->ddc;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct i2c_adapter *ddc = connector->base.ddc;
intel_wakeref_t wakeref;
const struct drm_edid *drm_edid;
bool connected = false;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
- drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid = drm_edid_read_ddc(&connector->base, ddc);
if (!drm_edid && !intel_gmbus_is_forced_bit(ddc)) {
drm_dbg_kms(display->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(ddc, true);
- drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid = drm_edid_read_ddc(&connector->base, ddc);
intel_gmbus_force_bit(ddc, false);
}
/* Below we depend on display info having been updated */
- drm_edid_connector_update(connector, drm_edid);
+ drm_edid_connector_update(&connector->base, drm_edid);
- to_intel_connector(connector)->detect_edid = drm_edid;
+ connector->detect_edid = drm_edid;
if (drm_edid_is_digital(drm_edid)) {
- intel_hdmi_dp_dual_mode_detect(connector);
+ intel_hdmi_dp_dual_mode_detect(&connector->base);
connected = true;
}
@@ -2522,28 +2526,29 @@ intel_hdmi_set_edid(struct drm_connector *connector)
intel_display_power_put(display, POWER_DOMAIN_GMBUS, wakeref);
cec_notifier_set_phys_addr(intel_hdmi->cec_notifier,
- connector->display_info.source_physical_address);
+ connector->base.display_info.source_physical_address);
return connected;
}
static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector, bool force)
+intel_hdmi_detect(struct drm_connector *_connector, bool force)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
enum drm_connector_status status = connector_status_disconnected;
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_device_enabled(display))
return connector_status_disconnected;
if (!intel_display_driver_check_access(display))
- return connector->status;
+ return connector->base.status;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
@@ -2551,9 +2556,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
!intel_digital_port_connected(encoder))
goto out;
- intel_hdmi_unset_edid(connector);
+ intel_hdmi_unset_edid(&connector->base);
- if (intel_hdmi_set_edid(connector))
+ if (intel_hdmi_set_edid(&connector->base))
status = connector_status_connected;
out:
@@ -2566,49 +2571,54 @@ out:
}
static void
-intel_hdmi_force(struct drm_connector *connector)
+intel_hdmi_force(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_driver_check_access(display))
return;
- intel_hdmi_unset_edid(connector);
+ intel_hdmi_unset_edid(&connector->base);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return;
- intel_hdmi_set_edid(connector);
+ intel_hdmi_set_edid(&connector->base);
}
-static int intel_hdmi_get_modes(struct drm_connector *connector)
+static int intel_hdmi_get_modes(struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
+
/* drm_edid_connector_update() done in ->detect() or ->force() */
- return drm_edid_connector_add_modes(connector);
+ return drm_edid_connector_add_modes(&connector->base);
}
static int
-intel_hdmi_connector_register(struct drm_connector *connector)
+intel_hdmi_connector_register(struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
int ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret)
return ret;
return ret;
}
-static void intel_hdmi_connector_unregister(struct drm_connector *connector)
+static void intel_hdmi_connector_unregister(struct drm_connector *_connector)
{
- struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
cec_notifier_conn_unregister(n);
- intel_connector_unregister(connector);
+ intel_connector_unregister(&connector->base);
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2624,15 +2634,16 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_hdmi_connector_atomic_check(struct drm_connector *connector,
+static int intel_hdmi_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *state)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
if (HAS_DDI(display))
- return intel_digital_connector_atomic_check(connector, state);
+ return intel_digital_connector_atomic_check(&connector->base, state);
else
- return g4x_hdmi_connector_atomic_check(connector, state);
+ return g4x_hdmi_connector_atomic_check(&connector->base, state);
}
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
@@ -2642,22 +2653,23 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
};
static void
-intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(intel_hdmi);
- intel_attach_force_audio_property(connector);
- intel_attach_broadcast_rgb_property(connector);
- intel_attach_aspect_ratio_property(connector);
+ intel_attach_force_audio_property(&connector->base);
+ intel_attach_broadcast_rgb_property(&connector->base);
+ intel_attach_aspect_ratio_property(&connector->base);
- intel_attach_hdmi_colorspace_property(connector);
- drm_connector_attach_content_type_property(connector);
+ intel_attach_hdmi_colorspace_property(&connector->base);
+ drm_connector_attach_content_type_property(&connector->base);
if (DISPLAY_VER(display) >= 10)
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
if (!HAS_GMCH(display))
- drm_connector_attach_max_bpc_property(connector, 8, 12);
+ drm_connector_attach_max_bpc_property(&connector->base, 8, 12);
}
/*
@@ -2679,25 +2691,26 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
* True on success, false on failure.
*/
bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
- struct drm_connector *connector,
+ struct drm_connector *_connector,
bool high_tmds_clock_ratio,
bool scrambling)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(encoder);
struct drm_scrambling *sink_scrambling =
- &connector->display_info.hdmi.scdc.scrambling;
+ &connector->base.display_info.hdmi.scdc.scrambling;
if (!sink_scrambling->supported)
return true;
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
- connector->base.id, connector->name,
+ connector->base.base.id, connector->base.name,
str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
- return drm_scdc_set_high_tmds_clock_ratio(connector, high_tmds_clock_ratio) &&
- drm_scdc_set_scrambling(connector, scrambling);
+ return drm_scdc_set_high_tmds_clock_ratio(&connector->base, high_tmds_clock_ratio) &&
+ drm_scdc_set_scrambling(&connector->base, scrambling);
}
static u8 chv_encoder_to_ddc_pin(struct intel_encoder *encoder)
@@ -2808,7 +2821,7 @@ static u8 mcc_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
WARN_ON(encoder->port == PORT_C);
@@ -2819,7 +2832,7 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
* combo outputs. With CMP, the traditional DDI A-D pins are used for
* all outputs.
*/
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP && phy >= PHY_C)
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
return GMBUS_PIN_1_BXT + phy;
@@ -2828,7 +2841,6 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
drm_WARN_ON(display->drm, encoder->port == PORT_A);
@@ -2839,7 +2851,7 @@ static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
* combo outputs. With CMP, the traditional DDI A-D pins are used for
* all outputs.
*/
- if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP && phy >= PHY_C)
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
return GMBUS_PIN_1_BXT + phy;
@@ -2892,27 +2904,26 @@ static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 ddc_pin;
- if (IS_ALDERLAKE_S(dev_priv))
+ if (display->platform.alderlake_s)
ddc_pin = adls_encoder_to_ddc_pin(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
ddc_pin = dg1_encoder_to_ddc_pin(encoder);
- else if (IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.rocketlake)
ddc_pin = rkl_encoder_to_ddc_pin(encoder);
- else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(dev_priv))
+ else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(display))
ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
- else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
- HAS_PCH_TGP(dev_priv))
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
+ HAS_PCH_TGP(display))
ddc_pin = mcc_encoder_to_ddc_pin(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
ddc_pin = icl_encoder_to_ddc_pin(encoder);
- else if (HAS_PCH_CNP(dev_priv))
+ else if (HAS_PCH_CNP(display))
ddc_pin = cnp_encoder_to_ddc_pin(encoder);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
ddc_pin = bxt_encoder_to_ddc_pin(encoder);
- else if (IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.cherryview)
ddc_pin = chv_encoder_to_ddc_pin(encoder);
else
ddc_pin = g4x_encoder_to_ddc_pin(encoder);
@@ -2986,15 +2997,13 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
void intel_infoframe_init(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
dig_port->write_infoframe = vlv_write_infoframe;
dig_port->read_infoframe = vlv_read_infoframe;
dig_port->set_infoframes = vlv_set_infoframes;
dig_port->infoframes_enabled = vlv_infoframes_enabled;
- } else if (IS_G4X(dev_priv)) {
+ } else if (display->platform.g4x) {
dig_port->write_infoframe = g4x_write_infoframe;
dig_port->read_infoframe = g4x_read_infoframe;
dig_port->set_infoframes = g4x_set_infoframes;
@@ -3011,7 +3020,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->set_infoframes = hsw_set_infoframes;
dig_port->infoframes_enabled = hsw_infoframes_enabled;
}
- } else if (HAS_PCH_IBX(dev_priv)) {
+ } else if (HAS_PCH_IBX(display)) {
dig_port->write_infoframe = ibx_write_infoframe;
dig_port->read_infoframe = ibx_read_infoframe;
dig_port->set_infoframes = ibx_set_infoframes;
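The infoframe hooks above are bound once per digital port at init time and dispatched indirectly afterwards, so the platform checks run exactly once. A minimal standalone sketch of that pattern (plain userspace C, all names invented for illustration):

#include <stdio.h>

struct port_ops {
	void (*write_infoframe)(const char *type);
};

static void vlv_write(const char *type) { printf("vlv write %s\n", type); }
static void g4x_write(const char *type) { printf("g4x write %s\n", type); }

int main(void)
{
	int is_vlv = 1; /* stand-in for display->platform.valleyview */
	struct port_ops ops = {
		/* platform probed once; every later call is an indirect jump */
		.write_infoframe = is_vlv ? vlv_write : g4x_write,
	};

	ops.write_infoframe("AVI");
	return 0;
}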
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 00d7b1ccf190..265aa97fcc75 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -30,7 +30,10 @@
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_power.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
@@ -118,7 +121,7 @@ intel_connector_hpd_pin(struct intel_connector *connector)
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
- * @dev_priv: private driver data pointer
+ * @display: display device
* @pin: the pin to gather stats on
* @long_hpd: whether the HPD IRQ was long or short
*
@@ -127,13 +130,13 @@ intel_connector_hpd_pin(struct intel_connector *connector)
* responsible for further action.
*
* The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
+ * stored in @display->hotplug.hpd_storm_threshold which defaults to
* @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
* short IRQs count as +1. If this threshold is exceeded, it's considered an
* IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
*
* By default, most systems will only count long IRQs towards
- * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
+ * &display->hotplug.hpd_storm_threshold. However, some older systems also
* suffer from short IRQ storms and must also track these. Because short IRQ
* storms are naturally caused by sideband interactions with DP MST devices,
* short IRQ detection is only enabled for systems without DP MST support.
@@ -145,10 +148,10 @@ intel_connector_hpd_pin(struct intel_connector *connector)
*
* Return true if an IRQ storm was detected on @pin.
*/
-static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+static bool intel_hpd_irq_storm_detect(struct intel_display *display,
enum hpd_pin pin, bool long_hpd)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct intel_hotplug *hpd = &display->hotplug;
unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
const int increment = long_hpd ? 10 : 1;
@@ -156,7 +159,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
bool storm = false;
if (!threshold ||
- (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
+ (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
return false;
if (!time_in_range(jiffies, start, end)) {
@@ -167,11 +170,11 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Received HPD interrupt on PIN %d - cnt: %d\n",
pin,
hpd->stats[pin].count);
@@ -180,56 +183,56 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
return storm;
}
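The storm accounting above keeps only a window start and a weighted count per pin. The following self-contained sketch (userspace C, milliseconds in place of jiffies; the 1000 ms window and default threshold of 50 mirror HPD_STORM_DETECT_PERIOD and HPD_STORM_DEFAULT_THRESHOLD, everything else is invented) shows how six long pulses in one window trip the default threshold:

#include <stdbool.h>
#include <stdio.h>

#define STORM_PERIOD_MS 1000 /* mirrors HPD_STORM_DETECT_PERIOD */

struct pin_stats {
	long window_start_ms; /* mirrors stats[pin].last_jiffies */
	int count;
};

/* Returns true when this IRQ pushes the pin over the storm threshold. */
static bool storm_detect(struct pin_stats *s, long now_ms,
			 bool long_hpd, int threshold)
{
	const int increment = long_hpd ? 10 : 1; /* long IRQs count as +10 */

	if (now_ms - s->window_start_ms > STORM_PERIOD_MS) {
		/* outside the detection window: restart the accounting */
		s->window_start_ms = now_ms;
		s->count = 0;
	}

	s->count += increment;
	return s->count > threshold;
}

int main(void)
{
	struct pin_stats s = { 0, 0 };

	/* six long pulses, 10 ms apart: counts 10,20,...,60 -> storm on the 6th */
	for (int i = 0; i < 6; i++)
		printf("irq %d: storm=%d\n", i,
		       storm_detect(&s, 10 * i, true, 50));
	return 0;
}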
-static bool detection_work_enabled(struct drm_i915_private *i915)
+static bool detection_work_enabled(struct intel_display *display)
{
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- return i915->display.hotplug.detection_work_enabled;
+ return display->hotplug.detection_work_enabled;
}
static bool
-mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- if (!detection_work_enabled(i915))
+ if (!detection_work_enabled(display))
return false;
- return mod_delayed_work(i915->unordered_wq, work, delay);
+ return mod_delayed_work(display->wq.unordered, work, delay);
}
static bool
-queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- if (!detection_work_enabled(i915))
+ if (!detection_work_enabled(display))
return false;
- return queue_delayed_work(i915->unordered_wq, work, delay);
+ return queue_delayed_work(display->wq.unordered, work, delay);
}
static bool
-queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
+queue_detection_work(struct intel_display *display, struct work_struct *work)
{
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- if (!detection_work_enabled(i915))
+ if (!detection_work_enabled(display))
return false;
- return queue_work(i915->unordered_wq, work);
+ return queue_work(display->wq.unordered, work);
}
static void
-intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
+intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
bool hpd_disabled = false;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -238,15 +241,15 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
connector->base.name);
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
+ display->hotplug.stats[pin].state = HPD_DISABLED;
connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
@@ -255,36 +258,35 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.reenable_work,
+ drm_kms_helper_poll_reschedule(display->drm);
+ mod_delayed_detection_work(display,
+ &display->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- display.hotplug.reenable_work.work);
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.reenable_work.work);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
enum hpd_pin pin;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
+ display->hotplug.stats[pin].state != HPD_DISABLED)
continue;
if (connector->base.polled != connector->polled)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Reenabling HPD on connector %s\n",
connector->base.name);
connector->base.polled = connector->polled;
@@ -292,15 +294,15 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
for_each_hpd_pin(pin) {
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
+ display->hotplug.stats[pin].state = HPD_ENABLED;
}
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static enum intel_hotplug_state
@@ -349,32 +351,72 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
+static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(display->drm, encoder) {
+ if (encoder->hpd_pin != pin)
+ continue;
+
+ if (intel_encoder_has_hpd_pulse(encoder))
+ return true;
+ }
+
+ return false;
+}
+
+static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
+{
+ lockdep_assert_held(&display->irq.lock);
+
+ return display->hotplug.stats[pin].blocked_count;
+}
+
+static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
+{
+ enum hpd_pin pin;
+ u32 hpd_pin_mask = 0;
+
+ for_each_hpd_pin(pin) {
+ if (hpd_pin_is_blocked(display, pin))
+ hpd_pin_mask |= BIT(pin);
+ }
+
+ return hpd_pin_mask;
+}
+
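Both work functions below consume pending HPD state with the same two-step idiom: snapshot the mask under the lock minus any blocked pins, then clear only the bits actually taken, so events on blocked pins stay latched until the pin is unblocked. A standalone model of just that bit manipulation (illustrative userspace C):

#include <stdint.h>
#include <stdio.h>

/* Take pending events except those on blocked pins; anything blocked
 * stays latched in *pending for a later, unblocked pass. */
static uint32_t consume_events(uint32_t *pending, uint32_t blocked)
{
	uint32_t taken = *pending & ~blocked;

	*pending &= ~taken;
	return taken;
}

int main(void)
{
	uint32_t pending = 0xb; /* pins 0, 1, 3 have events */
	uint32_t blocked = 0x2; /* pin 1 is blocked */
	uint32_t taken = consume_events(&pending, blocked);

	printf("taken=0x%x pending=0x%x\n", taken, pending); /* 0x9 0x2 */
	return 0;
}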
static void i915_digport_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
- u32 long_port_mask, short_port_mask;
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.dig_port_work);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ u32 long_hpd_pin_mask, short_hpd_pin_mask;
struct intel_encoder *encoder;
+ u32 blocked_hpd_pin_mask;
u32 old_bits = 0;
- spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->display.hotplug.long_port_mask;
- dev_priv->display.hotplug.long_port_mask = 0;
- short_port_mask = dev_priv->display.hotplug.short_port_mask;
- dev_priv->display.hotplug.short_port_mask = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
+ short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;
+
+ spin_unlock_irq(&display->irq.lock);
+
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_digital_port *dig_port;
- enum port port = encoder->port;
+ enum hpd_pin pin = encoder->hpd_pin;
bool long_hpd, short_hpd;
enum irqreturn ret;
if (!intel_encoder_has_hpd_pulse(encoder))
continue;
- long_hpd = long_port_mask & BIT(port);
- short_hpd = short_port_mask & BIT(port);
+ long_hpd = long_hpd_pin_mask & BIT(pin);
+ short_hpd = short_hpd_pin_mask & BIT(pin);
if (!long_hpd && !short_hpd)
continue;
@@ -384,16 +426,16 @@ static void i915_digport_work_func(struct work_struct *work)
ret = dig_port->hpd_pulse(dig_port, long_hpd);
if (ret == IRQ_NONE) {
/* fall back to old school hpd */
- old_bits |= BIT(encoder->hpd_pin);
+ old_bits |= BIT(pin);
}
}
if (old_bits) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.event_bits |= old_bits;
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.event_bits |= old_bits;
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
+ spin_unlock_irq(&display->irq.lock);
}
}
@@ -406,13 +448,17 @@ static void i915_digport_work_func(struct work_struct *work)
*/
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ struct intel_encoder *encoder = &dig_port->base;
+
+ spin_lock_irq(&display->irq.lock);
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
- spin_unlock_irq(&i915->irq_lock);
+ hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
+ if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
- queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
+ spin_unlock_irq(&display->irq.lock);
}
/*
@@ -420,9 +466,9 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
*/
static void i915_hotplug_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.hotplug_work.work);
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.hotplug_work.work);
+ struct intel_hotplug *hotplug = &display->hotplug;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
u32 changed = 0, retry = 0;
@@ -430,30 +476,32 @@ static void i915_hotplug_work_func(struct work_struct *work)
u32 hpd_retry_bits;
struct drm_connector *first_changed_connector = NULL;
int changed_connectors = 0;
+ u32 blocked_hpd_pin_mask;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
+ mutex_lock(&display->drm->mode_config.mutex);
+ drm_dbg_kms(display->drm, "running encoder hotplug functions\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- hpd_event_bits = dev_priv->display.hotplug.event_bits;
- dev_priv->display.hotplug.event_bits = 0;
- hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
- dev_priv->display.hotplug.retry_bits = 0;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
+ hotplug->event_bits &= ~hpd_event_bits;
+ hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
+ hotplug->retry_bits &= ~hpd_retry_bits;
/* Enable polling for connectors which had HPD IRQ storms */
- intel_hpd_irq_storm_switch_to_polling(dev_priv);
+ intel_hpd_irq_storm_switch_to_polling(display);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Skip calling encoder hotplug handlers if the ignore long HPD flag is set */
- if (dev_priv->display.hotplug.ignore_long_hpd) {
- drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ if (display->hotplug.ignore_long_hpd) {
+ drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
+ mutex_unlock(&display->drm->mode_config.mutex);
return;
}
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
u32 hpd_bit;
@@ -472,7 +520,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
else
connector->hotplug_retries++;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Connector %s (pin %i) received hotplug event. (retry %d)\n",
connector->base.name, pin,
connector->hotplug_retries);
@@ -495,12 +543,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (changed_connectors == 1)
drm_kms_helper_connector_hotplug_event(first_changed_connector);
else if (changed_connectors > 0)
- drm_kms_helper_hotplug_event(&dev_priv->drm);
+ drm_kms_helper_hotplug_event(display->drm);
if (first_changed_connector)
drm_connector_put(first_changed_connector);
@@ -508,20 +556,20 @@ static void i915_hotplug_work_func(struct work_struct *work)
/* Remove shared HPD pins that have changed */
retry &= ~changed;
if (retry) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.retry_bits |= retry;
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.retry_bits |= retry;
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work,
+ mod_delayed_detection_work(display,
+ &display->hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
}
/**
* intel_hpd_irq_handler - main hotplug irq handler
- * @dev_priv: drm_i915_private
+ * @display: display device
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
@@ -535,7 +583,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask)
{
struct intel_encoder *encoder;
@@ -548,7 +596,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!pin_mask)
return;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
/*
* Determine whether ->hpd_pulse() exists for each pin, and
@@ -556,8 +604,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* as each pin may have up to two encoders (HDMI and DP) and
* only one of them (DP) will have ->hpd_pulse().
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- enum port port = encoder->port;
+ for_each_intel_encoder(display->drm, encoder) {
bool long_hpd;
pin = encoder->hpd_pin;
@@ -569,18 +616,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"digital hpd on [ENCODER:%d:%s] - %s\n",
encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
- queue_dig = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_dig = true;
if (long_hpd) {
long_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.long_port_mask |= BIT(port);
+ display->hotplug.long_hpd_pin_mask |= BIT(pin);
} else {
short_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.short_port_mask |= BIT(port);
+ display->hotplug.short_hpd_pin_mask |= BIT(pin);
}
}
@@ -591,20 +640,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(pin) & pin_mask))
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
+ if (display->hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
* hotplug bits themselves. So only WARN about unexpected
* interrupts on saner platforms.
*/
- drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+ drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
"Received HPD interrupt on pin %d although disabled\n",
pin);
continue;
}
- if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
+ if (display->hotplug.stats[pin].state != HPD_ENABLED)
continue;
/*
@@ -615,13 +664,15 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
long_hpd = long_hpd_pulse_mask & BIT(pin);
} else {
- dev_priv->display.hotplug.event_bits |= BIT(pin);
+ display->hotplug.event_bits |= BIT(pin);
long_hpd = true;
- queue_hp = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_hp = true;
}
- if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
- dev_priv->display.hotplug.event_bits &= ~BIT(pin);
+ if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
+ display->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
queue_hp = true;
}
@@ -632,7 +683,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* happens later in our hotplug work.
*/
if (storm_detected)
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
/*
* Our hotplug handler can grab modeset locks (by calling down into the
@@ -641,17 +692,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* deadlock.
*/
if (queue_dig)
- queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
+ queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&display->irq.lock);
}
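To make the mask convention concrete: each bit position is an hpd_pin, pin_mask says which pins fired, and long_mask marks the subset whose pulses were long. A tiny standalone illustration (the enum values here are invented for the sketch, not the driver's actual hpd_pin numbering):

#include <stdint.h>
#include <stdio.h>

enum hpd_pin { HPD_PORT_A, HPD_PORT_B, HPD_PORT_C, HPD_NUM_PINS };
#define BIT(n) (1u << (n))

int main(void)
{
	/* hypothetical IRQ: pins A and B fired, only B was a long pulse */
	uint32_t pin_mask = BIT(HPD_PORT_A) | BIT(HPD_PORT_B);
	uint32_t long_mask = BIT(HPD_PORT_B);

	for (int pin = 0; pin < HPD_NUM_PINS; pin++) {
		if (!(pin_mask & BIT(pin)))
			continue;
		printf("pin %d: %s pulse\n", pin,
		       (long_mask & BIT(pin)) ? "long" : "short");
	}
	return 0;
}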
/**
* intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables the hotplug support. It requires that interrupts have
* already been enabled with intel_irq_init_hw(). From this point on hotplug and
@@ -663,40 +714,40 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
*
* Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
*/
-void intel_hpd_init(struct drm_i915_private *dev_priv)
+void intel_hpd_init(struct intel_display *display)
{
int i;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
for_each_hpd_pin(i) {
- dev_priv->display.hotplug.stats[i].count = 0;
- dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
+ display->hotplug.stats[i].count = 0;
+ display->hotplug.stats[i].state = HPD_ENABLED;
}
/*
* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- intel_hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ intel_hpd_irq_setup(display);
+ spin_unlock_irq(&display->irq.lock);
}
-static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
+static void i915_hpd_poll_detect_connectors(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
struct intel_connector *first_changed_connector = NULL;
int changed = 0;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- if (!i915->drm.mode_config.poll_enabled)
+ if (!display->drm->mode_config.poll_enabled)
goto out;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
continue;
@@ -714,7 +765,7 @@ static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
out:
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!changed)
return;
@@ -722,25 +773,23 @@ out:
if (changed == 1)
drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
else
- drm_kms_helper_hotplug_event(&i915->drm);
+ drm_kms_helper_hotplug_event(display->drm);
drm_connector_put(&first_changed_connector->base);
}
static void i915_hpd_poll_init_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.poll_init_work);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.poll_init_work);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
intel_wakeref_t wakeref;
bool enabled;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
+ enabled = READ_ONCE(display->hotplug.poll_enabled);
/*
* Prevent taking a power reference from this sequence of
* i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
@@ -750,14 +799,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (!enabled) {
wakeref = intel_display_power_get(display,
POWER_DOMAIN_DISPLAY_CORE);
- drm_WARN_ON(&dev_priv->drm,
- READ_ONCE(dev_priv->display.hotplug.poll_enabled));
- cancel_work(&dev_priv->display.hotplug.poll_init_work);
+ drm_WARN_ON(display->drm,
+ READ_ONCE(display->hotplug.poll_enabled));
+ cancel_work(&display->hotplug.poll_init_work);
}
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -765,7 +814,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (pin == HPD_NONE)
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
continue;
connector->base.polled = connector->polled;
@@ -776,19 +825,19 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}
drm_connector_list_iter_end(&conn_iter);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
if (enabled)
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
+ drm_kms_helper_poll_reschedule(display->drm);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/*
* We might have missed any hotplugs that happened while we were
* in the middle of disabling polling
*/
if (!enabled) {
- i915_hpd_poll_detect_connectors(dev_priv);
+ i915_hpd_poll_detect_connectors(display);
intel_display_power_put(display,
POWER_DOMAIN_DISPLAY_CORE,
@@ -798,7 +847,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
/**
* intel_hpd_poll_enable - enable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -812,15 +861,12 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*
* Also see: intel_hpd_init() and intel_hpd_poll_disable().
*/
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_enable(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (!HAS_DISPLAY(dev_priv) ||
- !intel_display_device_enabled(display))
+ if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
+ WRITE_ONCE(display->hotplug.poll_enabled, true);
/*
* We might already be holding dev->mode_config.mutex, so do this in a
@@ -828,15 +874,15 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
+ spin_unlock_irq(&display->irq.lock);
}
/**
* intel_hpd_poll_disable - disable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function disables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -853,26 +899,31 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
*
* Also see: intel_hpd_init() and intel_hpd_poll_enable().
*/
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_disable(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
+ for_each_intel_dp(display->drm, encoder)
+ intel_dp_dpcd_set_probe(enc_to_intel_dp(encoder), true);
- spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
- spin_unlock_irq(&dev_priv->irq_lock);
+ WRITE_ONCE(display->hotplug.poll_enabled, false);
+
+ spin_lock_irq(&display->irq.lock);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
+ spin_unlock_irq(&display->irq.lock);
}
-void intel_hpd_poll_fini(struct drm_i915_private *i915)
+void intel_hpd_poll_fini(struct intel_display *display)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
/* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
intel_connector_cancel_modeset_retry_work(connector);
intel_hdcp_cancel_works(connector);
@@ -880,157 +931,261 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_hpd_init_early(struct drm_i915_private *i915)
+void intel_hpd_init_early(struct intel_display *display)
{
- INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
+ INIT_DELAYED_WORK(&display->hotplug.hotplug_work,
i915_hotplug_work_func);
- INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
- INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
- INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
+ INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&display->hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
- i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
/* If we have MST support, we want to avoid doing short HPD IRQ storm
* detection, as short HPD storms will occur as a natural part of
* sideband messaging with MST.
* On older platforms however, IRQ storms can occur with both long and
* short pulses, as seen on some G4x systems.
*/
- i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
+ display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display);
}
-static bool cancel_all_detection_work(struct drm_i915_private *i915)
+static bool cancel_all_detection_work(struct intel_display *display)
{
bool was_pending = false;
- if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
+ if (cancel_delayed_work_sync(&display->hotplug.hotplug_work))
was_pending = true;
- if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
+ if (cancel_work_sync(&display->hotplug.poll_init_work))
was_pending = true;
- if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
+ if (cancel_delayed_work_sync(&display->hotplug.reenable_work))
was_pending = true;
return was_pending;
}
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+void intel_hpd_cancel_work(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+
+ drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));
- dev_priv->display.hotplug.long_port_mask = 0;
- dev_priv->display.hotplug.short_port_mask = 0;
- dev_priv->display.hotplug.event_bits = 0;
- dev_priv->display.hotplug.retry_bits = 0;
+ display->hotplug.long_hpd_pin_mask = 0;
+ display->hotplug.short_hpd_pin_mask = 0;
+ display->hotplug.event_bits = 0;
+ display->hotplug.retry_bits = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
- cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
+ cancel_work_sync(&display->hotplug.dig_port_work);
/*
* All other work triggered by hotplug events should be canceled by
* now.
*/
- if (cancel_all_detection_work(dev_priv))
- drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
+ if (cancel_all_detection_work(display))
+ drm_dbg_kms(display->drm, "Hotplug detection work still active\n");
}
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static void queue_work_for_missed_irqs(struct intel_display *display)
{
- bool ret = false;
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool queue_hp_work = false;
+ u32 blocked_hpd_pin_mask;
+ enum hpd_pin pin;
- if (pin == HPD_NONE)
- return false;
+ lockdep_assert_held(&display->irq.lock);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
- ret = true;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask)
+ queue_hp_work = true;
+
+ for_each_hpd_pin(pin) {
+ switch (display->hotplug.stats[pin].state) {
+ case HPD_MARK_DISABLED:
+ queue_hp_work = true;
+ break;
+ case HPD_DISABLED:
+ case HPD_ENABLED:
+ break;
+ default:
+ MISSING_CASE(display->hotplug.stats[pin].state);
+ }
}
- spin_unlock_irq(&dev_priv->irq_lock);
- return ret;
+ if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask)
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
+
+ if (queue_hp_work)
+ queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
}
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
- if (pin == HPD_NONE)
+ struct intel_hotplug *hotplug = &display->hotplug;
+
+ lockdep_assert_held(&display->irq.lock);
+
+ hotplug->stats[pin].blocked_count++;
+
+ return hotplug->stats[pin].blocked_count == 1;
+}
+
+static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
+{
+ struct intel_hotplug *hotplug = &display->hotplug;
+
+ lockdep_assert_held(&display->irq.lock);
+
+ if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
+ return true;
+
+ hotplug->stats[pin].blocked_count--;
+
+ return hotplug->stats[pin].blocked_count == 0;
+}
+
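block_hpd_pin()/unblock_hpd_pin() are a plain nesting counter: only the outermost transitions (0 -> 1 on block, 1 -> 0 on unblock) tell the caller to act. A minimal standalone model (the driver WARNs on underflow and reports the pin as unblocked; the model simply asserts):

#include <assert.h>
#include <stdbool.h>

static int blocked_count;

static bool block(void)
{
	return ++blocked_count == 1; /* true only for the first blocker */
}

static bool unblock(void)
{
	assert(blocked_count > 0);
	return --blocked_count == 0; /* true only for the last unblock */
}

int main(void)
{
	assert(block());     /* 0 -> 1: caller flushes pending pulse work */
	assert(!block());    /* nested: nothing to do */
	assert(!unblock());
	assert(unblock());   /* 1 -> 0: caller re-queues missed IRQ work */
	return 0;
}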
+/**
+ * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to block the HPD handling for
+ *
+ * Blocks the handling of HPD IRQs on the HPD pin of @encoder.
+ *
+ * On return:
+ *
+ * - It's guaranteed that the blocked encoder's HPD pulse handler
+ * (via intel_digital_port::hpd_pulse()) is not running.
+ * - The hotplug event handling (via intel_encoder::hotplug()) of an
+ * HPD IRQ pending at the time this function is called may still be
+ * running.
+ * - Detection on the encoder's connector (via
+ * drm_connector_helper_funcs::detect_ctx(),
+ * drm_connector_funcs::detect()) remains allowed, for instance as part of
+ * userspace connector probing, or DRM core's connector polling.
+ *
+ * The call must be followed by calling intel_hpd_unblock(), or
+ * intel_hpd_clear_and_unblock().
+ *
+ * Note that the handling of HPD IRQs for another encoder using the same HPD
+ * pin as that of @encoder will also be blocked.
+ */
+void intel_hpd_block(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool do_flush = false;
+
+ if (encoder->hpd_pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&display->irq.lock);
+
+ if (block_hpd_pin(display, encoder->hpd_pin))
+ do_flush = true;
+
+ spin_unlock_irq(&display->irq.lock);
+
+ if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin))
+ flush_work(&hotplug->dig_port_work);
+}
+
+/**
+ * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
+ * HPD pin while it was blocked will be handled for @encoder and for any
+ * other encoder sharing the same HPD pin.
+ */
+void intel_hpd_unblock(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (encoder->hpd_pin == HPD_NONE)
return;
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+
+ if (unblock_hpd_pin(display, encoder->hpd_pin))
+ queue_work_for_missed_irqs(display);
+
+ spin_unlock_irq(&display->irq.lock);
}
-static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
+/**
+ * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
+ * HPD pin while it was blocked will be cleared; only new IRQs are handled.
+ */
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder)
{
- bool queue_work = false;
- enum hpd_pin pin;
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ enum hpd_pin pin = encoder->hpd_pin;
- lockdep_assert_held(&i915->irq_lock);
+ if (pin == HPD_NONE)
+ return;
- if (i915->display.hotplug.event_bits ||
- i915->display.hotplug.retry_bits)
- queue_work = true;
+ spin_lock_irq(&display->irq.lock);
- for_each_hpd_pin(pin) {
- switch (i915->display.hotplug.stats[pin].state) {
- case HPD_MARK_DISABLED:
- queue_work = true;
- break;
- case HPD_ENABLED:
- break;
- default:
- MISSING_CASE(i915->display.hotplug.stats[pin].state);
- }
+ if (unblock_hpd_pin(display, pin)) {
+ hotplug->event_bits &= ~BIT(pin);
+ hotplug->retry_bits &= ~BIT(pin);
+ hotplug->short_hpd_pin_mask &= ~BIT(pin);
+ hotplug->long_hpd_pin_mask &= ~BIT(pin);
}
- if (queue_work)
- queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+ spin_unlock_irq(&display->irq.lock);
}
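Taken together, a caller serializing against the HPD pulse handler would pair the helpers roughly as below (an illustrative sketch, not code from this patch; replay_stale_hpd is a made-up condition standing in for whether latched IRQs should be replayed or dropped):

	intel_hpd_block(encoder);   /* ->hpd_pulse() guaranteed not running */

	/* ... modify state that ->hpd_pulse() would otherwise race with;
	 * note that an already queued hotplug event may still be handled */

	if (replay_stale_hpd)
		intel_hpd_unblock(encoder);           /* replay latched IRQs */
	else
		intel_hpd_clear_and_unblock(encoder); /* drop latched IRQs */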
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_enable_detection_work(struct intel_display *display)
{
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = true;
- queue_work_for_missed_irqs(i915);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.detection_work_enabled = true;
+ queue_work_for_missed_irqs(display);
+ spin_unlock_irq(&display->irq.lock);
}
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_disable_detection_work(struct intel_display *display)
{
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = false;
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.detection_work_enabled = false;
+ spin_unlock_irq(&display->irq.lock);
- cancel_all_detection_work(i915);
+ cancel_all_detection_work(display);
}
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
+bool intel_hpd_schedule_detection(struct intel_display *display)
{
unsigned long flags;
bool ret;
- spin_lock_irqsave(&i915->irq_lock, flags);
- ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
- spin_unlock_irqrestore(&i915->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
+ ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return ret;
}
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->display.hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
+ flush_work(&display->hotplug.dig_port_work);
+ flush_delayed_work(&display->hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -1044,8 +1199,8 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct intel_hotplug *hotplug = &display->hotplug;
unsigned int new_threshold;
int i;
char *newline;
@@ -1070,21 +1225,21 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
return -EINVAL;
if (new_threshold > 0)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting HPD storm detection threshold to %d\n",
new_threshold);
else
- drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+ drm_dbg_kms(display->drm, "Disabling HPD storm detection\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
hotplug->hpd_storm_threshold = new_threshold;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
@@ -1105,10 +1260,10 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Enabled: %s\n",
- str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
+ str_yes_no(display->hotplug.hpd_short_storm_enabled));
return 0;
}
@@ -1125,8 +1280,8 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct intel_hotplug *hotplug = &display->hotplug;
char *newline;
char tmp[16];
int i;
@@ -1147,22 +1302,22 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
/* Reset to the "default" state for this system */
if (strcmp(tmp, "reset") == 0)
- new_state = !HAS_DP_MST(dev_priv);
+ new_state = !HAS_DP_MST(display);
else if (kstrtobool(tmp, &new_state) != 0)
return -EINVAL;
- drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n",
new_state ? "En" : "Dis");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
hotplug->hpd_short_storm_enabled = new_state;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
@@ -1176,14 +1331,14 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = {
.write = i915_hpd_short_storm_ctl_write,
};
-void intel_hpd_debugfs_register(struct drm_i915_private *i915)
+void intel_hpd_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_storm_ctl_fops);
+ display, &i915_hpd_storm_ctl_fops);
debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_short_storm_ctl_fops);
+ display, &i915_hpd_short_storm_ctl_fops);
debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
- &i915->display.hotplug.ignore_long_hpd);
+ &display->hotplug.ignore_long_hpd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index d6986902b054..edc41c9d3d65 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,30 +8,31 @@
#include <linux/types.h>
-struct drm_i915_private;
+enum port;
struct intel_connector;
struct intel_digital_port;
+struct intel_display;
struct intel_encoder;
-enum port;
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_fini(struct drm_i915_private *i915);
+void intel_hpd_poll_enable(struct intel_display *display);
+void intel_hpd_poll_disable(struct intel_display *display);
+void intel_hpd_poll_fini(struct intel_display *display);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask);
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
-void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_early(struct drm_i915_private *i915);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+void intel_hpd_init(struct intel_display *display);
+void intel_hpd_init_early(struct intel_display *display);
+void intel_hpd_cancel_work(struct intel_display *display);
enum hpd_pin intel_hpd_pin_default(enum port port);
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_debugfs_register(struct drm_i915_private *i915);
+void intel_hpd_block(struct intel_encoder *encoder);
+void intel_hpd_unblock(struct intel_encoder *encoder);
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder);
+void intel_hpd_debugfs_register(struct intel_display *display);
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915);
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915);
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915);
+void intel_hpd_enable_detection_work(struct intel_display *display);
+void intel_hpd_disable_detection_work(struct intel_display *display);
+bool intel_hpd_schedule_detection(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 2137ac7b882a..43aee70597bf 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -3,10 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_gmbus.h"
@@ -131,68 +134,67 @@ static const u32 hpd_mtp[HPD_NUM_PINS] = {
[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
};
-static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
+static void intel_hpd_init_pins(struct intel_display *display)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct intel_hotplug *hpd = &display->hotplug;
- if (HAS_GMCH(dev_priv)) {
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv))
+ if (HAS_GMCH(display)) {
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
hpd->hpd = hpd_status_g4x;
else
hpd->hpd = hpd_status_i915;
return;
}
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
hpd->hpd = hpd_xelpdp;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
hpd->hpd = hpd_gen11;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
hpd->hpd = hpd_bxt;
- else if (DISPLAY_VER(dev_priv) == 9)
+ else if (DISPLAY_VER(display) == 9)
hpd->hpd = NULL; /* no north HPD on SKL */
- else if (DISPLAY_VER(dev_priv) >= 8)
+ else if (DISPLAY_VER(display) >= 8)
hpd->hpd = hpd_bdw;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
hpd->hpd = hpd_ivb;
else
hpd->hpd = hpd_ilk;
- if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
- (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
+ if ((INTEL_PCH_TYPE(display) < PCH_DG1) &&
+ (!HAS_PCH_SPLIT(display) || HAS_PCH_NOP(display)))
return;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
hpd->pch_hpd = hpd_mtp;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
hpd->pch_hpd = hpd_sde_dg1;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
- else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
+ else if (HAS_PCH_CNP(display) || HAS_PCH_SPT(display))
hpd->pch_hpd = hpd_spt;
- else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
+ else if (HAS_PCH_LPT(display) || HAS_PCH_CPT(display))
hpd->pch_hpd = hpd_cpt;
- else if (HAS_PCH_IBX(dev_priv))
+ else if (HAS_PCH_IBX(display))
hpd->pch_hpd = hpd_ibx;
else
- MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
+ MISSING_CASE(INTEL_PCH_TYPE(display));
}
/* For display hotplug interrupt */
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits)
{
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, bits & ~mask);
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, bits & ~mask);
- intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN(dev_priv), mask,
- bits);
+ intel_de_rmw(display, PORT_HOTPLUG_EN(display), mask, bits);
}
/**
* i915_hotplug_interrupt_update - update hotplug interrupt enable
- * @dev_priv: driver private
+ * @display: display device
* @mask: bits to update
* @bits: bits to enable
* NOTE: the HPD enable bits are modified both inside and outside
@@ -202,13 +204,13 @@ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* held already, this function acquires the lock itself. A non-locking
* version is also available.
*/
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask,
u32 bits)
{
- spin_lock_irq(&dev_priv->irq_lock);
- i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_hotplug_interrupt_update_locked(display, mask, bits);
+ spin_unlock_irq(&display->irq.lock);
}
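
For readers outside the driver, the pair above follows the usual locked/unlocked split: the _locked variant asserts the lock is held, while the plain variant acquires it around the call. A minimal userspace sketch of that idiom, using a pthread mutex as a stand-in for display->irq.lock (the real code uses spin_lock_irq(), which additionally disables interrupts; the names below are hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int hotplug_en;

/* caller must hold irq_lock */
static void hotplug_update_locked(unsigned int mask, unsigned int bits)
{
	hotplug_en = (hotplug_en & ~mask) | bits;	/* the rmw step */
}

static void hotplug_update(unsigned int mask, unsigned int bits)
{
	pthread_mutex_lock(&irq_lock);
	hotplug_update_locked(mask, bits);
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	hotplug_update(0xff, 0x21);
	printf("hotplug_en = 0x%02x\n", hotplug_en);
	return 0;
}
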
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
@@ -339,7 +341,7 @@ static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
*
* Note that the caller is expected to zero out the masks initially.
*/
-static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
+static void intel_get_hpd_pins(struct intel_display *display,
u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
@@ -359,37 +361,37 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
*long_mask |= BIT(pin);
}
- drm_dbg(&dev_priv->drm,
- "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
+ drm_dbg_kms(display->drm,
+ "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
-static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_enabled_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 enabled_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
- if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+ for_each_intel_encoder(display->drm, encoder)
+ if (display->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
}
-static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_hotplug_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 hotplug_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug_irqs |= hpd[encoder->hpd_pin];
return hotplug_irqs;
}
-static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_mask(struct intel_display *display,
hotplug_mask_func hotplug_mask)
{
enum hpd_pin pin;
@@ -401,25 +403,25 @@ static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
return hotplug;
}
-static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_enables(struct intel_display *display,
hotplug_enables_func hotplug_enables)
{
struct intel_encoder *encoder;
u32 hotplug = 0;
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug |= hotplug_enables(encoder);
return hotplug;
}
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
+u32 i9xx_hpd_irq_ack(struct intel_display *display)
{
u32 hotplug_status = 0, hotplug_status_mask;
int i;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
else
@@ -435,53 +437,51 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
* bits can itself generate a new hotplug interrupt :(
*/
for (i = 0; i < 10; i++) {
- u32 tmp = intel_uncore_read(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv)) & hotplug_status_mask;
+ u32 tmp = intel_de_read(display,
+ PORT_HOTPLUG_STAT(display)) & hotplug_status_mask;
if (tmp == 0)
return hotplug_status;
hotplug_status |= tmp;
- intel_uncore_write(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv),
- hotplug_status);
+ intel_de_write(display, PORT_HOTPLUG_STAT(display),
+ hotplug_status);
}
- drm_WARN_ONCE(&dev_priv->drm, 1,
+ drm_WARN_ONCE(display->drm, 1,
"PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT(dev_priv)));
+ intel_de_read(display, PORT_HOTPLUG_STAT(display)));
return hotplug_status;
}
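
A minimal sketch of the read/write-to-clear retry loop that i9xx_hpd_irq_ack() implements above, since acking the sticky status bits can itself latch new ones. Here read_reg()/write_reg() and the fake latch are hypothetical stand-ins for the intel_de_read()/intel_de_write() mmio accessors:

#include <stdio.h>

/* hypothetical stand-ins for the mmio accessors, backed by a fake latch */
static unsigned int fake_port_hotplug_stat = 0x05;

static unsigned int read_reg(void)
{
	return fake_port_hotplug_stat;
}

static void write_reg(unsigned int val)
{
	/* hardware clears the status bits that are written back as 1 */
	fake_port_hotplug_stat &= ~val;
}

static unsigned int ack_hotplug_status(unsigned int mask)
{
	unsigned int status = 0;
	int i;

	/* bounded retry: clearing the bits may latch new ones */
	for (i = 0; i < 10; i++) {
		unsigned int tmp = read_reg() & mask;

		if (tmp == 0)
			return status;	/* register read back clean */

		status |= tmp;
		write_reg(status);	/* write-to-clear */
	}

	return status;	/* did not settle; the real code warns here */
}

int main(void)
{
	printf("acked status 0x%02x\n", ack_hotplug_status(0xff));
	return 0;
}
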
-void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status)
{
- struct intel_display *display = &dev_priv->display;
u32 pin_mask = 0, long_mask = 0;
u32 hotplug_trigger;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
else
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
if (hotplug_trigger) {
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, hotplug_trigger,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
- if ((IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
intel_dp_aux_irq_handler(display);
}
-void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
@@ -491,7 +491,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ dig_hotplug_reg = intel_de_read(display, PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
@@ -500,63 +500,61 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
dig_hotplug_reg &= ~mask;
}
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ intel_de_write(display, PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
pch_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &i915->display;
enum hpd_pin pin;
u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
u32 pin_mask = 0, long_mask = 0;
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
trigger_aux |= iir & XE2LPD_AUX_DDI_MASK;
for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
u32 val;
- if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger))
+ if (!(display->hotplug.hpd[pin] & hotplug_trigger))
continue;
pin_mask |= BIT(pin);
- val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin));
- intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val);
+ val = intel_de_read(display, XELPDP_PORT_HOTPLUG_CTL(pin));
+ intel_de_write(display, XELPDP_PORT_HOTPLUG_CTL(pin), val);
if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT))
long_mask |= BIT(pin);
}
if (pin_mask) {
- drm_dbg(&i915->drm,
- "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, pin_mask, long_mask);
+ drm_dbg_kms(display->drm,
+ "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, pin_mask, long_mask);
- intel_hpd_irq_handler(i915, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
if (trigger_aux)
intel_dp_aux_irq_handler(display);
if (!pin_mask && !trigger_aux)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Unexpected DE HPD/AUX interrupt 0x%08x\n", iir);
}
-void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void icp_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
u32 pin_mask = 0, long_mask = 0;
@@ -565,37 +563,36 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
u32 dig_hotplug_reg;
/* Locking due to DSI native GPIO sequences */
- spin_lock(&dev_priv->irq_lock);
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
- spin_unlock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_DDI, 0, 0);
+ spin_unlock(&display->irq.lock);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_TC, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_tc_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_ICP)
intel_gmbus_irq_handler(display);
}
-void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void spt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -604,61 +601,61 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG2, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
intel_gmbus_irq_handler(display);
}
-void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
ilk_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
bxt_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir)
{
u32 pin_mask = 0, long_mask = 0;
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
@@ -667,29 +664,29 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
else
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unexpected DE HPD interrupt 0x%08x\n", iir);
}
@@ -711,7 +708,7 @@ static u32 ibx_hotplug_mask(enum hpd_pin hpd_pin)
static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
switch (encoder->hpd_pin) {
case HPD_PORT_A:
@@ -719,7 +716,7 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
- return HAS_PCH_LPT_LP(i915) ?
+ return HAS_PCH_LPT_LP(display) ?
PORTA_HOTPLUG_ENABLE : 0;
case HPD_PORT_B:
return PORTB_HOTPLUG_ENABLE |
@@ -735,37 +732,37 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the DisplayPort spec).
* The pulse duration bits are reserved on LPT+.
*/
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, ibx_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ibx_hotplug_enables));
}
static void ibx_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- ibx_hotplug_mask(encoder->hpd_pin),
- ibx_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ ibx_hotplug_mask(encoder->hpd_pin),
+ ibx_hotplug_enables(encoder));
}
-static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- ibx_hpd_detection_setup(dev_priv);
+ ibx_hpd_detection_setup(display);
}
static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -806,36 +803,36 @@ static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
return icp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, icp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_ddi_hotplug_enables));
}
static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI,
- icp_ddi_hotplug_mask(encoder->hpd_pin),
- icp_ddi_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ icp_ddi_hotplug_mask(encoder->hpd_pin),
+ icp_ddi_hotplug_enables(encoder));
}
-static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, icp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_tc_hotplug_enables));
}
static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC,
- icp_tc_hotplug_mask(encoder->hpd_pin),
- icp_tc_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ icp_tc_hotplug_mask(encoder->hpd_pin),
+ icp_tc_hotplug_enables(encoder));
}
static void icp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -844,23 +841,23 @@ static void icp_hpd_enable_detection(struct intel_encoder *encoder)
icp_tc_hpd_enable_detection(encoder);
}
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
* We reduce the value to 250us to be able to detect SHPD when an external display
* is connected. This is also expected by the DP 1.4a spec (Table 3-4).
*/
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- icp_ddi_hpd_detection_setup(dev_priv);
- icp_tc_hpd_detection_setup(dev_priv);
+ icp_ddi_hpd_detection_setup(display);
+ icp_tc_hpd_detection_setup(display);
}
static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin)
@@ -883,88 +880,88 @@ static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
return gen11_hotplug_mask(encoder->hpd_pin);
}
-static void dg1_hpd_invert(struct drm_i915_private *i915)
+static void dg1_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
INVERT_DDIC_HPD |
INVERT_DDID_HPD);
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void dg1_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- dg1_hpd_invert(i915);
+ dg1_hpd_invert(display);
icp_hpd_enable_detection(encoder);
}
-static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void dg1_hpd_irq_setup(struct intel_display *display)
{
- dg1_hpd_invert(dev_priv);
- icp_hpd_irq_setup(dev_priv);
+ dg1_hpd_invert(display);
+ icp_hpd_irq_setup(display);
}
-static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
-static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tbt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
static void gen11_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
gen11_tc_hpd_enable_detection(encoder);
gen11_tbt_hpd_enable_detection(encoder);
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
icp_hpd_enable_detection(encoder);
}
-static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void gen11_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
- ~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
+ intel_de_rmw(display, GEN11_DE_HPD_IMR, hotplug_irqs,
+ ~enabled_irqs & hotplug_irqs);
+ intel_de_posting_read(display, GEN11_DE_HPD_IMR);
- gen11_tc_hpd_detection_setup(dev_priv);
- gen11_tbt_hpd_detection_setup(dev_priv);
+ gen11_tc_hpd_detection_setup(display);
+ gen11_tbt_hpd_detection_setup(display);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_hpd_irq_setup(display);
}
static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1001,39 +998,39 @@ static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder)
return mtp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, mtp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_ddi_hotplug_enables));
}
static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
mtp_ddi_hotplug_mask(encoder->hpd_pin),
mtp_ddi_hotplug_enables(encoder));
}
-static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, mtp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_tc_hotplug_enables));
}
static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
mtp_tc_hotplug_mask(encoder->hpd_pin),
mtp_tc_hotplug_enables(encoder));
}
-static void mtp_hpd_invert(struct drm_i915_private *i915)
+static void mtp_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
@@ -1044,49 +1041,49 @@ static void mtp_hpd_invert(struct drm_i915_private *i915)
INVERT_TC4_HPD |
INVERT_DDID_HPD_MTP |
INVERT_DDIE_HPD);
- intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void mtp_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- mtp_hpd_invert(i915);
+ mtp_hpd_invert(display);
mtp_ddi_hpd_enable_detection(encoder);
mtp_tc_hpd_enable_detection(encoder);
}
-static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
+static void mtp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
* Use 250us here to align with the DP 1.4a spec (Table 3-4) as to what the
* SHPD_FILTER_CNT value should be.
*/
- intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- mtp_hpd_invert(i915);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ mtp_hpd_invert(display);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
-static void xe2lpd_sde_hpd_irq_setup(struct drm_i915_private *i915)
+static void xe2lpd_sde_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
@@ -1094,7 +1091,7 @@ static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4;
}
-static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
+static void _xelpdp_pica_hpd_detection_setup(struct intel_display *display,
enum hpd_pin hpd_pin, bool enable)
{
u32 mask = XELPDP_TBT_HOTPLUG_ENABLE |
@@ -1103,18 +1100,18 @@ static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
if (!is_xelpdp_pica_hpd_pin(hpd_pin))
return;
- intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
+ intel_de_rmw(display, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
mask, enable ? mask : 0);
}
static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true);
+ _xelpdp_pica_hpd_detection_setup(display, encoder->hpd_pin, true);
}
-static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
+static void xelpdp_pica_hpd_detection_setup(struct intel_display *display)
{
struct intel_encoder *encoder;
u32 available_pins = 0;
@@ -1122,11 +1119,11 @@ static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS);
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
available_pins |= BIT(encoder->hpd_pin);
for_each_hpd_pin(pin)
- _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin));
+ _xelpdp_pica_hpd_detection_setup(display, pin, available_pins & BIT(pin));
}
static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -1135,23 +1132,23 @@ static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
mtp_hpd_enable_detection(encoder);
}
-static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
+static void xelpdp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs,
+ intel_de_rmw(display, PICAINTERRUPT_IMR, hotplug_irqs,
~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR);
+ intel_de_posting_read(display, PICAINTERRUPT_IMR);
- xelpdp_pica_hpd_detection_setup(i915);
+ xelpdp_pica_hpd_detection_setup(display);
- if (INTEL_PCH_TYPE(i915) >= PCH_LNL)
- xe2lpd_sde_hpd_irq_setup(i915);
- else if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
- mtp_hpd_irq_setup(i915);
+ if (INTEL_PCH_TYPE(display) >= PCH_LNL)
+ xe2lpd_sde_hpd_irq_setup(display);
+ else if (INTEL_PCH_TYPE(display) >= PCH_MTL)
+ mtp_hpd_irq_setup(display);
}
static u32 spt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1190,57 +1187,57 @@ static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
return spt_hotplug2_mask(encoder->hpd_pin);
}
-static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct intel_display *display)
{
/* Display WA #1179 WaHardHangonHotPlug: cnp */
- if (HAS_PCH_CNP(dev_priv)) {
- intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ if (HAS_PCH_CNP(display)) {
+ intel_de_rmw(display, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
/* Enable digital hotplug on the PCH */
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, spt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug_enables));
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ intel_hpd_hotplug_mask(display, spt_hotplug2_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug2_enables));
}
static void spt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
/* Display WA #1179 WaHardHangonHotPlug: cnp */
- if (HAS_PCH_CNP(i915)) {
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1,
- CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ if (HAS_PCH_CNP(display)) {
+ intel_de_rmw(display, SOUTH_CHICKEN1,
+ CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- spt_hotplug_mask(encoder->hpd_pin),
- spt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ spt_hotplug_mask(encoder->hpd_pin),
+ spt_hotplug_enables(encoder));
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2,
- spt_hotplug2_mask(encoder->hpd_pin),
- spt_hotplug2_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ spt_hotplug2_mask(encoder->hpd_pin),
+ spt_hotplug2_enables(encoder));
}
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP)
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- spt_hpd_detection_setup(dev_priv);
+ spt_hpd_detection_setup(display);
}
static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1265,44 +1262,44 @@ static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the CPU, and configure the DP short pulse
* duration to 2ms (which is the minimum in the DisplayPort spec)
* The pulse duration bits are reserved on HSW+.
*/
- intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ intel_hpd_hotplug_mask(display, ilk_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ilk_hotplug_enables));
}
static void ilk_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- ilk_hotplug_mask(encoder->hpd_pin),
- ilk_hotplug_enables(encoder));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ ilk_hotplug_mask(encoder->hpd_pin),
+ ilk_hotplug_enables(encoder));
ibx_hpd_enable_detection(encoder);
}
-static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- if (DISPLAY_VER(dev_priv) >= 8)
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ if (DISPLAY_VER(display) >= 8)
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
else
- ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ ilk_update_display_irq(display, hotplug_irqs, enabled_irqs);
- ilk_hpd_detection_setup(dev_priv);
+ ilk_hpd_detection_setup(display);
- ibx_hpd_irq_setup(dev_priv);
+ ibx_hpd_irq_setup(display);
}
static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1344,80 +1341,80 @@ static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, bxt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, bxt_hotplug_enables));
}
static void bxt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- bxt_hotplug_mask(encoder->hpd_pin),
- bxt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ bxt_hotplug_mask(encoder->hpd_pin),
+ bxt_hotplug_enables(encoder));
}
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
- bxt_hpd_detection_setup(dev_priv);
+ bxt_hpd_detection_setup(display);
}
-static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915)
+static void g45_hpd_peg_band_gap_wa(struct intel_display *display)
{
/*
* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd);
+ intel_de_rmw(display, PEG_BAND_GAP_DATA, 0xf, 0xd);
}
static void i915_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
- if (IS_G45(i915))
- g45_hpd_peg_band_gap_wa(i915);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* HPD sense and interrupt enable are one and the same */
- i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
+ i915_hotplug_interrupt_update(display, hotplug_en, hotplug_en);
}
-static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void i915_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_en;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
/*
* Note HDMI and DP share hotplug bits. Enable bits are the same for all
* generations.
*/
- hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
+ hotplug_en = intel_hpd_enabled_irqs(display, hpd_mask_i915);
/*
* Programming the CRT detection parameters tends to generate a spurious
* hotplug event about three seconds later. So just do it once.
*/
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- if (IS_G45(dev_priv))
- g45_hpd_peg_band_gap_wa(dev_priv);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* Ignore TV since it's buggy */
- i915_hotplug_interrupt_update_locked(dev_priv,
+ i915_hotplug_interrupt_update_locked(display,
HOTPLUG_INT_EN_MASK |
CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
CRT_HOTPLUG_ACTIVATION_PERIOD_64,
@@ -1426,7 +1423,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
struct intel_hotplug_funcs {
/* Enable HPD sense and interrupts for all present encoders */
- void (*hpd_irq_setup)(struct drm_i915_private *i915);
+ void (*hpd_irq_setup)(struct intel_display *display);
/* Enable HPD sense for a single encoder */
void (*hpd_enable_detection)(struct intel_encoder *encoder);
};
@@ -1449,47 +1446,47 @@ HPD_FUNCS(ilk);
void intel_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_enable_detection(encoder);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_enable_detection(encoder);
}
-void intel_hpd_irq_setup(struct drm_i915_private *i915)
+void intel_hpd_irq_setup(struct intel_display *display)
{
- if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
- !i915->display.irq.vlv_display_irqs_enabled)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled)
return;
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_irq_setup(i915);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_irq_setup(display);
}
-void intel_hotplug_irq_init(struct drm_i915_private *i915)
+void intel_hotplug_irq_init(struct intel_display *display)
{
- intel_hpd_init_pins(i915);
+ intel_hpd_init_pins(display);
- intel_hpd_init_early(i915);
+ intel_hpd_init_early(display);
- if (HAS_GMCH(i915)) {
- if (I915_HAS_HOTPLUG(i915))
- i915->display.funcs.hotplug = &i915_hpd_funcs;
+ if (HAS_GMCH(display)) {
+ if (HAS_HOTPLUG(display))
+ display->funcs.hotplug = &i915_hpd_funcs;
} else {
- if (HAS_PCH_DG2(i915))
- i915->display.funcs.hotplug = &icp_hpd_funcs;
- else if (HAS_PCH_DG1(i915))
- i915->display.funcs.hotplug = &dg1_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 14)
- i915->display.funcs.hotplug = &xelpdp_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 11)
- i915->display.funcs.hotplug = &gen11_hpd_funcs;
- else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->display.funcs.hotplug = &bxt_hpd_funcs;
- else if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
- i915->display.funcs.hotplug = &icp_hpd_funcs;
- else if (INTEL_PCH_TYPE(i915) >= PCH_SPT)
- i915->display.funcs.hotplug = &spt_hpd_funcs;
+ if (HAS_PCH_DG2(display))
+ display->funcs.hotplug = &icp_hpd_funcs;
+ else if (HAS_PCH_DG1(display))
+ display->funcs.hotplug = &dg1_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 14)
+ display->funcs.hotplug = &xelpdp_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 11)
+ display->funcs.hotplug = &gen11_hpd_funcs;
+ else if (display->platform.geminilake || display->platform.broxton)
+ display->funcs.hotplug = &bxt_hpd_funcs;
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ display->funcs.hotplug = &icp_hpd_funcs;
+ else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
+ display->funcs.hotplug = &spt_hpd_funcs;
else
- i915->display.funcs.hotplug = &ilk_hpd_funcs;
+ display->funcs.hotplug = &ilk_hpd_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
index e4db752df096..9063bb02a2e9 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
struct intel_encoder;
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *i915);
+u32 i9xx_hpd_irq_ack(struct intel_display *display);
-void i9xx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_status);
-void ibx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void ilk_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void gen11_hpd_irq_handler(struct drm_i915_private *i915, u32 iir);
-void bxt_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir);
-void icp_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
-void spt_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status);
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir);
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir);
+void icp_irq_handler(struct intel_display *display, u32 pch_iir);
+void spt_irq_handler(struct intel_display *display, u32 pch_iir);
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits);
-void i915_hotplug_interrupt_update(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask, u32 bits);
void intel_hpd_enable_detection(struct intel_encoder *encoder);
-void intel_hpd_irq_setup(struct drm_i915_private *i915);
+void intel_hpd_irq_setup(struct intel_display *display);
-void intel_hotplug_irq_init(struct drm_i915_private *i915);
+void intel_hotplug_irq_init(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index fb6b84f6a81d..dc454420c134 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hti_regs.h b/drivers/gpu/drm/i915/display/intel_hti_regs.h
index e206f2837fc8..39c046bd351c 100644
--- a/drivers/gpu/drm/i915/display/intel_hti_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_hti_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_HTI_REGS_H__
#define __INTEL_HTI_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(15, 12)
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index a10cd3992607..3caef7f9c7c4 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -3,6 +3,11 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/int_log.h>
+#include <linux/math.h>
+
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
@@ -10,11 +15,33 @@
#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
+static int get_forced_link_bpp_x16(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc)
+{
+ struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ int force_bpp_x16 = INT_MAX;
+ int i;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->base.crtc != &crtc->base)
+ continue;
+
+ if (!connector->link.force_bpp_x16)
+ continue;
+
+ force_bpp_x16 = min(force_bpp_x16, connector->link.force_bpp_x16);
+ }
+
+ return force_bpp_x16 < INT_MAX ? force_bpp_x16 : 0;
+}
+
/**
* intel_link_bw_init_limits - initialize BW limits
* @state: Atomic state
@@ -31,9 +58,10 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(display, pipe) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state,
- intel_crtc_for_pipe(display, pipe));
+ intel_atomic_get_new_crtc_state(state, crtc);
+ int forced_bpp_x16 = get_forced_link_bpp_x16(state, crtc);
if (state->base.duplicated && crtc_state) {
limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
@@ -42,15 +70,19 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
} else {
limits->max_bpp_x16[pipe] = INT_MAX;
}
+
+ if (forced_bpp_x16)
+ limits->max_bpp_x16[pipe] = min(limits->max_bpp_x16[pipe], forced_bpp_x16);
}
}
/**
- * intel_link_bw_reduce_bpp - reduce maximum link bpp for a selected pipe
+ * __intel_link_bw_reduce_bpp - reduce maximum link bpp for a selected pipe
* @state: atomic state
* @limits: link BW limits
* @pipe_mask: mask of pipes to select from
* @reason: explanation of why bpp reduction is needed
+ * @reduce_forced_bpp: allow reducing bpps below their forced link bpp
*
* Select the pipe from @pipe_mask with the biggest link bpp value and set the
* maximum of link bpp in @limits below this value. Modeset the selected pipe,
@@ -64,10 +96,11 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
* - %-ENOSPC if no pipe can further reduce its link bpp
* - Other negative error, if modesetting the selected pipe failed
*/
-int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
- struct intel_link_bw_limits *limits,
- u8 pipe_mask,
- const char *reason)
+static int __intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits,
+ u8 pipe_mask,
+ const char *reason,
+ bool reduce_forced_bpp)
{
struct intel_display *display = to_intel_display(state);
enum pipe max_bpp_pipe = INVALID_PIPE;
@@ -97,6 +130,10 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
*/
link_bpp_x16 = fxp_q4_from_int(crtc_state->pipe_bpp);
+ if (!reduce_forced_bpp &&
+ link_bpp_x16 <= get_forced_link_bpp_x16(state, crtc))
+ continue;
+
if (link_bpp_x16 > max_bpp_x16) {
max_bpp_x16 = link_bpp_x16;
max_bpp_pipe = crtc->pipe;
@@ -112,6 +149,21 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
BIT(max_bpp_pipe));
}
+int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits,
+ u8 pipe_mask,
+ const char *reason)
+{
+ int ret;
+
+ /* Try to keep any forced link BPP. */
+ ret = __intel_link_bw_reduce_bpp(state, limits, pipe_mask, reason, false);
+ if (ret == -ENOSPC)
+ ret = __intel_link_bw_reduce_bpp(state, limits, pipe_mask, reason, true);
+
+ return ret;
+}
+
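The wrapper above encodes a strict-then-relaxed fallback: a first pass that refuses to reduce any pipe below its user-forced link bpp, and a second pass that overrides the forcing only if the first pass ran out of room. A minimal standalone sketch of the same control flow, with reduce_one() as a hypothetical stand-in for __intel_link_bw_reduce_bpp():

#include <errno.h>
#include <stdio.h>

/* hypothetical stand-in: pretend only the relaxed pass can make progress */
static int reduce_one(int reduce_forced)
{
	return reduce_forced ? 0 : -ENOSPC;
}

static int reduce_with_fallback(void)
{
	/* pass 1: keep any forced link bpp intact */
	int ret = reduce_one(0);

	/* pass 2: only when nothing else can give, override the forcing */
	if (ret == -ENOSPC)
		ret = reduce_one(1);

	return ret;
}

int main(void)
{
	printf("reduce_with_fallback() = %d\n", reduce_with_fallback());
	return 0;
}
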
/**
* intel_link_bw_set_bpp_limit_for_pipe - set link bpp limit for a pipe to its minimum
* @state: atomic state
@@ -245,3 +297,176 @@ int intel_link_bw_atomic_check(struct intel_atomic_state *state,
return -EAGAIN;
}
+
+static int force_link_bpp_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+
+ seq_printf(m, FXP_Q4_FMT "\n", FXP_Q4_ARGS(connector->link.force_bpp_x16));
+
+ return 0;
+}
+
+static int str_to_fxp_q4_nonneg_int(const char *str, int *val_x16)
+{
+ unsigned int val;
+ int err;
+
+ err = kstrtouint(str, 10, &val);
+ if (err)
+ return err;
+
+ if (val > INT_MAX >> 4)
+ return -ERANGE;
+
+ *val_x16 = fxp_q4_from_int(val);
+
+ return 0;
+}
+
+/* modifies str */
+static int str_to_fxp_q4_nonneg(char *str, int *val_x16)
+{
+ const char *int_str;
+ char *frac_str;
+ int frac_digits;
+ int frac_val;
+ int err;
+
+ int_str = strim(str);
+ frac_str = strchr(int_str, '.');
+
+ if (frac_str)
+ *frac_str++ = '\0';
+
+ err = str_to_fxp_q4_nonneg_int(int_str, val_x16);
+ if (err)
+ return err;
+
+ if (!frac_str)
+ return 0;
+
+ /* reject a leading +/- sign, i.e. a negative fraction */
+ if (!isdigit(*frac_str))
+ return -EINVAL;
+
+ err = str_to_fxp_q4_nonneg_int(frac_str, &frac_val);
+ if (err)
+ return err;
+
+ frac_digits = strlen(frac_str);
+ if (frac_digits > intlog10(INT_MAX) >> 24 ||
+ frac_val > INT_MAX - int_pow(10, frac_digits) / 2)
+ return -ERANGE;
+
+ frac_val = DIV_ROUND_CLOSEST(frac_val, (int)int_pow(10, frac_digits));
+
+ if (*val_x16 > INT_MAX - frac_val)
+ return -ERANGE;
+
+ *val_x16 += frac_val;
+
+ return 0;
+}
+
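As a sanity check on the Q4.4 ("x16") arithmetic above: for an input such as "13.0625", the integer part contributes 13 << 4 = 208, while the fractional digits "0625" arrive pre-scaled as 625 << 4 = 10000 and are divided, with rounding, by 10^4, contributing 1, for a total of 209 (exactly 13.0625 * 16). A minimal userspace sketch of the same math, where div_round_closest() is a local stand-in for the kernel's DIV_ROUND_CLOSEST():

#include <stdio.h>

/* stand-in for DIV_ROUND_CLOSEST() on non-negative operands */
static int div_round_closest(int n, int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	/* "13.0625" -> integer part "13", fractional digits "0625" */
	int val_x16 = 13 << 4;		/* fxp_q4_from_int(13) == 208 */
	int frac_x16 = 625 << 4;	/* fractional digits, also scaled by 16 */
	int pow10 = 10000;		/* 10^strlen("0625") */

	val_x16 += div_round_closest(frac_x16, pow10);	/* += 1 */

	printf("13.0625 in Q4.4 is %d (expect 209)\n", val_x16);
	return 0;
}
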
+static int user_str_to_fxp_q4_nonneg(const char __user *ubuf, size_t len, int *val_x16)
+{
+ char *kbuf;
+ int err;
+
+ kbuf = memdup_user_nul(ubuf, len);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+
+ err = str_to_fxp_q4_nonneg(kbuf, val_x16);
+
+ kfree(kbuf);
+
+ return err;
+}
+
+static bool connector_supports_dsc(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_eDP:
+ return intel_dp_has_dsc(connector);
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ if (connector->mst.dp)
+ return HAS_DSC_MST(display);
+
+ return HAS_DSC(display);
+ default:
+ return false;
+ }
+}
+
+static ssize_t
+force_link_bpp_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_display *display = to_intel_display(connector);
+ int min_bpp;
+ int bpp_x16;
+ int err;
+
+ err = user_str_to_fxp_q4_nonneg(ubuf, len, &bpp_x16);
+ if (err)
+ return err;
+
+ /* TODO: Make the non-DSC min_bpp value connector specific. */
+ if (connector_supports_dsc(connector))
+ min_bpp = intel_dp_dsc_min_src_compressed_bpp();
+ else
+ min_bpp = intel_display_min_pipe_bpp();
+
+ if (bpp_x16 &&
+ (bpp_x16 < fxp_q4_from_int(min_bpp) ||
+ bpp_x16 > fxp_q4_from_int(intel_display_max_pipe_bpp(display))))
+ return -EINVAL;
+
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
+ if (err)
+ return err;
+
+ connector->link.force_bpp_x16 = bpp_x16;
+
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
+
+ *offp += len;
+
+ return len;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_link_bpp);
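For readers less familiar with the seq_file helpers: DEFINE_SHOW_STORE_ATTRIBUTE() generates the file_operations that wire the _show and _write callbacks together, roughly as below (paraphrased from include/linux/seq_file.h):

	static int force_link_bpp_open(struct inode *inode, struct file *file)
	{
		return single_open(file, force_link_bpp_show, inode->i_private);
	}

	static const struct file_operations force_link_bpp_fops = {
		.owner		= THIS_MODULE,
		.open		= force_link_bpp_open,
		.read		= seq_read,
		.write		= force_link_bpp_write,
		.llseek		= seq_lseek,
		.release	= single_release,
	};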
+
+void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct dentry *root = connector->base.debugfs_entry;
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ break;
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DVID:
+ if (HAS_FDI(display))
+ break;
+
+ return;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (HAS_FDI(display) && !HAS_DDI(display))
+ break;
+
+ return;
+ default:
+ return;
+ }
+
+ debugfs_create_file("intel_force_link_bpp", 0644, root,
+ connector, &force_link_bpp_fops);
+}
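Once registered, the knob can be exercised from user space. A sketch, noting that the debugfs path below is illustrative (the real one depends on the DRM card and connector name, under /sys/kernel/debug/dri/):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Force 18.5 bpp on a connector; writing "0" clears the override. */
	int main(void)
	{
		const char *path =
			"/sys/kernel/debug/dri/0/DP-1/intel_force_link_bpp";
		const char *val = "18.5";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, val, strlen(val)) < 0)
			perror("write");
		close(fd);
		return 0;
	}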
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index e69049cf178f..b499042e62b1 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -11,6 +11,7 @@
#include "intel_display_limits.h"
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc_state;
struct intel_link_bw_limits {
@@ -32,5 +33,6 @@ bool intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
enum pipe pipe);
int intel_link_bw_atomic_check(struct intel_atomic_state *state,
struct intel_link_bw_limits *new_limits);
+void intel_link_bw_connector_debugfs_add(struct intel_connector *connector);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index 86cc03a4413c..aad52d0d83e1 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 59551c8414c2..666148a14522 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -179,7 +179,7 @@ static int lpe_audio_irq_init(struct intel_display *display)
handle_simple_irq,
"hdmi_lpe_audio_irq_handler");
- return irq_set_chip_data(irq, dev_priv);
+ return 0;
}
static bool lpe_audio_detect(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 63c1afa30b05..abc4b562083d 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -27,10 +27,11 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 19f52d1659fa..7e48a235c99f 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,10 +37,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -84,15 +83,15 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base);
}
-bool intel_lvds_port_enabled(struct drm_i915_private *i915,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
u32 val;
- val = intel_de_read(i915, lvds_reg);
+ val = intel_de_read(display, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(i915))
+ if (HAS_PCH_CPT(display))
*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
else
*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
@@ -104,7 +103,6 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -113,7 +111,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!wakeref)
return false;
- ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
+ ret = intel_lvds_port_enabled(display, lvds_encoder->reg, pipe);
intel_display_power_put(display, encoder->power_domain, wakeref);
@@ -123,13 +121,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS);
- tmp = intel_de_read(dev_priv, lvds_encoder->reg);
+ tmp = intel_de_read(display, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
@@ -141,13 +139,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.flags |= flags;
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
crtc_state->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (DISPLAY_VER(dev_priv) < 4) {
- tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
+ if (DISPLAY_VER(display) < 4) {
+ tmp = intel_de_read(display, PFIT_CONTROL(display));
crtc_state->gmch_pfit.control |= tmp & PFIT_PANEL_8TO6_DITHER_ENABLE;
}
@@ -155,24 +153,24 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
-static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_get_hw_state(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- pps->powerdown_on_reset = intel_de_read(dev_priv,
- PP_CONTROL(dev_priv, 0)) & PANEL_POWER_RESET;
+ pps->powerdown_on_reset = intel_de_read(display,
+ PP_CONTROL(display, 0)) & PANEL_POWER_RESET;
- val = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_ON_DELAYS(display, 0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
pps->delays.power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
pps->delays.backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_OFF_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_OFF_DELAYS(display, 0));
pps->delays.power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
pps->delays.backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_DIVISOR(dev_priv, 0));
+ val = intel_de_read(display, PP_DIVISOR(display, 0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
@@ -185,12 +183,12 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->delays.power_cycle = val * 1000;
- if (DISPLAY_VER(dev_priv) < 5 &&
+ if (DISPLAY_VER(display) < 5 &&
pps->delays.power_up == 0 &&
pps->delays.backlight_on == 0 &&
pps->delays.power_down == 0 &&
pps->delays.backlight_off == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Panel power timings uninitialized, "
"setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
@@ -201,7 +199,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->delays.backlight_off = 200 * 10;
}
- drm_dbg(&dev_priv->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
+ drm_dbg(display->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
"divider %d port %d powerdown_on_reset %d\n",
pps->delays.power_up, pps->delays.power_down,
pps->delays.power_cycle, pps->delays.backlight_on,
@@ -209,28 +207,28 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->port, pps->powerdown_on_reset);
}
-static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_init_hw(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- val = intel_de_read(dev_priv, PP_CONTROL(dev_priv, 0));
- drm_WARN_ON(&dev_priv->drm,
+ val = intel_de_read(display, PP_CONTROL(display, 0));
+ drm_WARN_ON(display->drm,
(val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
if (pps->powerdown_on_reset)
val |= PANEL_POWER_RESET;
- intel_de_write(dev_priv, PP_CONTROL(dev_priv, 0), val);
+ intel_de_write(display, PP_CONTROL(display, 0), val);
- intel_de_write(dev_priv, PP_ON_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_ON_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->delays.power_up) |
REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->delays.backlight_on));
- intel_de_write(dev_priv, PP_OFF_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_OFF_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->delays.power_down) |
REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->delays.backlight_off));
- intel_de_write(dev_priv, PP_DIVISOR(dev_priv, 0),
+ intel_de_write(display, PP_DIVISOR(display, 0),
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
DIV_ROUND_UP(pps->delays.power_cycle, 1000) + 1));
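The power_cycle bookkeeping above is easy to misread: the register field holds 100 ms units while struct intel_lvds_pps stores 100 us units, and the writeback adds one to the rounded-up value. A standalone sketch of the round trip for a register value of 5 (500 ms):

	#include <stdio.h>

	/* Illustrative round trip of the LVDS power_cycle delay units. */
	int main(void)
	{
		unsigned int reg_val = 5;			/* 100 ms units */
		unsigned int power_cycle = reg_val * 1000;	/* 100 us units */
		unsigned int field = (power_cycle + 999) / 1000 + 1;

		/* prints: readout 5 -> 5000 (100us units), writeback 6 */
		printf("readout %u -> %u (100us units), writeback %u\n",
		       reg_val, power_cycle, field);
		return 0;
	}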
@@ -243,25 +241,24 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
assert_fdi_rx_pll_disabled(display, pipe);
- assert_shared_dpll_disabled(display, crtc_state->shared_dpll);
+ assert_dpll_disabled(display, crtc_state->intel_dpll);
} else {
assert_pll_disabled(display, pipe);
}
- intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_init_hw(display, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(i915)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~LVDS_PIPE_SEL_MASK_CPT;
temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
@@ -296,7 +293,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the TRANSCONF reg.
*/
- if (DISPLAY_VER(i915) == 4) {
+ if (DISPLAY_VER(display) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
@@ -312,7 +309,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- intel_de_write(i915, lvds_encoder->reg, temp);
+ intel_de_write(display, lvds_encoder->reg, temp);
}
/*
@@ -323,16 +320,16 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN);
+ intel_de_rmw(display, lvds_encoder->reg, 0, LVDS_PORT_EN);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), 0, PANEL_POWER_ON);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, PP_CONTROL(display, 0), 0, PANEL_POWER_ON);
+ intel_de_posting_read(display, lvds_encoder->reg);
- if (intel_de_wait_for_set(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_set(display, PP_STATUS(display, 0), PP_ON, 5000))
+ drm_err(display->drm,
"timed out waiting for panel to power on\n");
intel_backlight_enable(crtc_state, conn_state);
@@ -343,16 +340,16 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), PANEL_POWER_ON, 0);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 1000))
- drm_err(&dev_priv->drm,
+ intel_de_rmw(display, PP_CONTROL(display, 0), PANEL_POWER_ON, 0);
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_ON, 1000))
+ drm_err(display->drm,
"timed out waiting for panel to power off\n");
- intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, lvds_encoder->reg, LVDS_PORT_EN, 0);
+ intel_de_posting_read(display, lvds_encoder->reg);
}
static void gmch_disable_lvds(struct intel_atomic_state *state,
@@ -384,10 +381,10 @@ static void pch_post_disable_lvds(struct intel_atomic_state *state,
static void intel_lvds_shutdown(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
+ drm_err(display->drm,
"timed out waiting for panel power cycle delay\n");
}
@@ -420,7 +417,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct intel_connector *connector = lvds_encoder->attached_connector;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -429,12 +426,12 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) {
- drm_err(&i915->drm, "Can't support LVDS on pipe A\n");
+ if (DISPLAY_VER(display) < 4 && crtc->pipe == 0) {
+ drm_err(display->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
@@ -447,7 +444,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"forcing display bpp (was %d) to LVDS (%d)\n",
crtc_state->pipe_bpp, lvds_bpp);
crtc_state->pipe_bpp = lvds_bpp;
@@ -775,11 +772,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
@@ -787,24 +784,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
+bool intel_is_dual_link_lvds(struct intel_display *display)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(display);
return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
- struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev);
+ struct intel_display *display = to_intel_display(&lvds_encoder->base);
struct intel_connector *connector = lvds_encoder->attached_connector;
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
unsigned int val;
/* use the module option value if specified */
- if (i915->display.params.lvds_channel_mode > 0)
- return i915->display.params.lvds_channel_mode == 2;
+ if (display->params.lvds_channel_mode > 0)
+ return display->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
@@ -819,8 +816,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = intel_de_read(i915, lvds_encoder->reg);
- if (HAS_PCH_CPT(i915))
+ val = intel_de_read(display, lvds_encoder->reg);
+ if (HAS_PCH_CPT(display))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
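The 112 MHz single-channel ceiling is what usually decides the dual-link question; fixed_mode->clock is in kHz, hence the 112999 comparison. A sketch with an assumed panel clock (the 154 MHz figure is illustrative, roughly a 1920x1200@60 panel):

	#include <stdbool.h>
	#include <stdio.h>

	/* Single-channel LVDS tops out at 112 MHz; clocks are in kHz. */
	int main(void)
	{
		int clock_khz = 154000;
		bool dual_link = clock_khz > 112999;

		printf("%d kHz -> %s link\n",
		       clock_khz, dual_link ? "dual" : "single");
		return 0;
	}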
@@ -837,14 +834,13 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @i915: i915 device
+ * @display: display device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_i915_private *i915)
+void intel_lvds_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_lvds_encoder *lvds_encoder;
struct intel_connector *connector;
const struct drm_edid *drm_edid;
@@ -855,25 +851,25 @@ void intel_lvds_init(struct drm_i915_private *i915)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support,
+ drm_WARN(display->drm, !display->vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!i915->display.vbt.int_lvds_support) {
- drm_dbg_kms(&i915->drm,
+ if (!display->vbt.int_lvds_support) {
+ drm_dbg_kms(display->drm,
"Internal LVDS support disabled by VBT\n");
return;
}
- if (HAS_PCH_SPLIT(i915))
+ if (HAS_PCH_SPLIT(display))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- lvds = intel_de_read(i915, lvds_reg);
+ lvds = intel_de_read(display, lvds_reg);
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
}
@@ -881,11 +877,11 @@ void intel_lvds_init(struct drm_i915_private *i915)
ddc_pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(display, &ddc_pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT\n");
return;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT, but enabled anyway\n");
}
@@ -902,18 +898,18 @@ void intel_lvds_init(struct drm_i915_private *i915)
lvds_encoder->attached_connector = connector;
encoder = &lvds_encoder->base;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS,
intel_gmbus_get_adapter(display, ddc_pin));
- drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
encoder->enable = intel_enable_lvds;
encoder->pre_enable = intel_pre_enable_lvds;
encoder->compute_config = intel_lvds_compute_config;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
encoder->disable = pch_disable_lvds;
encoder->post_disable = pch_post_disable_lvds;
} else {
@@ -931,7 +927,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
encoder->port = PORT_NONE;
encoder->cloneable = 0;
- if (DISPLAY_VER(i915) < 4)
+ if (DISPLAY_VER(display) < 4)
encoder->pipe_mask = BIT(PIPE_B);
else
encoder->pipe_mask = ~0;
@@ -943,7 +939,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_lvds_add_properties(&connector->base);
- intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_get_hw_state(display, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
/*
@@ -958,7 +954,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
drm_edid = drm_edid_read_switcheroo(&connector->base, connector->base.ddc);
else
@@ -991,7 +987,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
if (!intel_panel_preferred_fixed_mode(connector))
intel_panel_add_encoder_fixed_mode(connector, encoder);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
if (!intel_panel_preferred_fixed_mode(connector))
@@ -1002,7 +998,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_backlight_setup(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n",
+ drm_dbg_kms(display->drm, "detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -1010,7 +1006,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
return;
failed:
- drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n");
+ drm_dbg_kms(display->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(&connector->base);
drm_encoder_cleanup(&encoder->base);
kfree(lvds_encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h
index 7ad5fa9c0434..a6db1706a97c 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.h
+++ b/drivers/gpu/drm/i915/display/intel_lvds.h
@@ -11,28 +11,28 @@
#include "i915_reg_defs.h"
enum pipe;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe);
-void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+void intel_lvds_init(struct intel_display *display);
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display);
+bool intel_is_dual_link_lvds(struct intel_display *display);
#else
-static inline bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
return false;
}
-static inline void intel_lvds_init(struct drm_i915_private *dev_priv)
+static inline void intel_lvds_init(struct intel_display *display)
{
}
-static inline struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+static inline struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
return NULL;
}
-static inline bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+static inline bool intel_is_dual_link_lvds(struct intel_display *display)
{
return false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 312b21b1ab59..8415f3d703ed 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -6,11 +6,11 @@
* state.
*/
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
@@ -23,6 +23,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_fifo_underrun.h"
@@ -31,13 +32,14 @@
#include "intel_pmdemand.h"
#include "intel_tc.h"
#include "intel_vblank.h"
+#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"
static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
@@ -48,7 +50,7 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
if (!crtc_state->hw.active)
return;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -56,9 +58,9 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
intel_plane_disable_noatomic(crtc, plane);
}
- state = drm_atomic_state_alloc(&i915->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"failed to disable [CRTC:%d:%s], out of memory",
crtc->base.base.id, crtc->base.name);
return;
@@ -68,7 +70,7 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
to_intel_atomic_state(state)->internal = true;
/* Everything's already locked, -EDEADLK can't happen. */
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc,
BIT(pipe) |
intel_crtc_joiner_secondary_pipes(crtc_state)) {
struct intel_crtc_state *temp_crtc_state =
@@ -77,24 +79,24 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
ret = drm_atomic_add_affected_connectors(state, &temp_crtc->base);
- drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
+ drm_WARN_ON(display->drm, IS_ERR(temp_crtc_state) || ret);
}
- i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
+ display->funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.base.id, crtc->base.name);
crtc->active = false;
crtc->base.enabled = false;
- if (crtc_state->shared_dpll)
- intel_unreference_shared_dpll_crtc(crtc,
- crtc_state->shared_dpll,
- &crtc_state->shared_dpll->state);
+ if (crtc_state->intel_dpll)
+ intel_dpll_crtc_put(crtc,
+ crtc_state->intel_dpll,
+ &crtc_state->intel_dpll->state);
}
static void set_encoder_for_connector(struct intel_connector *connector,
@@ -118,13 +120,12 @@ static void set_encoder_for_connector(struct intel_connector *connector,
static void reset_encoder_connector_state(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.encoder != &encoder->base)
continue;
@@ -143,10 +144,10 @@ static void reset_encoder_connector_state(struct intel_encoder *encoder)
static void reset_crtc_encoder_state(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_encoder *encoder;
- for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder) {
+ for_each_encoder_on_crtc(display->drm, &crtc->base, encoder) {
reset_encoder_connector_state(encoder);
encoder->base.crtc = NULL;
}
@@ -155,9 +156,8 @@ static void reset_crtc_encoder_state(struct intel_crtc *crtc)
static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -169,7 +169,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
reset_crtc_encoder_state(crtc);
intel_fbc_disable(crtc);
- intel_update_watermarks(i915);
+ intel_update_watermarks(display);
intel_display_power_put_all_in_set(display, &crtc->enabled_power_domains);
@@ -184,13 +184,13 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
* Return all the pipes using a transcoder in @transcoder_mask.
* For joiner configs return only the joiner primary.
*/
-static u8 get_transcoder_pipes(struct drm_i915_private *i915,
+static u8 get_transcoder_pipes(struct intel_display *display,
u8 transcoder_mask)
{
struct intel_crtc *temp_crtc;
u8 pipes = 0;
- for_each_intel_crtc(&i915->drm, temp_crtc) {
+ for_each_intel_crtc(display->drm, temp_crtc) {
struct intel_crtc_state *temp_crtc_state =
to_intel_crtc_state(temp_crtc->base.state);
@@ -215,7 +215,6 @@ static void get_portsync_pipes(struct intel_crtc *crtc,
u8 *master_pipe_mask, u8 *slave_pipes_mask)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_crtc *master_crtc;
@@ -234,20 +233,20 @@ static void get_portsync_pipes(struct intel_crtc *crtc,
else
master_transcoder = crtc_state->master_transcoder;
- *master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder));
- drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask));
+ *master_pipe_mask = get_transcoder_pipes(display, BIT(master_transcoder));
+ drm_WARN_ON(display->drm, !is_power_of_2(*master_pipe_mask));
master_crtc = intel_crtc_for_pipe(display, ffs(*master_pipe_mask) - 1);
master_crtc_state = to_intel_crtc_state(master_crtc->base.state);
- *slave_pipes_mask = get_transcoder_pipes(i915, master_crtc_state->sync_mode_slaves_mask);
+ *slave_pipes_mask = get_transcoder_pipes(display, master_crtc_state->sync_mode_slaves_mask);
}
-static u8 get_joiner_secondary_pipes(struct drm_i915_private *i915, u8 primary_pipes_mask)
+static u8 get_joiner_secondary_pipes(struct intel_display *display, u8 primary_pipes_mask)
{
struct intel_crtc *primary_crtc;
u8 pipes = 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, primary_crtc, primary_pipes_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, primary_crtc, primary_pipes_mask) {
struct intel_crtc_state *primary_crtc_state =
to_intel_crtc_state(primary_crtc->base.state);
@@ -260,45 +259,45 @@ static u8 get_joiner_secondary_pipes(struct drm_i915_private *i915, u8 primary_p
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc *temp_crtc;
u8 portsync_master_mask;
u8 portsync_slaves_mask;
u8 joiner_secondaries_mask;
- struct intel_crtc *temp_crtc;
/* TODO: Add support for MST */
get_portsync_pipes(crtc, &portsync_master_mask, &portsync_slaves_mask);
- joiner_secondaries_mask = get_joiner_secondary_pipes(i915,
+ joiner_secondaries_mask = get_joiner_secondary_pipes(display,
portsync_master_mask |
portsync_slaves_mask);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
portsync_master_mask & portsync_slaves_mask ||
portsync_master_mask & joiner_secondaries_mask ||
portsync_slaves_mask & joiner_secondaries_mask);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, joiner_secondaries_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, joiner_secondaries_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_slaves_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, portsync_slaves_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_master_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, portsync_master_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc,
joiner_secondaries_mask |
portsync_slaves_mask |
portsync_master_mask)
intel_crtc_disable_noatomic_complete(temp_crtc);
}
-static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915)
+static void intel_modeset_update_connector_atomic_state(struct intel_display *display)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state = connector->base.state;
struct intel_encoder *encoder =
@@ -320,7 +319,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_is_joiner_secondary(crtc_state))
return;
@@ -333,7 +332,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
- if (DISPLAY_INFO(i915)->color.degamma_lut_size) {
+ if (DISPLAY_INFO(display)->color.degamma_lut_size) {
/* assume 1:1 mapping */
drm_property_replace_blob(&crtc_state->hw.degamma_lut,
crtc_state->pre_csc_lut);
@@ -348,7 +347,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
* to gamma_lut as that is the only valid source of LUTs
* in the uapi.
*/
- drm_WARN_ON(&i915->drm, crtc_state->post_csc_lut &&
+ drm_WARN_ON(display->drm, crtc_state->post_csc_lut &&
crtc_state->pre_csc_lut);
drm_property_replace_blob(&crtc_state->hw.degamma_lut,
@@ -367,15 +366,14 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
}
static void
-intel_sanitize_plane_mapping(struct drm_i915_private *i915)
+intel_sanitize_plane_mapping(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
- if (DISPLAY_VER(i915) >= 4)
+ if (DISPLAY_VER(display) >= 4)
return;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_crtc *plane_crtc;
@@ -387,7 +385,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *i915)
if (pipe == crtc->pipe)
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
plane->base.base.id, plane->base.name);
@@ -424,12 +422,12 @@ static bool intel_crtc_needs_link_reset(struct intel_crtc *crtc)
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
struct intel_connector *found_connector = NULL;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (&encoder->base == connector->base.encoder) {
found_connector = connector;
@@ -467,7 +465,7 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
static bool intel_sanitize_crtc(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
bool needs_link_reset;
@@ -475,7 +473,7 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc,
struct intel_plane *plane;
/* Disable everything but the primary plane */
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -516,7 +514,7 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc,
return true;
}
-static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
+static void intel_sanitize_all_crtcs(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *crtc;
@@ -531,7 +529,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
for (;;) {
u32 old_mask = crtcs_forced_off;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
u32 crtc_mask = drm_crtc_mask(&crtc->base);
if (crtcs_forced_off & crtc_mask)
@@ -544,7 +542,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
break;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -554,7 +552,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/*
* Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
@@ -566,22 +564,21 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
* without several WARNs, but for now let's take the easy
* road.
*/
- return IS_SANDYBRIDGE(i915) &&
+ return display->platform.sandybridge &&
crtc_state->hw.active &&
- crtc_state->shared_dpll &&
+ crtc_state->intel_dpll &&
crtc_state->port_clock == 0;
}
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc_state *crtc_state = crtc ?
to_intel_crtc_state(crtc->base.state) : NULL;
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
/*
* We need to check both for a crtc link (meaning that the encoder is
@@ -592,7 +589,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS has misprogrammed the hardware. Disabling pipe %c\n",
pipe_name(crtc->pipe));
has_active_crtc = false;
@@ -600,7 +597,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
@@ -617,7 +614,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
if (crtc_state) {
struct drm_encoder *best_encoder;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
@@ -651,18 +648,17 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
/* FIXME read out full plane state for all planes */
-static void readout_plane_state(struct drm_i915_private *i915)
+static void readout_plane_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
struct intel_crtc_state *crtc_state;
@@ -676,13 +672,13 @@ static void readout_plane_state(struct drm_i915_private *i915)
intel_set_plane_visible(crtc_state, plane_state, visible);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
plane->base.base.id, plane->base.name,
str_enabled_disabled(visible), pipe_name(pipe));
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -690,18 +686,17 @@ static void readout_plane_state(struct drm_i915_private *i915)
}
}
-static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
+static void intel_modeset_readout_hw_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -716,15 +711,15 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
crtc->base.enabled = crtc_state->hw.enable;
crtc->active = crtc_state->hw.active;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
str_enabled_disabled(crtc_state->hw.active));
}
- readout_plane_state(i915);
+ readout_plane_state(display);
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_crtc_state *crtc_state = NULL;
pipe = 0;
@@ -743,7 +738,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
/* encoder readout should be linked to the joiner primary */
WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(crtc_state)) {
struct intel_crtc_state *secondary_crtc_state;
@@ -766,7 +761,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
if (encoder->sync_state)
encoder->sync_state(encoder, crtc_state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id, encoder->base.name,
str_enabled_disabled(encoder->base.crtc),
@@ -775,7 +770,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
intel_dpll_readout_hw_state(display);
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct intel_crtc_state *crtc_state = NULL;
@@ -809,37 +804,37 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
if (connector->sync_state)
connector->sync_state(connector, crtc_state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
str_enabled_disabled(connector->base.encoder));
}
drm_connector_list_iter_end(&conn_iter);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
- if (crtc_state->hw.active) {
- /*
- * The initial mode needs to be set in order to keep
- * the atomic core happy. It wants a valid mode if the
- * crtc's enabled, so we do the above call.
- *
- * But we don't set all the derived state fully, hence
- * set a flag to indicate that a full recalculation is
- * needed on the next commit.
- */
- crtc_state->inherited = true;
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc's enabled, so we do the above call.
+ *
+ * But we don't set all the derived state fully, hence
+ * set a flag to indicate that a full recalculation is
+ * needed on the next commit.
+ */
+ crtc_state->inherited = true;
+ if (crtc_state->hw.active) {
intel_crtc_update_active_timings(crtc_state,
crtc_state->vrr.enable);
intel_crtc_copy_hw_to_uapi_state(crtc_state);
}
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -855,14 +850,14 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
* use plane->min_cdclk() :(
*/
if (plane_state->uapi.visible && plane->min_cdclk) {
- if (crtc_state->double_wide || DISPLAY_VER(i915) >= 10)
+ if (crtc_state->double_wide || DISPLAY_VER(display) >= 10)
crtc_state->min_cdclk[plane->id] =
DIV_ROUND_UP(crtc_state->pixel_rate, 2);
else
crtc_state->min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_cdclk %d kHz\n",
plane->base.base.id, plane->base.name,
crtc_state->min_cdclk[plane->id]);
@@ -874,7 +869,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
/* TODO move here (or even earlier?) on all platforms */
if (DISPLAY_VER(display) >= 9)
- intel_wm_get_hw_state(i915);
+ intel_wm_get_hw_state(display);
intel_bw_update_hw_state(display);
intel_cdclk_update_hw_state(display);
@@ -883,11 +878,11 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
}
static void
-get_encoder_power_domains(struct drm_i915_private *i915)
+get_encoder_power_domains(struct intel_display *display)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
@@ -905,49 +900,51 @@ get_encoder_power_domains(struct drm_i915_private *i915)
}
}
-static void intel_early_display_was(struct drm_i915_private *i915)
+static void intel_early_display_was(struct intel_display *display)
{
/*
* Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
- if (IS_DISPLAY_VER(i915, 10, 12))
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
+ if (IS_DISPLAY_VER(display, 10, 12))
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
- if (IS_HASWELL(i915))
- intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
+ if (display->platform.haswell)
+ intel_de_rmw(display, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
- if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ if (display->platform.kabylake || display->platform.coffeelake ||
+ display->platform.cometlake) {
/* Display WA #1142:kbl,cfl,cml */
- intel_de_rmw(i915, CHICKEN_PAR1_1,
+ intel_de_rmw(display, CHICKEN_PAR1_1,
KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
- intel_de_rmw(i915, CHICKEN_MISC_2,
+ intel_de_rmw(display, CHICKEN_MISC_2,
KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
KBL_ARB_FILL_SPARE_14);
}
}
-void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+void intel_modeset_setup_hw_state(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx)
{
- struct intel_display *display = &i915->display;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
- intel_early_display_was(i915);
- intel_modeset_readout_hw_state(i915);
+ intel_early_display_was(display);
+ intel_vga_disable(display);
+
+ intel_modeset_readout_hw_state(display);
/* HW state is read out, now we need to sanitize this mess. */
- get_encoder_power_domains(i915);
+ get_encoder_power_domains(display);
- intel_pch_sanitize(i915);
+ intel_pch_sanitize(display);
intel_cmtg_sanitize(display);
@@ -955,7 +952,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
*/
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -964,40 +961,40 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
drm_crtc_vblank_reset(&crtc->base);
if (crtc_state->hw.active) {
- intel_dmc_enable_pipe(display, crtc->pipe);
+ intel_dmc_enable_pipe(crtc_state);
intel_crtc_vblank_on(crtc_state);
}
}
- intel_fbc_sanitize(&i915->display);
+ intel_fbc_sanitize(display);
- intel_sanitize_plane_mapping(i915);
+ intel_sanitize_plane_mapping(display);
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
intel_sanitize_encoder(encoder);
/*
* Sanitizing CRTCs needs their connector atomic state to be
* up-to-date, so ensure that already here.
*/
- intel_modeset_update_connector_atomic_state(i915);
+ intel_modeset_update_connector_atomic_state(display);
- intel_sanitize_all_crtcs(i915, ctx);
+ intel_sanitize_all_crtcs(display, ctx);
intel_dpll_sanitize_state(display);
/* TODO move earlier on all platforms */
if (DISPLAY_VER(display) < 9)
- intel_wm_get_hw_state(i915);
- intel_wm_sanitize(i915);
+ intel_wm_get_hw_state(display);
+ intel_wm_sanitize(display);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_power_domain_mask put_domains;
intel_modeset_get_crtc_power_domains(crtc_state, &put_domains);
- if (drm_WARN_ON(&i915->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+ if (drm_WARN_ON(display->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
intel_modeset_put_crtc_power_domains(crtc, &put_domains);
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.h b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
index 3beff67b33d0..f5e6f3ae9572 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.h
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
@@ -6,10 +6,10 @@
#ifndef __INTEL_MODESET_SETUP_H__
#define __INTEL_MODESET_SETUP_H__
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
+struct intel_display;
-void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+void intel_modeset_setup_hw_state(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx);
#endif /* __INTEL_MODESET_SETUP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index a008412fdd04..f2f6b9d9afa1 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -6,13 +6,14 @@
*/
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cx0_phy.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_modeset_verify.h"
@@ -28,9 +29,8 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (connector->get_hw_state(connector)) {
@@ -91,7 +91,6 @@ verify_connector_state(struct intel_atomic_state *state,
static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (crtc_state->has_pch_encoder) {
int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(display, crtc_state),
@@ -103,7 +102,7 @@ static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_s
* Yell if the encoder disagrees. Allow for slight
* rounding differences.
*/
- drm_WARN(&i915->drm, abs(fdi_dotclock - dotclock) > 1,
+ drm_WARN(display->drm, abs(fdi_dotclock - dotclock) > 1,
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
fdi_dotclock, dotclock);
}
@@ -113,17 +112,16 @@ static void
verify_encoder_state(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_encoder *encoder;
struct drm_connector *connector;
const struct drm_connector_state *old_conn_state, *new_conn_state;
int i;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
bool enabled = false, found = false;
enum pipe pipe;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s]\n",
encoder->base.base.id,
encoder->base.name);
@@ -166,7 +164,6 @@ verify_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_crtc_state *sw_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_state *hw_crtc_state;
@@ -185,7 +182,7 @@ verify_crtc_state(struct intel_atomic_state *state,
intel_crtc_get_pipe_config(hw_crtc_state);
/* we keep both pipes enabled on 830 */
- if (IS_I830(i915) && hw_crtc_state->hw.active)
+ if (display->platform.i830 && hw_crtc_state->hw.active)
hw_crtc_state->hw.active = sw_crtc_state->hw.active;
INTEL_DISPLAY_STATE_WARN(display,
@@ -246,7 +243,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
intel_wm_state_verify(state, crtc);
verify_connector_state(state, crtc);
verify_crtc_state(state, crtc);
- intel_shared_dpll_state_verify(state, crtc);
+ intel_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
intel_cx0pll_state_verify(state, crtc);
}
@@ -255,5 +252,5 @@ void intel_modeset_verify_disabled(struct intel_atomic_state *state)
{
verify_encoder_state(state);
verify_connector_state(state, NULL);
- intel_shared_dpll_verify_disabled(state);
+ intel_dpll_verify_disabled(state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 0eaa6cd6fe80..81efdb17fc0c 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -31,10 +31,13 @@
#include <acpi/video.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_backlight.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_opregion.h"
#include "intel_pci_config.h"
@@ -664,11 +667,10 @@ bool intel_opregion_asle_present(struct intel_display *display)
void intel_opregion_asle_intr(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_opregion *opregion = display->opregion;
if (opregion && opregion->asle)
- queue_work(i915->unordered_wq, &opregion->asle_work);
+ queue_work(display->wq.unordered, &opregion->asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
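
The ASLE interrupt path only queues work; the heavy opregion handling runs later in process context. The change above simply moves the queue from i915->unordered_wq to display->wq.unordered. A generic sketch of the defer-from-IRQ pattern, with hypothetical names:

  #include <linux/workqueue.h>

  struct asle_demo {
  	struct workqueue_struct *wq;	/* allocated at init time */
  	struct work_struct asle_work;	/* INIT_WORK()ed at init time */
  };

  static void asle_work_fn(struct work_struct *work)
  {
  	struct asle_demo *demo = container_of(work, struct asle_demo, asle_work);

  	/* Process-context handling goes here, not in the IRQ handler. */
  	(void)demo;
  }

  /* Called from the interrupt path: queue and return immediately. */
  static void asle_irq(struct asle_demo *demo)
  {
  	queue_work(demo->wq, &demo->asle_work);
  }
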
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index aff9a3455c1b..159a5f998ea0 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -31,6 +31,7 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
+
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
@@ -38,6 +39,7 @@
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
@@ -291,7 +293,6 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *frontbuffer = NULL;
@@ -307,7 +308,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
- intel_frontbuffer_flip_prepare(i915, INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_flip_prepare(display, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
if (vma)
@@ -359,14 +360,13 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
if (drm_WARN_ON(display->drm, !vma))
return;
- intel_frontbuffer_flip_complete(i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip_complete(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_vma_unpin(vma);
i915_vma_put(vma);
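
The two conversions above preserve the frontbuffer pairing invariant: every flip_prepare on an overlay frontbuffer bit is matched by a flip_complete once the old buffer is off screen. Condensed into one hypothetical function (locking and error handling omitted):

  static void demo_overlay_flip(struct intel_overlay *overlay,
  			      struct i915_vma *new_vma)
  {
  	struct intel_display *display = overlay->display;
  	enum pipe pipe = overlay->crtc->pipe;

  	/* 1. Announce the upcoming flip. */
  	intel_frontbuffer_flip_prepare(display, INTEL_FRONTBUFFER_OVERLAY(pipe));

  	overlay->old_vma = overlay->vma;
  	overlay->vma = new_vma;

  	/* 2. ... hardware flip is submitted here ... */

  	/* 3. Complete the same bit when the old vma is released. */
  	intel_frontbuffer_flip_complete(display, INTEL_FRONTBUFFER_OVERLAY(pipe));
  }
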
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index f5c972880391..2a20aaaaac39 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -462,3 +462,135 @@ void intel_panel_fini(struct intel_connector *connector)
drm_mode_destroy(connector->base.dev, fixed_mode);
}
}
+
+/*
+ * If the panel was already enabled at probe, and we took over the state, the
+ * panel prepared state is out of sync, and the panel followers won't be
+ * notified. We need to call drm_panel_prepare() on enabled panels.
+ *
+ * It would be natural to handle this e.g. in the connector ->sync_state hook at
+ * intel_modeset_readout_hw_state(), but that's unfortunately too early. We
+ * don't have drm_connector::kdev at that time. For now, figure out the state at
+ * ->late_register, and sync there.
+ */
+static void intel_panel_sync_state(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct drm_connector_state *conn_state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
+ if (ret)
+ return;
+
+ conn_state = connector->base.state;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Panel prepare\n",
+ connector->base.base.id, connector->base.name);
+ intel_panel_prepare(crtc_state, conn_state);
+ }
+ }
+
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
+}
+
+static const struct drm_panel_funcs dummy_panel_funcs = {
+};
+
+int intel_panel_register(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ ret = intel_backlight_device_register(connector);
+ if (ret)
+ return ret;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct device *dev = connector->base.kdev;
+ struct drm_panel *base;
+
+ /* Sanity check. */
+ if (drm_WARN_ON(display->drm, !dev))
+ goto out;
+
+ /*
+ * We need drm_connector::kdev for allocating the panel, to make
+ * drm_panel_add_follower() lookups work. The kdev is
+ * initialized in drm_sysfs_connector_add(), just before the
+ * connector .late_register() hooks. So we can't allocate the
+ * panel at connector init time, and can't allocate struct
+ * intel_panel with a drm_panel sub-struct. For now, use
+ * __devm_drm_panel_alloc() directly.
+ *
+ * The lookups also depend on drm_connector::fwnode being set in
+ * intel_acpi_assign_connector_fwnodes(). However, if that's
+ * missing, it will gracefully lead to -EPROBE_DEFER in
+ * drm_panel_add_follower().
+ */
+ base = __devm_drm_panel_alloc(dev, sizeof(*base), 0,
+ &dummy_panel_funcs,
+ connector->base.connector_type);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err;
+ }
+
+ panel->base = base;
+
+ drm_panel_add(panel->base);
+
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Registered panel device '%s', has fwnode: %s\n",
+ connector->base.base.id, connector->base.name,
+ dev_name(dev), str_yes_no(dev_fwnode(dev)));
+
+ intel_panel_sync_state(connector);
+ }
+
+out:
+ return 0;
+
+err:
+ intel_backlight_device_unregister(connector);
+
+ return ret;
+}
+
+void intel_panel_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->base)
+ drm_panel_remove(panel->base);
+
+ intel_backlight_device_unregister(connector);
+}
+
+/* Notify followers, if any, about power being up. */
+void intel_panel_prepare(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_panel_prepare(panel->base);
+}
+
+/* Notify followers, if any, about power going down. */
+void intel_panel_unprepare(const struct drm_connector_state *old_conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_panel_unprepare(panel->base);
+}
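
Registering a drm_panel here exists so that drm_panel_add_follower() lookups from other drivers can resolve to i915 connectors. For context, a minimal follower sketch against the upstream drm_panel follower API; the demo device and callbacks are hypothetical:

  #include <drm/drm_panel.h>

  struct demo_follower {
  	struct drm_panel_follower follower;
  };

  static int demo_panel_prepared(struct drm_panel_follower *follower)
  {
  	/* Panel power is up: safe to power up the follower device. */
  	return 0;
  }

  static int demo_panel_unprepared(struct drm_panel_follower *follower)
  {
  	/* Panel power is going down: quiesce the follower device. */
  	return 0;
  }

  static const struct drm_panel_follower_funcs demo_follower_funcs = {
  	.panel_prepared = demo_panel_prepared,
  	.panel_unprepared = demo_panel_unprepared,
  };

  static int demo_probe(struct device *dev, struct demo_follower *demo)
  {
  	demo->follower.funcs = &demo_follower_funcs;

  	/*
  	 * Resolves the panel via dev's fwnode. Returns -EPROBE_DEFER
  	 * until the panel registered above shows up.
  	 */
  	return drm_panel_add_follower(dev, &demo->follower);
  }
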
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index b60d12322e5d..56a6412cf0fb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -23,6 +23,8 @@ void intel_panel_init_alloc(struct intel_connector *connector);
int intel_panel_init(struct intel_connector *connector,
const struct drm_edid *fixed_edid);
void intel_panel_fini(struct intel_connector *connector);
+int intel_panel_register(struct intel_connector *connector);
+void intel_panel_unregister(struct intel_connector *connector);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
bool intel_panel_use_ssc(struct intel_display *display);
@@ -51,4 +53,8 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
struct intel_encoder *encoder);
+void intel_panel_prepare(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_unprepare(const struct drm_connector_state *old_conn_state);
+
#endif /* __INTEL_PANEL_H__ */
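
The new register/unregister entry points are intended to be called from the connector registration hooks, where drm_connector::kdev is guaranteed to exist. A sketch of the expected call sites; the hook names are the standard drm_connector_funcs ones, and the exact i915 wiring is outside this hunk:

  static int demo_connector_late_register(struct drm_connector *_connector)
  {
  	struct intel_connector *connector = to_intel_connector(_connector);

  	/* kdev exists by now, so the panel device can be allocated. */
  	return intel_panel_register(connector);
  }

  static void demo_connector_early_unregister(struct drm_connector *_connector)
  {
  	struct intel_connector *connector = to_intel_connector(_connector);

  	intel_panel_unregister(connector);
  }
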
diff --git a/drivers/gpu/drm/i915/display/intel_pch.c b/drivers/gpu/drm/i915/display/intel_pch.c
new file mode 100644
index 000000000000..469e8a3cfb49
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Intel Corporation.
+ */
+
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
+#include "intel_display_core.h"
+#include "intel_pch.h"
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
+#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
+#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
+#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
+
+/*
+ * Check for platforms where the south display is on the same PCI device or SoC
+ * die as the north display. The PCH (if it even exists) is not involved in
+ * display. Return a fake PCH type for south display handling on these
+ * platforms, without actually detecting the PCH, and PCH_NONE otherwise.
+ */
+static enum intel_pch intel_pch_fake_for_south_display(struct intel_display *display)
+{
+ enum intel_pch pch_type = PCH_NONE;
+
+ if (DISPLAY_VER(display) >= 20)
+ pch_type = PCH_LNL;
+ else if (display->platform.battlemage || display->platform.meteorlake)
+ pch_type = PCH_MTL;
+ else if (display->platform.dg2)
+ pch_type = PCH_DG2;
+ else if (display->platform.dg1)
+ pch_type = PCH_DG1;
+
+ return pch_type;
+}
+
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct intel_display *display, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Ibex Peak PCH\n");
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) != 5);
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found CougarPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ DISPLAY_VER(display) != 6 &&
+ !display->platform.ivybridge);
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found PantherPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ DISPLAY_VER(display) != 6 &&
+ !display->platform.ivybridge);
+ /* PPT is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found LynxPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ display->platform.haswell_ult ||
+ display->platform.broadwell_ult);
+ return PCH_LPT_H;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found LynxPoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell_ult &&
+ !display->platform.broadwell_ult);
+ return PCH_LPT_LP;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found WildcatPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ display->platform.haswell_ult ||
+ display->platform.broadwell_ult);
+ /* WPT is LPT compatible */
+ return PCH_LPT_H;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found WildcatPoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell_ult &&
+ !display->platform.broadwell_ult);
+ /* WPT is LPT compatible */
+ return PCH_LPT_LP;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found SunrisePoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake);
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found SunrisePoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Kaby Lake PCH (KBP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ /* KBP is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Cannon Lake PCH (CNP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm,
+ "Found Cannon Lake LP PCH (CNP-LP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Comet Lake PCH (CMP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake &&
+ !display->platform.rocketlake);
+ /* CMP is CNP compatible */
+ return PCH_CNP;
+ case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Comet Lake V PCH (CMP-V)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ /* CMP-V is based on KBP, which is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Ice Lake PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.icelake);
+ return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Mule Creek Canyon PCH\n");
+ drm_WARN_ON(display->drm, !(display->platform.jasperlake ||
+ display->platform.elkhartlake));
+ /* MCC is TGP compatible */
+ return PCH_TGP;
+ case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Tiger Lake LP PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.tigerlake &&
+ !display->platform.rocketlake &&
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_TGP;
+ case INTEL_PCH_JSP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Jasper Lake PCH\n");
+ drm_WARN_ON(display->drm, !(display->platform.jasperlake ||
+ display->platform.elkhartlake));
+ /* JSP is ICP compatible */
+ return PCH_ICP;
+ case INTEL_PCH_ADP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Alder Lake PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.alderlake_s &&
+ !display->platform.alderlake_p);
+ return PCH_ADP;
+ default:
+ return PCH_NONE;
+ }
+}
+
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
+{
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static void
+intel_virt_detect_pch(const struct intel_display *display,
+ unsigned short *pch_id, enum intel_pch *pch_type)
+{
+ unsigned short id = 0;
+
+ /*
+ * In a virtualized passthrough environment we can end up in a
+ * setup where the ISA bridge cannot be passed through. In that
+ * case, a south bridge may be emulated, and we have to make an
+ * educated guess as to which PCH is really there.
+ */
+
+ if (display->platform.alderlake_s || display->platform.alderlake_p)
+ id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
+ else if (display->platform.tigerlake || display->platform.rocketlake)
+ id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (display->platform.icelake)
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (display->platform.coffeelake ||
+ display->platform.cometlake)
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (display->platform.kabylake || display->platform.skylake)
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (display->platform.haswell_ult ||
+ display->platform.broadwell_ult)
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (display->platform.haswell || display->platform.broadwell)
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (DISPLAY_VER(display) == 6 || display->platform.ivybridge)
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (DISPLAY_VER(display) == 5)
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+
+ if (id)
+ drm_dbg_kms(display->drm, "Assuming PCH ID %04x\n", id);
+ else
+ drm_dbg_kms(display->drm, "Assuming no PCH\n");
+
+ *pch_type = intel_pch_type(display, id);
+
+ /* Sanity check virtual PCH id */
+ if (drm_WARN_ON(display->drm,
+ id && *pch_type == PCH_NONE))
+ id = 0;
+
+ *pch_id = id;
+}
+
+void intel_pch_detect(struct intel_display *display)
+{
+ struct pci_dev *pch = NULL;
+ unsigned short id;
+ enum intel_pch pch_type;
+
+ pch_type = intel_pch_fake_for_south_display(display);
+ if (pch_type != PCH_NONE) {
+ display->pch_type = pch_type;
+ drm_dbg_kms(display->drm,
+ "PCH not involved in display, using fake PCH type %d for south display\n",
+ pch_type);
+ return;
+ }
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough easy for the VMM, which then
+ * only needs to expose the ISA bridge to let the driver know the
+ * real hardware underneath. This is a requirement from the
+ * virtualization team.
+ *
+ * In some virtualized environments (e.g. XEN), there may be an
+ * irrelevant ISA bridge in the system. To work reliably, we should
+ * scan through all the ISA bridge devices and use the first match,
+ * instead of only checking the first device we find.
+ */
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ if (pch->vendor != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ pch_type = intel_pch_type(display, id);
+ if (pch_type != PCH_NONE) {
+ display->pch_type = pch_type;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ intel_virt_detect_pch(display, &id, &pch_type);
+ display->pch_type = pch_type;
+ break;
+ }
+ }
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && !HAS_DISPLAY(display)) {
+ drm_dbg_kms(display->drm,
+ "Display disabled, reverting to NOP PCH\n");
+ display->pch_type = PCH_NOP;
+ } else if (!pch) {
+ if (i915_run_as_guest() && HAS_DISPLAY(display)) {
+ intel_virt_detect_pch(display, &id, &pch_type);
+ display->pch_type = pch_type;
+ } else {
+ drm_dbg_kms(display->drm, "No PCH found.\n");
+ }
+ }
+
+ pci_dev_put(pch);
+}
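
Only the upper bits of the PCI device id identify the PCH generation; the lower bits encode the SKU, which is why intel_pch_detect() masks with INTEL_PCH_DEVICE_ID_MASK before matching. A standalone worked example (the raw device id is made up for illustration):

  #include <stdio.h>

  #define INTEL_PCH_DEVICE_ID_MASK	0xff80
  #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE	0x9c00

  int main(void)
  {
  	/* Hypothetical LPT-LP SKU as read from PCI config space. */
  	unsigned short device = 0x9c43;
  	unsigned short id = device & INTEL_PCH_DEVICE_ID_MASK;

  	/* 0x9c43 & 0xff80 == 0x9c00, i.e. the LPT LP type. */
  	printf("id=0x%04x match=%d\n", id,
  	       id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE);
  	return 0;
  }
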
diff --git a/drivers/gpu/drm/i915/display/intel_pch.h b/drivers/gpu/drm/i915/display/intel_pch.h
new file mode 100644
index 000000000000..cf4dab1b98bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Intel Corporation.
+ */
+
+#ifndef __INTEL_PCH__
+#define __INTEL_PCH__
+
+#include "intel_display_conversion.h"
+
+struct intel_display;
+
+/*
+ * Sorted by south display engine compatibility.
+ * If a new PCH comes with a south display engine that is not
+ * inherited from the latest entry, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
+enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT_H, /* Lynxpoint/Wildcatpoint H PCH */
+ PCH_LPT_LP, /* Lynxpoint/Wildcatpoint LP PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
+ PCH_ICP, /* Ice Lake/Jasper Lake PCH */
+ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
+ PCH_ADP, /* Alder Lake PCH */
+
+ /* Fake PCHs, functionality handled on the same PCI dev */
+ PCH_DG1 = 1024,
+ PCH_DG2,
+ PCH_MTL,
+ PCH_LNL,
+};
+
+#define INTEL_PCH_TYPE(_display) (__to_intel_display(_display)->pch_type)
+#define HAS_PCH_DG2(display) (INTEL_PCH_TYPE(display) == PCH_DG2)
+#define HAS_PCH_ADP(display) (INTEL_PCH_TYPE(display) == PCH_ADP)
+#define HAS_PCH_DG1(display) (INTEL_PCH_TYPE(display) == PCH_DG1)
+#define HAS_PCH_TGP(display) (INTEL_PCH_TYPE(display) == PCH_TGP)
+#define HAS_PCH_ICP(display) (INTEL_PCH_TYPE(display) == PCH_ICP)
+#define HAS_PCH_CNP(display) (INTEL_PCH_TYPE(display) == PCH_CNP)
+#define HAS_PCH_SPT(display) (INTEL_PCH_TYPE(display) == PCH_SPT)
+#define HAS_PCH_LPT_H(display) (INTEL_PCH_TYPE(display) == PCH_LPT_H)
+#define HAS_PCH_LPT_LP(display) (INTEL_PCH_TYPE(display) == PCH_LPT_LP)
+#define HAS_PCH_LPT(display) (INTEL_PCH_TYPE(display) == PCH_LPT_H || \
+ INTEL_PCH_TYPE(display) == PCH_LPT_LP)
+#define HAS_PCH_CPT(display) (INTEL_PCH_TYPE(display) == PCH_CPT)
+#define HAS_PCH_IBX(display) (INTEL_PCH_TYPE(display) == PCH_IBX)
+#define HAS_PCH_NOP(display) (INTEL_PCH_TYPE(display) == PCH_NOP)
+#define HAS_PCH_SPLIT(display) (INTEL_PCH_TYPE(display) != PCH_NONE)
+
+void intel_pch_detect(struct intel_display *display);
+
+#endif /* __INTEL_PCH__ */
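
Because the enum is sorted by south display compatibility, callers can gate features with ordered comparisons instead of per-PCH checks; the fake PCH values start at 1024 precisely so they sort above every real PCH and pass such checks. A hypothetical gate as a sketch:

  /* Anything with a south display at least as capable as ICP's. */
  static bool demo_has_icp_or_newer(struct intel_display *display)
  {
  	return INTEL_PCH_TYPE(display) >= PCH_ICP;
  }
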
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 99f6d6f53fa7..3456c794e0e7 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -3,12 +3,14 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_fdi.h"
@@ -20,28 +22,27 @@
#include "intel_pps.h"
#include "intel_sdvo.h"
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
- return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) ||
- (HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A);
+ return HAS_PCH_IBX(display) || HAS_PCH_CPT(display) ||
+ (HAS_PCH_LPT_H(display) && pch_transcoder == PIPE_A);
}
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
return PIPE_A;
else
return crtc->pipe;
}
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_dp_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t dp_reg)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
@@ -52,16 +53,15 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
port_name(port), pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ HAS_PCH_IBX(display) && !state && port_pipe == PIPE_B,
"IBX PCH DP %c still using transcoder B\n",
port_name(port));
}
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_hdmi_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t hdmi_reg)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
@@ -72,20 +72,19 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
port_name(port), pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ HAS_PCH_IBX(display) && !state && port_pipe == PIPE_B,
"IBX PCH HDMI %c still using transcoder B\n",
port_name(port));
}
-static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_ports_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
+ assert_pch_dp_disabled(display, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(display, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(display, pipe, PORT_D, PCH_DP_D);
INTEL_DISPLAY_STATE_WARN(display,
intel_crt_port_enabled(display, PCH_ADPA, &port_pipe) && port_pipe == pipe,
@@ -93,20 +92,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe,
+ intel_lvds_port_enabled(display, PCH_LVDS, &port_pipe) && port_pipe == pipe,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
/* PCH SDVOB multiplex with HDMIB */
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
+ assert_pch_hdmi_disabled(display, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(display, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(display, pipe, PORT_D, PCH_HDMID);
}
-static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_transcoder_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
bool enabled;
@@ -117,45 +115,45 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_hdmi_port(struct intel_display *display,
enum port port, i915_reg_t hdmi_reg)
{
- u32 val = intel_de_read(dev_priv, hdmi_reg);
+ u32 val = intel_de_read(display, hdmi_reg);
if (val & SDVO_ENABLE ||
(val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for HDMI %c\n",
port_name(port));
val &= ~SDVO_PIPE_SEL_MASK;
val |= SDVO_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, hdmi_reg, val);
+ intel_de_write(display, hdmi_reg, val);
}
-static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_dp_port(struct intel_display *display,
enum port port, i915_reg_t dp_reg)
{
- u32 val = intel_de_read(dev_priv, dp_reg);
+ u32 val = intel_de_read(display, dp_reg);
if (val & DP_PORT_EN ||
(val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for DP %c\n",
port_name(port));
val &= ~DP_PIPE_SEL_MASK;
val |= DP_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, dp_reg, val);
+ intel_de_write(display, dp_reg, val);
}
-static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+static void ibx_sanitize_pch_ports(struct intel_display *display)
{
/*
* The BIOS may select transcoder B on some of the PCH
@@ -168,14 +166,14 @@ static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
* (see intel_dp_link_down(), intel_disable_hdmi(),
* intel_disable_sdvo()).
*/
- ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+ ibx_sanitize_pch_dp_port(display, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(display, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(display, PORT_D, PCH_DP_D);
/* PCH SDVOB multiplex with HDMIB */
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+ ibx_sanitize_pch_hdmi_port(display, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(display, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(display, PORT_D, PCH_HDMID);
}
static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
@@ -225,44 +223,42 @@ void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder)));
-
- intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_HBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder)));
+
+ intel_de_write(display, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_VBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNCSHIFT(display, cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(display, crtc_state->shared_dpll);
+ assert_dpll_enabled(display, crtc_state->intel_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(display, pipe);
assert_fdi_rx_enabled(display, pipe);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
reg = TRANS_CHICKEN2(pipe);
val = intel_de_read(display, reg);
/*
@@ -280,7 +276,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val = intel_de_read(display, reg);
pipeconf_val = intel_de_read(display, TRANSCONF(display, pipe));
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
val |= TRANS_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
@@ -299,7 +295,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) {
- if (HAS_PCH_IBX(dev_priv) &&
+ if (HAS_PCH_IBX(display) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX;
else
@@ -317,7 +313,6 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -326,18 +321,18 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
assert_fdi_rx_disabled(display, pipe);
/* Ports must be off as well */
- assert_pch_ports_disabled(dev_priv, pipe);
+ assert_pch_ports_disabled(display, pipe);
reg = PCH_TRANSCONF(pipe);
- intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0);
+ intel_de_rmw(display, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ if (intel_de_wait_for_clear(display, reg, TRANS_STATE_ENABLE, 50))
+ drm_err(display->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
/* Workaround: Clear the timing override chicken bit again. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
+ intel_de_rmw(display, TRANS_CHICKEN2(pipe),
TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
@@ -366,14 +361,13 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state,
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
u32 temp;
- assert_pch_transcoder_disabled(dev_priv, pipe);
+ assert_pch_transcoder_disabled(display, pipe);
/* For PCH output, training FDI link */
intel_fdi_link_train(crtc, crtc_state);
@@ -382,14 +376,14 @@ void ilk_pch_enable(struct intel_atomic_state *state,
* We need to program the right clock selection
* before writing the pixel multiplier into the DPLL.
*/
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
u32 sel;
temp = intel_de_read(display, PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
- if (crtc_state->shared_dpll ==
- intel_get_shared_dpll_by_id(display, DPLL_ID_PCH_PLL_B))
+ if (crtc_state->intel_dpll ==
+ intel_get_dpll_by_id(display, DPLL_ID_PCH_PLL_B))
temp |= sel;
else
temp &= ~sel;
@@ -401,11 +395,11 @@ void ilk_pch_enable(struct intel_atomic_state *state,
* transcoder, and we actually should do this to not upset any PCH
* transcoder that already uses the clock when we share it.
*
- * Note that enable_shared_dpll tries to do the right thing, but
- * get_shared_dpll unconditionally resets the pll - we need that
+ * Note that dpll_enable tries to do the right thing, but
+ * get_dpll unconditionally resets the pll - we need that
* to have the right LVDS enable sequence.
*/
- intel_enable_shared_dpll(crtc_state);
+ intel_dpll_enable(crtc_state);
/* set transcoder timing, panel must allow it */
assert_pps_unlocked(display, pipe);
@@ -418,7 +412,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev_priv) &&
+ if (HAS_PCH_CPT(display) &&
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -459,23 +453,27 @@ void ilk_pch_disable(struct intel_atomic_state *state,
void ilk_pch_post_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
ilk_disable_pch_transcoder(crtc);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
/* disable TRANS_DP_CTL */
- intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe),
+ intel_de_rmw(display, TRANS_DP_CTL(pipe),
TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK,
TRANS_DP_PORT_SEL_NONE);
/* disable DPLL_SEL */
- intel_de_rmw(dev_priv, PCH_DPLL_SEL,
+ intel_de_rmw(display, PCH_DPLL_SEL,
TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0);
}
ilk_fdi_pll_disable(crtc);
+
+ intel_dpll_disable(old_crtc_state);
}
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
@@ -497,10 +495,9 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum pipe pipe = crtc->pipe;
enum intel_dpll_id pll_id;
bool pll_active;
@@ -518,7 +515,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
&crtc_state->fdi_m_n);
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
/*
* The pipe->pch transcoder and pch transcoder->pll
* mapping is fixed.
@@ -532,8 +529,8 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
pll_id = DPLL_ID_PCH_PLL_A;
}
- crtc_state->shared_dpll = intel_get_shared_dpll_by_id(display, pll_id);
- pll = crtc_state->shared_dpll;
+ crtc_state->intel_dpll = intel_get_dpll_by_id(display, pll_id);
+ pll = crtc_state->intel_dpll;
pll_active = intel_dpll_get_hw_state(display, pll,
&crtc_state->dpll_hw_state);
@@ -550,8 +547,6 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val, pipeconf_val;
@@ -559,49 +554,49 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
assert_fdi_tx_enabled(display, (enum pipe)cpu_transcoder);
assert_fdi_rx_enabled(display, PIPE_A);
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(display, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(display, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder));
+ pipeconf_val = intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder));
if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK)
val |= TRANS_INTERLACE_INTERLACED;
else
val |= TRANS_INTERLACE_PROGRESSIVE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
- if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ intel_de_write(display, LPT_TRANSCONF, val);
+ if (intel_de_wait_for_set(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
+ drm_err(display->drm, "Failed to enable PCH transcoder\n");
}
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+static void lpt_disable_pch_transcoder(struct intel_display *display)
{
- intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0);
+ intel_de_rmw(display, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ if (intel_de_wait_for_clear(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
+ drm_err(display->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
+ intel_de_rmw(display, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void lpt_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- assert_pch_transcoder_disabled(dev_priv, PIPE_A);
+ assert_pch_transcoder_disabled(display, PIPE_A);
lpt_program_iclkip(crtc_state);
@@ -614,36 +609,36 @@ void lpt_pch_enable(struct intel_atomic_state *state,
void lpt_pch_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- lpt_disable_pch_transcoder(dev_priv);
+ lpt_disable_pch_transcoder(display);
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
}
void lpt_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
+ if ((intel_de_read(display, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
return;
crtc_state->has_pch_encoder = true;
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ tmp = intel_de_read(display, FDI_RX_CTL(PIPE_A));
crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
&crtc_state->fdi_m_n);
- crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+ crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(display);
}
-void intel_pch_sanitize(struct drm_i915_private *i915)
+void intel_pch_sanitize(struct intel_display *display)
{
- if (HAS_PCH_IBX(i915))
- ibx_sanitize_pch_ports(i915);
+ if (HAS_PCH_IBX(display))
+ ibx_sanitize_pch_ports(display);
}
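
Many hunks above funnel register updates through intel_de_rmw(). Its effect is a plain read-modify-write on a display register; an open-coded equivalent, assuming the intel_de_read()/intel_de_write() accessors used throughout this file:

  /* What intel_de_rmw(display, reg, clear, set) boils down to. */
  static u32 demo_de_rmw(struct intel_display *display, i915_reg_t reg,
  		       u32 clear, u32 set)
  {
  	u32 old, val;

  	old = intel_de_read(display, reg);
  	val = (old & ~clear) | set;
  	intel_de_write(display, reg, val);

  	return old;	/* the pre-modification value, like the real helper */
  }
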
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h
index 35f8288af3d1..cd6b3ed05887 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.h
@@ -9,14 +9,14 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_link_m_n;
#ifdef I915
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -41,9 +41,9 @@ void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
-void intel_pch_sanitize(struct drm_i915_private *i915);
+void intel_pch_sanitize(struct intel_display *display);
#else
-static inline bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+static inline bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
return false;
@@ -90,7 +90,7 @@ static inline void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
}
-static inline void intel_pch_sanitize(struct drm_i915_private *i915)
+static inline void intel_pch_sanitize(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 33467de3d115..d3c5255bf1a8 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -3,119 +3,123 @@
* Copyright © 2021 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
+#include "intel_sbi_regs.h"
-static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_reset_mphy(struct intel_display *display)
{
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
+ intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
- if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset assert timeout\n");
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
+ intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
- if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
-static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_program_mphy(struct intel_display *display)
{
u32 tmp;
- lpt_fdi_reset_mphy(dev_priv);
+ lpt_fdi_reset_mphy(display);
- tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
- intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x8008, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2008, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2108, SBI_MPHY);
tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2108, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x206C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x216C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x216C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2080, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2080, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2180, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2180, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x208C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x218C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x218C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2098, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2098, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2198, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2198, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x20C4, SBI_MPHY);
tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x20C4, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x21C4, SBI_MPHY);
tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x21C4, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x20EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x20EC, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x21EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x21EC, tmp, SBI_MPHY);
}
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct intel_display *display)
{
u32 temp;
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_GATE);
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL6, temp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
struct iclkip_params {
@@ -175,87 +179,86 @@ int lpt_iclkip(const struct intel_crtc_state *crtc_state)
/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
struct iclkip_params p;
u32 temp;
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
lpt_compute_iclkip(&p, clock);
- drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);
+ drm_WARN_ON(display->drm, lpt_iclkip_freq(&p) != clock);
/* This should not happen with any sane values */
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
/* Program SSCDIVINTPHASE6 */
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
- intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL6, temp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
/* Wait for initialization time */
udelay(24);
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
-int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+int lpt_get_iclkip(struct intel_display *display)
{
struct iclkip_params p;
u32 temp;
- if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ if ((intel_de_read(display, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
iclkip_params_init(&p);
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
if (temp & SBI_SSCCTL_DISABLE) {
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
return 0;
}
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCDIVINTPHASE6, SBI_ICLK);
p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCAUXDIV6, SBI_ICLK);
p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;
@@ -268,68 +271,68 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
-static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+static void lpt_enable_clkout_dp(struct intel_display *display,
bool with_spread, bool with_fdi)
{
u32 reg, tmp;
- if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ if (drm_WARN(display->drm, with_fdi && !with_spread,
"FDI requires downspread\n"))
with_spread = true;
- if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ if (drm_WARN(display->drm, HAS_PCH_LPT_LP(display) &&
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(24);
if (with_spread) {
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
if (with_fdi)
- lpt_fdi_program_mphy(dev_priv);
+ lpt_fdi_program_mphy(display);
}
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(display, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+ intel_sbi_write(display, reg, tmp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
/* Sequence to disable CLKOUT_DP */
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+void lpt_disable_clkout_dp(struct intel_display *display)
{
u32 reg, tmp;
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(display, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+ intel_sbi_write(display, reg, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCCTL, SBI_ICLK);
if (!(tmp & SBI_SSCCTL_DISABLE)) {
if (!(tmp & SBI_SSCCTL_PATHALT)) {
tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(32);
}
tmp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
}
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
#define BEND_IDX(steps) ((50 + (steps)) / 5)
@@ -364,39 +367,39 @@ static const u16 sscdivintphase[] = {
* < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
* change in clock period = -(steps / 10) * 5.787 ps
*/
-static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
+static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
{
u32 tmp;
int idx = BEND_IDX(steps);
- if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
+ if (drm_WARN_ON(display->drm, steps % 5 != 0))
return;
- if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
+ if (drm_WARN_ON(display->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
if (steps % 10 != 0)
tmp = 0xAAAAAAAB;
else
tmp = 0x00000000;
- intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCDIVINTPHASE, SBI_ICLK);
tmp &= 0xffff0000;
tmp |= sscdivintphase[idx];
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
#undef BEND_IDX
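To make the index math concrete: BEND_IDX() maps bend steps in [-50, +50],
in increments of 5, onto sscdivintphase[0..20], so e.g. steps = -25 gives
(50 - 25) / 5 = 5 and selects sscdivintphase[5]; the 0xAAAAAAAB dither word
is only needed for odd multiples of 5 (steps % 10 != 0). A small helper
restating that mapping, under the same range assumption:

	static u16 bend_steps_to_divintphase(int steps)
	{
		/* caller must pass a multiple of 5 in [-50, 50], as checked above */
		return sscdivintphase[(50 + steps) / 5];
	}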
-static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+static bool spll_uses_pch_ssc(struct intel_display *display)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, SPLL_CTL);
if ((ctl & SPLL_PLL_ENABLE) == 0)
return false;
@@ -405,18 +408,17 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
- if (IS_BROADWELL(dev_priv) &&
+ if (display->platform.broadwell &&
(ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
return true;
return false;
}
-static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
- enum intel_dpll_id id)
+static bool wrpll_uses_pch_ssc(struct intel_display *display, enum intel_dpll_id id)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, WRPLL_CTL(id));
if ((ctl & WRPLL_PLL_ENABLE) == 0)
return false;
@@ -424,7 +426,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
return true;
- if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
+ if ((display->platform.broadwell || display->platform.haswell_ult) &&
(ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
@@ -432,12 +434,12 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
return false;
}
-static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void lpt_init_pch_refclk(struct intel_display *display)
{
struct intel_encoder *encoder;
bool has_fdi = false;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_fdi = true;
@@ -462,39 +464,38 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
- dev_priv->display.dpll.pch_ssc_use = 0;
+ display->dpll.pch_ssc_use = 0;
- if (spll_uses_pch_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ if (spll_uses_pch_ssc(display)) {
+ drm_dbg_kms(display->drm, "SPLL using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL1)) {
+ drm_dbg_kms(display->drm, "WRPLL1 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL2)) {
+ drm_dbg_kms(display->drm, "WRPLL2 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (dev_priv->display.dpll.pch_ssc_use)
+ if (display->dpll.pch_ssc_use)
return;
if (has_fdi) {
- lpt_bend_clkout_dp(dev_priv, 0);
- lpt_enable_clkout_dp(dev_priv, true, true);
+ lpt_bend_clkout_dp(display, 0);
+ lpt_enable_clkout_dp(display, true, true);
} else {
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
}
}
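A condensed restatement of the decision above: if any HSW/BDW PLL still
consumes the PCH SSC reference, CLKOUT_DP must be left untouched; otherwise
bend to 0 ppm and enable CLKOUT_DP with spread (programming the FDI mPHY as
well when an analog or LVDS output needs FDI), or gate CLKOUT_DP entirely
when nothing uses it.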
-static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ilk_init_pch_refclk(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
u32 val, final;
bool has_lvds = false;
@@ -521,7 +522,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
has_ck505 = display->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
@@ -530,7 +531,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
u32 temp;
temp = intel_de_read(display, PCH_DPLL(pll->info->id));
@@ -607,7 +608,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
+ drm_dbg_kms(display->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else {
val &= ~DREF_SSC1_ENABLE;
@@ -623,7 +624,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
} else {
@@ -670,10 +671,10 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/*
* Initialize reference clocks when the driver loads
*/
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+void intel_init_pch_refclk(struct intel_display *display)
{
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ilk_init_pch_refclk(dev_priv);
- else if (HAS_PCH_LPT(dev_priv))
- lpt_init_pch_refclk(dev_priv);
+ if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
+ ilk_init_pch_refclk(display);
+ else if (HAS_PCH_LPT(display))
+ lpt_init_pch_refclk(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
index ae3403c0ced8..25cc53c568bc 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -8,25 +8,25 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
#ifdef I915
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct intel_display *display);
+int lpt_get_iclkip(struct intel_display *display);
int lpt_iclkip(const struct intel_crtc_state *crtc_state);
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+void intel_init_pch_refclk(struct intel_display *display);
+void lpt_disable_clkout_dp(struct intel_display *display);
#else
static inline void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
}
-static inline void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_iclkip(struct intel_display *display)
{
}
-static inline int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+static inline int lpt_get_iclkip(struct intel_display *display)
{
return 0;
}
@@ -34,10 +34,10 @@ static inline int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+static inline void intel_init_pch_refclk(struct intel_display *display)
{
}
-static inline void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_clkout_dp(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 3c3ecf288570..13541be4d6df 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -5,11 +5,11 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 10e26c3db946..c2b4b2254190 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -30,10 +30,10 @@
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_pipe_crc.h"
#include "intel_pipe_crc_regs.h"
@@ -75,7 +75,7 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_auto_source(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source)
{
@@ -85,8 +85,8 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
- drm_modeset_lock_all(&dev_priv->drm);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ drm_modeset_lock_all(display->drm);
+ for_each_intel_encoder(display->drm, encoder) {
if (!encoder->base.crtc)
continue;
@@ -113,7 +113,7 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_DP_D;
break;
default:
- drm_WARN(&dev_priv->drm, 1, "nonexisting DP port %c\n",
+ drm_WARN(display->drm, 1, "nonexisting DP port %c\n",
port_name(dig_port->base.port));
break;
}
@@ -122,10 +122,10 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
}
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
-static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int vlv_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -133,7 +133,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
@@ -148,7 +148,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_CHERRYVIEW(dev_priv))
+ if (!display->platform.cherryview)
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
need_stable_symbols = true;
@@ -170,7 +170,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -186,26 +186,26 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
default:
return -EINVAL;
}
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
return 0;
}
-static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int i9xx_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
break;
case INTEL_PIPE_CRC_SOURCE_TV:
- if (!SUPPORTS_TV(dev_priv))
+ if (!SUPPORTS_TV(display))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
@@ -229,10 +229,10 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+static void vlv_undo_pipe_scramble_reset(struct intel_display *display,
enum pipe pipe)
{
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
switch (pipe) {
case PIPE_A:
@@ -249,7 +249,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -281,18 +281,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
static void
intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
- i915gm_irq_cstate_wa(dev_priv, enable);
+ if (display->platform.i945gm || display->platform.i915gm)
+ i915gm_irq_cstate_wa(display, enable);
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(&dev_priv->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state) {
ret = -ENOMEM;
goto unlock;
@@ -311,7 +311,7 @@ retry:
pipe_config->uapi.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
- if (IS_HASWELL(dev_priv) &&
+ if (display->platform.haswell &&
pipe_config->hw.active && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->uapi.mode_changed = true;
@@ -327,13 +327,13 @@ put_state:
drm_atomic_state_put(state);
unlock:
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Toggling workaround to %i returns %i\n", enable, ret);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
-static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int ivb_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -361,7 +361,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int skl_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -404,22 +404,22 @@ static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int get_new_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source, u32 *val)
{
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
return i8xx_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.ironlake || display->platform.sandybridge)
return ilk_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_pipe_crc_ctl_reg(display, pipe, source, val);
else
- return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ return skl_pipe_crc_ctl_reg(display, pipe, source, val);
}
static int
@@ -447,7 +447,7 @@ void intel_crtc_crc_init(struct intel_crtc *crtc)
spin_lock_init(&pipe_crc->lock);
}
-static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i8xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -459,7 +459,7 @@ static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i9xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -472,7 +472,7 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+static int vlv_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -487,7 +487,7 @@ static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ilk_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -501,7 +501,7 @@ static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ivb_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -515,7 +515,7 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+static int skl_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -535,21 +535,21 @@ static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
}
static int
-intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+intel_is_valid_crc_source(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
- if (DISPLAY_VER(dev_priv) == 2)
- return i8xx_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_crc_source_valid(dev_priv, source);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_crc_source_valid(dev_priv, source);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
- return ilk_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_crc_source_valid(dev_priv, source);
+ if (DISPLAY_VER(display) == 2)
+ return i8xx_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_crc_source_valid(display, source);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_crc_source_valid(display, source);
+ else if (display->platform.ironlake || display->platform.sandybridge)
+ return ilk_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_crc_source_valid(display, source);
else
- return skl_crc_source_valid(dev_priv, source);
+ return skl_crc_source_valid(display, source);
}
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -562,16 +562,16 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum intel_pipe_crc_source source;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
- intel_is_valid_crc_source(dev_priv, source) == 0) {
+ intel_is_valid_crc_source(display, source) == 0) {
*values_cnt = 5;
return 0;
}
@@ -583,7 +583,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
@@ -594,14 +593,14 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
power_domain = POWER_DOMAIN_PIPE(pipe);
wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -610,17 +609,17 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
if (enable)
intel_crtc_crc_setup_workarounds(crtc, true);
- ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ ret = get_new_crc_ctl_reg(display, pipe, &source, &val);
if (ret != 0)
goto out;
pipe_crc->source = source;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
if (!source) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ if (display->platform.valleyview || display->platform.cherryview)
+ vlv_undo_pipe_scramble_reset(display, pipe);
}
pipe_crc->skipped = 0;
@@ -636,7 +635,7 @@ out:
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
u32 val = 0;
@@ -644,19 +643,20 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
if (!crtc->base.crc.opened)
return;
- if (get_new_crc_ctl_reg(dev_priv, pipe, &pipe_crc->source, &val) < 0)
+ if (get_new_crc_ctl_reg(display, pipe, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
pipe_crc->skipped = 0;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
}
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
@@ -665,7 +665,7 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
pipe_crc->skipped = INT_MIN;
spin_unlock_irq(&pipe_crc->lock);
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), 0);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), 0);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
intel_synchronize_irq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 7276179df878..36fb07471deb 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -33,20 +33,22 @@
#include <linux/dma-fence-chain.h>
#include <linux/dma-resv.h>
+#include <linux/iosys-map.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_cache.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_panic.h>
#include "gem/i915_gem_object.h"
-#include "i915_config.h"
#include "i915_scheduler_types.h"
#include "i915_vma.h"
#include "i9xx_plane_regs.h"
-#include "intel_atomic_plane.h"
+#include "intel_bo.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
#include "intel_display_rps.h"
@@ -54,6 +56,9 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_fbdev.h"
+#include "intel_plane.h"
+#include "intel_psr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -174,11 +179,27 @@ bool intel_plane_needs_physical(struct intel_plane *plane)
DISPLAY_INFO(display)->cursor_needs_physical;
}
-bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier)
+bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
+ u64 modifier)
{
+ if (intel_format_info_is_yuv_semiplanar(drm_format_info(format), modifier) ||
+ format == DRM_FORMAT_C8)
+ return false;
+
return plane->can_async_flip && plane->can_async_flip(modifier);
}
+bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
+ u32 format,
+ u64 modifier)
+{
+ if (!plane->funcs->format_mod_supported(plane, format, modifier))
+ return false;
+
+ return intel_plane_can_async_flip(to_intel_plane(plane),
+ format, modifier);
+}
+
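A usage sketch for the new helper, assuming the drm_plane_funcs
.format_mod_supported_async hook it appears shaped for (the hook name and
the wiring are assumptions, not shown in this hunk):

	static const struct drm_plane_funcs example_plane_funcs = {
		/* ... the usual plane funcs ... */
		.format_mod_supported_async = intel_plane_format_mod_supported_async,
	};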
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate)
@@ -318,7 +339,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
* display blinking due to constant cdclk changes.
*/
if (new_crtc_state->min_cdclk[plane->id] <=
- cdclk_state->min_cdclk[crtc->pipe])
+ intel_cdclk_min_cdclk(cdclk_state, crtc->pipe))
return 0;
drm_dbg_kms(display->drm,
@@ -326,7 +347,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
plane->base.base.id, plane->base.name,
new_crtc_state->min_cdclk[plane->id],
crtc->base.base.id, crtc->base.name,
- cdclk_state->min_cdclk[crtc->pipe]);
+ intel_cdclk_min_cdclk(cdclk_state, crtc->pipe));
*need_cdclk_calc = true;
return 0;
@@ -719,8 +740,8 @@ intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
return NULL;
}
-int intel_plane_atomic_check(struct intel_atomic_state *state,
- struct intel_plane *plane)
+static int plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(state);
struct intel_plane_state *new_plane_state =
@@ -968,10 +989,10 @@ void intel_crtc_planes_update_arm(struct intel_dsb *dsb,
i9xx_crtc_planes_update_arm(dsb, state, crtc);
}
-int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
- struct intel_crtc_state *crtc_state,
- int min_scale, int max_scale,
- bool can_position)
+int intel_plane_check_clipping(struct intel_plane_state *plane_state,
+ struct intel_crtc_state *crtc_state,
+ int min_scale, int max_scale,
+ bool can_position)
{
struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -1070,7 +1091,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
/* Wa_16023981245 */
if ((DISPLAY_VERx100(display) == 2000 ||
- DISPLAY_VERx100(display) == 3000) &&
+ DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 3002) &&
src_x % 2 != 0)
hsub = 2;
} else {
@@ -1251,14 +1273,176 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
intel_plane_unpin_fb(old_plane_state);
}
+/* Handle Y-tiling, but only if DPT is enabled (otherwise disabling tiling is easier).
+ * All DPT hardware uses 128-byte-wide tiles, so a Y-tile covers 32x32
+ * pixels for 32-bit pixels.
+ */
+#define YTILE_WIDTH 32
+#define YTILE_HEIGHT 32
+#define YTILE_SIZE (YTILE_WIDTH * YTILE_HEIGHT * 4)
+
+static unsigned int intel_ytile_get_offset(unsigned int width, unsigned int x, unsigned int y)
+{
+ u32 offset;
+ unsigned int swizzle;
+ unsigned int width_in_blocks = DIV_ROUND_UP(width, 32);
+
+ /* Block offset */
+ offset = ((y / YTILE_HEIGHT) * width_in_blocks + (x / YTILE_WIDTH)) * YTILE_SIZE;
+
+ x = x % YTILE_WIDTH;
+ y = y % YTILE_HEIGHT;
+
+ /* bit order inside a block is x4 x3 x2 y4 y3 y2 y1 y0 x1 x0 */
+ swizzle = (x & 3) | ((y & 0x1f) << 2) | ((x & 0x1c) << 5);
+ offset += swizzle * 4;
+ return offset;
+}
+
+static unsigned int intel_4tile_get_offset(unsigned int width, unsigned int x, unsigned int y)
+{
+ u32 offset;
+ unsigned int swizzle;
+ unsigned int width_in_blocks = DIV_ROUND_UP(width, 32);
+
+ /* Block offset */
+ offset = ((y / YTILE_HEIGHT) * width_in_blocks + (x / YTILE_WIDTH)) * YTILE_SIZE;
+
+ x = x % YTILE_WIDTH;
+ y = y % YTILE_HEIGHT;
+
+ /* bit order inside a block is y4 y3 x4 y2 x3 x2 y1 y0 x1 x0 */
+ swizzle = (x & 3) | ((y & 3) << 2) | ((x & 0xc) << 2) | (y & 4) << 4 |
+ ((x & 0x10) << 3) | ((y & 0x18) << 5);
+ offset += swizzle * 4;
+ return offset;
+}
+
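A worked example against intel_ytile_get_offset() above: for a
64-pixel-wide 32bpp framebuffer, pixel (x = 33, y = 1) lands in the second
tile of the first tile row, so the block offset is 1 * YTILE_SIZE = 4096;
within the tile, x % 32 = 1 and y % 32 = 1 give swizzle = (1 & 3) | (1 << 2)
= 0x5, for a final byte offset of 4096 + 0x5 * 4 = 4116.

	/* sanity check of the numbers above */
	WARN_ON(intel_ytile_get_offset(64, 33, 1) != 4116);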
+static void intel_panic_flush(struct drm_plane *plane)
+{
+ struct intel_plane_state *plane_state = to_intel_plane_state(plane->state);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(plane->state->crtc->state);
+ struct intel_plane *iplane = to_intel_plane(plane);
+ struct intel_display *display = to_intel_display(iplane);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ intel_bo_panic_finish(intel_fb);
+
+ if (crtc_state->enable_psr2_sel_fetch) {
+ /* Force a full update for psr2 */
+ intel_psr2_panic_force_full_update(display, crtc_state);
+ }
+
+ /* Flush the cache and don't disable tiling if it's the fbdev framebuffer. */
+ if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
+ struct iosys_map map;
+
+ intel_fbdev_get_map(display->fbdev.fbdev, &map);
+ drm_clflush_virt_range(map.vaddr, fb->pitches[0] * fb->height);
+ return;
+ }
+
+ if (fb->modifier && iplane->disable_tiling)
+ iplane->disable_tiling(iplane);
+}
+
+static unsigned int (*intel_get_tiling_func(u64 fb_modifier))(unsigned int width,
+ unsigned int x,
+ unsigned int y)
+{
+ switch (fb_modifier) {
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return intel_ytile_get_offset;
+ case I915_FORMAT_MOD_4_TILED:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
+ return intel_4tile_get_offset;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ default:
+ /* Not supported yet */
+ return NULL;
+ }
+}
+
+static int intel_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct intel_plane_state *plane_state;
+ struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct intel_display *display = to_intel_display(plane->dev);
+
+ if (!plane->state || !plane->state->fb || !plane->state->visible)
+ return -ENODEV;
+
+ plane_state = to_intel_plane_state(plane->state);
+ fb = plane_state->hw.fb;
+ intel_fb = to_intel_framebuffer(fb);
+
+ obj = intel_fb_bo(fb);
+ if (!obj)
+ return -ENODEV;
+
+ if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
+ intel_fbdev_get_map(display->fbdev.fbdev, &sb->map[0]);
+ } else {
+ int ret;
+ /* Can't disable tiling if DPT is in use */
+ if (intel_fb_uses_dpt(fb)) {
+ if (fb->format->cpp[0] != 4)
+ return -EOPNOTSUPP;
+ intel_fb->panic_tiling = intel_get_tiling_func(fb->modifier);
+ if (!intel_fb->panic_tiling)
+ return -EOPNOTSUPP;
+ }
+ sb->private = intel_fb;
+ ret = intel_bo_panic_setup(sb);
+ if (ret)
+ return ret;
+ }
+ sb->width = fb->width;
+ sb->height = fb->height;
+ /* Use the generic linear format, because tiling, RC, CCS and CC
+ * will all be disabled in disable_tiling().
+ */
+ sb->format = drm_format_info(fb->format->format);
+ sb->pitch[0] = fb->pitches[0];
+
+ return 0;
+}
+
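The panic_tiling hook stashed above is what lets the panic blitter turn
linear (x, y) coordinates into tiled byte offsets. A minimal sketch of a
pixel writer in that spirit (panic_set_pixel() is illustrative only; the
real plotting lives in the common intel_bo/drm_panic code):

	static void panic_set_pixel(struct drm_scanout_buffer *sb,
				    unsigned int x, unsigned int y, u32 color)
	{
		struct intel_framebuffer *fb = sb->private;
		unsigned int offset;

		if (fb->panic_tiling)
			offset = fb->panic_tiling(sb->width, x, y);
		else
			offset = y * sb->pitch[0] + x * 4;	/* linear 32bpp */

		iosys_map_wr(&sb->map[0], offset, u32, color);
	}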
static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
};
+static const struct drm_plane_helper_funcs intel_primary_plane_helper_funcs = {
+ .prepare_fb = intel_prepare_plane_fb,
+ .cleanup_fb = intel_cleanup_plane_fb,
+ .get_scanout_buffer = intel_get_scanout_buffer,
+ .panic_flush = intel_panic_flush,
+};
+
void intel_plane_helper_add(struct intel_plane *plane)
{
- drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(&plane->base, &intel_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
}
void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state,
@@ -1418,8 +1602,8 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
return 0;
}
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+int intel_plane_add_affected(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1513,7 +1697,7 @@ static int intel_add_affected_planes(struct intel_atomic_state *state)
return 0;
}
-int intel_atomic_check_planes(struct intel_atomic_state *state)
+int intel_plane_atomic_check(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -1527,7 +1711,7 @@ int intel_atomic_check_planes(struct intel_atomic_state *state)
return ret;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_atomic_check(state, plane);
+ ret = plane_atomic_check(state, plane);
if (ret) {
drm_dbg_atomic(display->drm,
"[PLANE:%d:%s] atomic driver check failed\n",
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h
index 6efac923dcbc..4ef012c08fa4 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_plane.h
@@ -3,8 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
-#ifndef __INTEL_ATOMIC_PLANE_H__
-#define __INTEL_ATOMIC_PLANE_H__
+#ifndef __INTEL_PLANE_H__
+#define __INTEL_PLANE_H__
#include <linux/types.h>
@@ -21,7 +21,8 @@ enum plane_id;
struct intel_plane *
intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id);
-bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier);
+bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
+ u64 modifier);
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate);
@@ -68,15 +69,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
-int intel_plane_atomic_check(struct intel_atomic_state *state,
- struct intel_plane *plane);
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc);
-int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
- struct intel_crtc_state *crtc_state,
- int min_scale, int max_scale,
- bool can_position);
+int intel_plane_check_clipping(struct intel_plane_state *plane_state,
+ struct intel_crtc_state *crtc_state,
+ int min_scale, int max_scale,
+ bool can_position);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -84,10 +83,13 @@ void intel_plane_helper_add(struct intel_plane *plane);
bool intel_plane_needs_physical(struct intel_plane *plane);
void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state);
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-int intel_atomic_check_planes(struct intel_atomic_state *state);
+int intel_plane_add_affected(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_plane_atomic_check(struct intel_atomic_state *state);
u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
+bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
+ u32 format,
+ u64 modifier);
-#endif /* __INTEL_ATOMIC_PLANE_H__ */
+#endif /* __INTEL_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index b1675b46e06c..4246173ed311 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -6,12 +6,13 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
-#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
void intel_plane_initial_vblank_wait(struct intel_crtc *crtc)
@@ -52,44 +53,57 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
return false;
}
+static enum intel_memory_type
+initial_plane_memory_type(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->platform.dgfx)
+ return INTEL_MEMORY_LOCAL;
+ else if (HAS_LMEMBAR_SMEM_STOLEN(i915))
+ return INTEL_MEMORY_STOLEN_LOCAL;
+ else
+ return INTEL_MEMORY_STOLEN_SYSTEM;
+}
+
static bool
-initial_plane_phys_lmem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
+initial_plane_phys(struct intel_display *display,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_i915_private *i915 = to_i915(display->drm);
- gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_memory_region *mem;
+ enum intel_memory_type mem_type;
+ bool is_present, is_local;
dma_addr_t dma_addr;
- gen8_pte_t pte;
u32 base;
+ mem_type = initial_plane_memory_type(display);
+ mem = intel_memory_region_by_type(i915, mem_type);
+ if (!mem) {
+ drm_dbg_kms(display->drm,
+ "Initial plane memory region (type %s) not initialized\n",
+ intel_memory_type_str(mem_type));
+ return false;
+ }
+
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
- gte += base / I915_GTT_PAGE_SIZE;
+ dma_addr = intel_ggtt_read_entry(&ggtt->vm, base, &is_present, &is_local);
- pte = ioread64(gte);
- if (!(pte & GEN12_GGTT_PTE_LM)) {
+ if (!is_present) {
drm_err(display->drm,
- "Initial plane programming missing PTE_LM bit\n");
+ "Initial plane FB PTE not present\n");
return false;
}
- dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;
-
- if (IS_DGFX(i915))
- mem = i915->mm.regions[INTEL_REGION_LMEM_0];
- else
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
+ if (intel_memory_type_is_local(mem->type) != is_local) {
+ drm_err(display->drm,
+ "Initial plane FB PTE unsuitable for %s\n",
+ mem->region.name);
return false;
}
- /*
- * On lmem we don't currently expect this to
- * ever be placed in the stolen portion.
- */
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
drm_err(display->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
@@ -107,42 +121,6 @@ initial_plane_phys_lmem(struct intel_display *display,
return true;
}
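The reworked check boils down to: read back the GGTT PTE the firmware
programmed for the plane base, and require both that it is present and that
its local-memory attribute matches the region type picked above. Condensed
from the code in this hunk:

	dma_addr = intel_ggtt_read_entry(&ggtt->vm, base, &is_present, &is_local);
	ok = is_present &&
	     intel_memory_type_is_local(mem->type) == is_local &&
	     dma_addr >= mem->region.start && dma_addr <= mem->region.end;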
-static bool
-initial_plane_phys_smem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_memory_region *mem;
- u32 base;
-
- base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
-
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
- return false;
- }
-
- /* FIXME get and validate the dma_addr from the PTE */
- plane_config->phys_base = base;
- plane_config->mem = mem;
-
- return true;
-}
-
-static bool
-initial_plane_phys(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
- return initial_plane_phys_lmem(display, plane_config);
- else
- return initial_plane_phys_smem(display, plane_config);
-}
-
static struct i915_vma *
initial_plane_vma(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
@@ -311,7 +289,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
- intel_bo_to_drm_bo(vma->obj), &mode_cmd)) {
+ intel_bo_to_drm_bo(vma->obj),
+ fb->format, &mode_cmd)) {
drm_dbg_kms(display->drm, "intel fb init failed\n");
goto err_vma;
}
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 63301a01906c..d806c15db7ce 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -5,12 +5,14 @@
#include <linux/bitops.h>
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
@@ -292,40 +294,17 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
- struct intel_display *display = to_intel_display(state);
- const struct intel_bw_state *new_bw_state, *old_bw_state;
- const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
struct intel_crtc *crtc;
int i;
- new_bw_state = intel_atomic_get_new_bw_state(state);
- old_bw_state = intel_atomic_get_old_bw_state(state);
- if (new_bw_state && new_bw_state->qgv_point_peakbw !=
- old_bw_state->qgv_point_peakbw)
+ if (intel_bw_pmdemand_needs_update(state))
return true;
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (new_dbuf_state &&
- new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
+ if (intel_dbuf_pmdemand_needs_update(state))
return true;
- if (DISPLAY_VER(display) < 30) {
- if (new_dbuf_state &&
- new_dbuf_state->enabled_slices !=
- old_dbuf_state->enabled_slices)
- return true;
- }
-
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
- old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- if (new_cdclk_state &&
- (new_cdclk_state->actual.cdclk !=
- old_cdclk_state->actual.cdclk ||
- new_cdclk_state->actual.voltage_level !=
- old_cdclk_state->actual.voltage_level))
+ if (intel_cdclk_pmdemand_needs_update(state))
return true;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -360,7 +339,7 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
/* firmware will calculate the qclk_gv_index, requirement is set to 0 */
new_pmdemand_state->params.qclk_gv_index = 0;
- new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;
+ new_pmdemand_state->params.qclk_gv_bw = intel_bw_qgv_point_peakbw(new_bw_state);
new_dbuf_state = intel_atomic_get_dbuf_state(state);
if (IS_ERR(new_dbuf_state))
@@ -368,12 +347,12 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
if (DISPLAY_VER(display) < 30) {
new_pmdemand_state->params.active_dbufs =
- min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
+ min_t(u8, intel_dbuf_num_enabled_slices(new_dbuf_state), 3);
new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
+ min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), 3);
} else {
new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
+ min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), INTEL_NUM_PIPES(display));
}
new_cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -381,9 +360,9 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
return PTR_ERR(new_cdclk_state);
new_pmdemand_state->params.voltage_index =
- new_cdclk_state->actual.voltage_level;
+ intel_cdclk_actual_voltage_level(new_cdclk_state);
new_pmdemand_state->params.cdclk_freq_mhz =
- DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);
+ DIV_ROUND_UP(intel_cdclk_actual(new_cdclk_state), 1000);
intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);
@@ -476,13 +455,34 @@ static bool intel_pmdemand_req_complete(struct intel_display *display)
XELPDP_PMDEMAND_REQ_ENABLE);
}
-static void intel_pmdemand_wait(struct intel_display *display)
+static void intel_pmdemand_poll(struct intel_display *display)
{
- if (!wait_event_timeout(display->pmdemand.waitqueue,
- intel_pmdemand_req_complete(display),
- msecs_to_jiffies_timeout(10)))
+ const unsigned int timeout_ms = 10;
+ u32 status;
+ int ret;
+
+ ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ XELPDP_PMDEMAND_REQ_ENABLE, 0,
+ 50, timeout_ms, &status);
+
+ if (ret == -ETIMEDOUT)
drm_err(display->drm,
- "timed out waiting for Punit PM Demand Response\n");
+ "timed out waiting for Punit PM Demand Response within %ums (status 0x%08x)\n",
+ timeout_ms, status);
+}
+
+static void intel_pmdemand_wait(struct intel_display *display)
+{
+ /* Wa_14024400148: for LNL, use the polling method */
+ if (DISPLAY_VER(display) == 20) {
+ intel_pmdemand_poll(display);
+ } else {
+ if (!wait_event_timeout(display->pmdemand.waitqueue,
+ intel_pmdemand_req_complete(display),
+ msecs_to_jiffies_timeout(10)))
+ drm_err(display->drm,
+ "timed out waiting for Punit PM Demand Response\n");
+ }
}
/* Required to be programmed during Display Init Sequences. */
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 617ce4993172..b64d0b30f5b1 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -5,11 +5,14 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -91,7 +94,6 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
bool pll_enabled, release_cl_override = false;
@@ -134,7 +136,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = display->platform.cherryview &&
!chv_phy_powergate_ch(display, phy, ch, true);
- if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(display))) {
+ if (vlv_force_pll_on(display, pipe, vlv_get_dpll(display))) {
drm_err(display->drm,
"Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
@@ -158,7 +160,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
intel_de_posting_read(display, intel_dp->output_reg);
if (!pll_enabled) {
- vlv_force_pll_off(dev_priv, pipe);
+ vlv_force_pll_off(display, pipe);
if (release_cl_override)
chv_phy_powergate_ch(display, phy, ch, false);
@@ -351,21 +353,19 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
static int intel_num_pps(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.valleyview || display->platform.cherryview)
return 2;
if (display->platform.geminilake || display->platform.broxton)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_DG1)
return 1;
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
return 2;
return 1;
@@ -374,11 +374,10 @@ static int intel_num_pps(struct intel_display *display)
static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp->pps.pps_idx == 1 &&
- INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) <= PCH_ADP)
+ INTEL_PCH_TYPE(display) >= PCH_ICP &&
+ INTEL_PCH_TYPE(display) <= PCH_ADP)
return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
@@ -500,7 +499,6 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pps_idx;
memset(regs, 0, sizeof(*regs));
@@ -519,7 +517,7 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
if (display->platform.geminilake || display->platform.broxton ||
- INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ INTEL_PCH_TYPE(display) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
regs->pp_div = PP_DIVISOR(display, pps_idx);
@@ -744,11 +742,11 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return false;
+ lockdep_assert_held(&display->pps.mutex);
+
cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
intel_dp->pps.want_panel_vdd = true;
@@ -896,7 +894,6 @@ static void edp_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long delay;
/*
@@ -912,7 +909,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
* operations.
*/
delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
- queue_delayed_work(i915->unordered_wq,
+ queue_delayed_work(display->wq.unordered,
&intel_dp->pps.panel_vdd_work, delay);
}
@@ -925,11 +922,11 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return;
+ lockdep_assert_held(&display->pps.mutex);
+
INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
"[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
@@ -1592,7 +1589,6 @@ static void pps_init_delays(struct intel_dp *intel_dp)
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp_on, pp_off, port_sel = 0;
int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
@@ -1639,7 +1635,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
* power sequencer any more. */
if (display->platform.valleyview || display->platform.cherryview) {
port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ } else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
switch (port) {
case PORT_A:
port_sel = PANEL_PORT_SELECT_DPA;
@@ -1792,9 +1788,7 @@ void intel_pps_unlock_regs_wa(struct intel_display *display)
void intel_pps_setup(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (HAS_PCH_SPLIT(i915) || display->platform.geminilake || display->platform.broxton)
+ if (HAS_PCH_SPLIT(display) || display->platform.geminilake || display->platform.broxton)
display->pps.mmio_base = PCH_PPS_BASE;
else if (display->platform.valleyview || display->platform.cherryview)
display->pps.mmio_base = VLV_PPS_BASE;
@@ -1837,7 +1831,6 @@ void intel_pps_connector_debugfs_add(struct intel_connector *connector)
void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = INVALID_PIPE;
@@ -1846,7 +1839,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
if (drm_WARN_ON(display->drm, HAS_DDI(display)))
return;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
u32 port_sel;
pp_reg = PP_CONTROL(display, 0);
@@ -1855,7 +1848,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, PCH_LVDS, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPA:
g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe);
@@ -1883,7 +1876,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
drm_WARN_ON(display->drm,
port_sel != PANEL_PORT_SELECT_LVDS);
- intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, LVDS, &panel_pipe);
}
val = intel_de_read(display, pp_reg);
diff --git a/drivers/gpu/drm/i915/display/intel_pps_regs.h b/drivers/gpu/drm/i915/display/intel_pps_regs.h
index 8f9dbfab9523..2f014d929d32 100644
--- a/drivers/gpu/drm/i915/display/intel_pps_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_pps_regs.h
@@ -6,7 +6,6 @@
#ifndef __INTEL_PPS_REGS_H__
#define __INTEL_PPS_REGS_H__
-#include "intel_display_conversion.h"
#include "intel_display_reg_defs.h"
/* Panel power sequencing */
@@ -14,11 +13,11 @@
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
#define PCH_PPS_BASE 0xC7200
-#define _MMIO_PPS(dev_priv, pps_idx, reg) \
- _MMIO(__to_intel_display(dev_priv)->pps.mmio_base - PPS_BASE + (reg) + (pps_idx) * 0x100)
+#define _MMIO_PPS(display, pps_idx, reg) \
+ _MMIO((display)->pps.mmio_base - PPS_BASE + (reg) + (pps_idx) * 0x100)
#define _PP_STATUS 0x61200
-#define PP_STATUS(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_STATUS)
+#define PP_STATUS(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_STATUS)
#define PP_ON REG_BIT(31)
/*
* Indicates that all dependencies of the panel are on:
@@ -45,7 +44,7 @@
#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf)
#define _PP_CONTROL 0x61204
-#define PP_CONTROL(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_CONTROL)
+#define PP_CONTROL(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_CONTROL)
#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16)
#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd)
#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
@@ -55,7 +54,7 @@
#define PANEL_POWER_ON REG_BIT(0)
#define _PP_ON_DELAYS 0x61208
-#define PP_ON_DELAYS(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_ON_DELAYS)
+#define PP_ON_DELAYS(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_ON_DELAYS)
#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30)
#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0)
#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1)
@@ -66,12 +65,12 @@
#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_OFF_DELAYS 0x6120C
-#define PP_OFF_DELAYS(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_OFF_DELAYS)
+#define PP_OFF_DELAYS(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_OFF_DELAYS)
#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16)
#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_DIVISOR 0x61210
-#define PP_DIVISOR(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_DIVISOR)
+#define PP_DIVISOR(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_DIVISOR)
#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8)
#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0)
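For reference, after this conversion _MMIO_PPS() resolves a PPS register purely
from the display struct. A worked example of the resulting address arithmetic
(assuming the usual PPS_BASE of 0x61200; index and base chosen for illustration):

    /* With display->pps.mmio_base = PCH_PPS_BASE (0xC7200):
     *   PP_STATUS(display, 1)
     *     = _MMIO(0xC7200 - PPS_BASE + _PP_STATUS + 1 * 0x100)
     *     = _MMIO(0xC7200 - 0x61200 + 0x61200 + 0x100)
     *     = _MMIO(0xC7300)
     */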
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 4e938bad808c..41988e193a41 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,8 +26,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
@@ -36,7 +36,10 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_frontbuffer.h"
@@ -44,7 +47,9 @@
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
+#include "intel_step.h"
#include "intel_vblank.h"
+#include "intel_vrr.h"
#include "skl_universal_plane.h"
/**
@@ -443,7 +448,6 @@ static void psr_event_print(struct intel_display *display,
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
ktime_t time_ns = ktime_get();
@@ -463,8 +467,8 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
if (DISPLAY_VER(display) >= 9) {
u32 val;
- val = intel_de_rmw(dev_priv,
- PSR_EVENT(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ PSR_EVENT(display, cpu_transcoder),
0, 0);
psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
@@ -488,7 +492,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
0, psr_irq_psr_error_bit_get(intel_dp));
- queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
+ queue_work(display->wq.unordered, &intel_dp->psr.work);
}
}
@@ -512,7 +516,7 @@ static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
if (intel_dp->psr.sink_panel_replay_su_support)
drm_dp_dpcd_readb(&intel_dp->aux,
- DP_PANEL_PANEL_REPLAY_CAPABILITY,
+ DP_PANEL_REPLAY_CAP_CAPABILITY,
&su_capability);
else
su_capability = intel_dp->psr_dpcd[1];
@@ -524,7 +528,7 @@ static unsigned int
intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
{
return intel_dp->psr.sink_panel_replay_su_support ?
- DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
+ DP_PANEL_REPLAY_CAP_X_GRANULARITY :
DP_PSR2_SU_X_GRANULARITY;
}
@@ -532,7 +536,7 @@ static unsigned int
intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
{
return intel_dp->psr.sink_panel_replay_su_support ?
- DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
+ DP_PANEL_REPLAY_CAP_Y_GRANULARITY :
DP_PSR2_SU_Y_GRANULARITY;
}
@@ -604,7 +608,8 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
return;
}
- if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
drm_dbg_kms(display->drm,
"Panel doesn't support early transport, eDP Panel Replay not possible\n");
return;
@@ -613,7 +618,8 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr.sink_panel_replay_support = true;
- if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
drm_dbg_kms(display->drm,
@@ -672,10 +678,12 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
- drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
- &intel_dp->pr_dpcd);
- if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
+ &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_SUPPORT)
_panel_replay_init_dpcd(intel_dp);
if (intel_dp->psr_dpcd[0])
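INTEL_PR_DPCD_INDEX() is not defined in this file; a minimal sketch of the
assumed helper, under the assumption that pr_dpcd[] starts at
DP_PANEL_REPLAY_CAP_SUPPORT (the real definition lands elsewhere in this series):

    /* Map a Panel Replay DPCD address to a slot in intel_dp->pr_dpcd[]. */
    #define INTEL_PR_DPCD_INDEX(pr_dpcd_register) \
        ((pr_dpcd_register) - DP_PANEL_REPLAY_CAP_SUPPORT)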
@@ -689,7 +697,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 aux_clock_divider, aux_ctl;
/* write DP_SET_POWER=D0 */
@@ -704,7 +711,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- intel_de_write(dev_priv,
+ intel_de_write(display,
psr_aux_data_reg(display, cpu_transcoder, i >> 2),
intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
@@ -733,7 +740,8 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay
return false;
return panel_replay ?
- intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
+ intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
psr2_su_region_et_global_enabled(intel_dp);
}
@@ -794,31 +802,10 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
}
-static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- u8 val;
-
- /*
- * eDP Panel Replay uses always ALPM
- * PSR2 uses ALPM but PSR1 doesn't
- */
- if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
- !crtc_state->has_sel_update))
- return;
-
- val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
-
- if (crtc_state->has_panel_replay)
- val |= DP_ALPM_MODE_AUX_LESS;
-
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
-}
-
static void intel_psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- intel_psr_enable_sink_alpm(intel_dp, crtc_state);
+ intel_alpm_enable_sink(intel_dp, crtc_state);
crtc_state->has_panel_replay ?
_panel_replay_enable_sink(intel_dp, crtc_state) :
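intel_alpm_enable_sink() replaces the helper removed above. A sketch of its
assumed behaviour, reconstructed from the removed code (the real version lives
in intel_alpm.c and may differ):

    static void intel_alpm_enable_sink_sketch(struct intel_dp *intel_dp,
                                              const struct intel_crtc_state *crtc_state)
    {
        u8 val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;

        /* eDP Panel Replay always uses ALPM; PSR2 does, PSR1 doesn't */
        if (!intel_dp_is_edp(intel_dp) ||
            (!crtc_state->has_panel_replay && !crtc_state->has_sel_update))
            return;

        if (crtc_state->has_panel_replay)
            val |= DP_ALPM_MODE_AUX_LESS;

        drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
    }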
@@ -839,7 +826,6 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val = 0;
if (DISPLAY_VER(display) >= 11)
@@ -873,7 +859,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
* WA 0479: hsw,bdw
* "Do not skip both TP1 and TP2/TP3"
*/
- if (DISPLAY_VER(dev_priv) < 9 &&
+ if (DISPLAY_VER(display) < 9 &&
connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_100us;
@@ -906,10 +892,21 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
return idle_frames;
}
+static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ u32 current_dc_state = intel_display_power_get_current_dc_state(display);
+ struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
+
+ return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
+ current_dc_state != DC_STATE_EN_UPTO_DC6) ||
+ intel_dp->psr.active_non_psr_pipes ||
+ READ_ONCE(vblank->enabled);
+}
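The LNL/PTL-A0 check gating Wa_16025596647 recurs throughout this patch. A
hypothetical condensation of the condition, not part of the patch itself:

    /* Wa_16025596647 applies on LunarLake (display version 20) and on
     * PantherLake (version 30.00) steppings A0..B0 only.
     */
    static bool needs_wa_16025596647(struct intel_display *display)
    {
        return DISPLAY_VER(display) == 20 ||
               IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0);
    }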
+
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
@@ -919,7 +916,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(display) < 20)
val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (intel_dp->psr.link_standby)
@@ -935,6 +932,14 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
+
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ is_dc5_dc6_blocked(intel_dp))
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ true);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
@@ -1013,14 +1018,21 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
+ u8 idle_frames;
- val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ is_dc5_dc6_blocked(intel_dp))
+ idle_frames = 0;
+ else
+ idle_frames = psr_compute_idle_frames(intel_dp);
+ val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
- if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
val |= EDP_SU_TRACK_ENABLE;
if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
@@ -1038,7 +1050,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1103,9 +1115,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
else if (DISPLAY_VER(display) >= 12)
return cpu_transcoder == TRANSCODER_A;
@@ -1183,10 +1193,9 @@ dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = dig_port->base.port;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return pipe <= PIPE_B && port <= PORT_B;
else
return pipe == PIPE_A && port == PORT_A;
@@ -1197,7 +1206,6 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
struct i915_power_domains *power_domains = &display->power.domains;
u32 exit_scanlines;
@@ -1223,7 +1231,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
return;
/*
@@ -1264,7 +1272,6 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
@@ -1286,7 +1293,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
@@ -1412,7 +1419,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
@@ -1421,20 +1427,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
/* JSL and EHL only supports eDP 1.3 */
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ if (display->platform.jasperlake || display->platform.elkhartlake) {
drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
return false;
}
/* Wa_16011181250 */
- if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
- IS_DG2(dev_priv)) {
+ if (display->platform.rocketlake || display->platform.alderlake_s ||
+ display->platform.dg2) {
drm_dbg_kms(display->drm,
"PSR2 is defeatured for this platform\n");
return false;
}
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not completely functional in this stepping\n");
return false;
@@ -1453,7 +1459,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
+ (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
drm_dbg_kms(display->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
@@ -1486,7 +1492,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
@@ -1573,6 +1579,12 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (!CAN_PSR(intel_dp))
return false;
+ /*
+ * Currently PSR doesn't work reliably with VRR enabled.
+ */
+ if (crtc_state->vrr.enable)
+ return false;
+
entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
if (entry_setup_frames >= 0) {
@@ -1604,6 +1616,12 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -1634,12 +1652,6 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
if (!alpm_config_valid(intel_dp, crtc_state, true))
return false;
- if (crtc_state->crc_enabled) {
- drm_dbg_kms(display->drm,
- "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
- return false;
- }
-
return true;
}
@@ -1658,6 +1670,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc;
+ u8 active_pipes = 0;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm, "PSR disabled by flag\n");
@@ -1687,12 +1702,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- /*
- * Currently PSR/PR doesn't work reliably with VRR enabled.
- */
- if (crtc_state->vrr.enable)
- return;
-
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
@@ -1711,6 +1720,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm,
"PSR disabled to workaround PSR FSM hang issue\n");
}
+
+ /* Rest is for Wa_16025596647 */
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ /* Not needed by Panel Replay */
+ if (crtc_state->has_panel_replay)
+ return;
+
+ /* We ignore a possible secondary PSR/Panel Replay-capable eDP */
+ for_each_intel_crtc(display->drm, crtc)
+ active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
+
+ active_pipes = intel_calc_active_pipes(state, active_pipes);
+
+ crtc_state->active_non_psr_pipes = active_pipes &
+ ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1827,7 +1854,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask = 0;
@@ -1866,7 +1892,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* As a workaround leave LPSP unmasked to prevent PSR entry
* when external displays are active.
*/
- if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
+ if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
mask |= EDP_PSR_DEBUG_MASK_LPSP;
if (DISPLAY_VER(display) < 20)
@@ -1880,7 +1906,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
/* allow PSR with sprite enabled */
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
}
@@ -1903,9 +1929,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
- if (intel_dp_is_edp(intel_dp))
- intel_alpm_configure(intel_dp, crtc_state);
-
/*
* Wa_16013835468
* Wa_14015648006
@@ -1925,7 +1948,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
if (!intel_dp->psr.panel_replay_enabled &&
(IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(dev_priv)))
+ display->platform.alderlake_p))
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
@@ -1936,10 +1959,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
0,
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
+
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ !intel_dp->psr.panel_replay_enabled)
+ intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
+
+ intel_alpm_configure(intel_dp, crtc_state);
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -1995,6 +2026,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
+ intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2006,8 +2038,9 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.sel_update_enabled ? "2" : "1");
/*
- * Enabling here only for PSR. Panel Replay enable bit is already
- * written at this point. See
+ * Sink PSR/Panel Replay is enabled here only for PSR; the Panel Replay
+ * enable bit has already been written at this point. Sink ALPM is enabled
+ * here for both PSR and Panel Replay. See
* intel_psr_panel_replay_enable_sink. Modifiers/options:
* - Selective Update
* - Region Early Transport
@@ -2024,7 +2057,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
- intel_dp->psr.paused = false;
+ intel_dp->psr.pause_counter = 0;
/*
* Link_ok is sticky and set here on PSR enable. We can assume link
@@ -2070,6 +2103,12 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
+ if (DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ false);
+
val = intel_de_rmw(display,
psr_ctl_reg(display, cpu_transcoder),
EDP_PSR_ENABLE, 0);
@@ -2104,7 +2143,6 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
lockdep_assert_held(&intel_dp->psr.lock);
@@ -2136,7 +2174,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(display,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
@@ -2144,16 +2182,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (intel_dp_is_edp(intel_dp))
intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
- /* Panel Replay on eDP is always using ALPM aux less. */
- if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
- intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
- ALPM_CTL_ALPM_ENABLE |
- ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
-
- intel_de_rmw(display,
- PORT_ALPM_CTL(cpu_transcoder),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
- }
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
+ intel_alpm_disable(intel_dp);
/* Disable PSR on Sink */
if (!intel_dp->psr.panel_replay_enabled) {
@@ -2164,12 +2194,19 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
DP_RECEIVER_ALPM_CONFIG, 0);
}
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ !intel_dp->psr.panel_replay_enabled)
+ intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
+
intel_dp->psr.enabled = false;
intel_dp->psr.panel_replay_enabled = false;
intel_dp->psr.sel_update_enabled = false;
intel_dp->psr.psr2_sel_fetch_enabled = false;
intel_dp->psr.su_region_et_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+ intel_dp->psr.active_non_psr_pipes = 0;
}
/**
@@ -2210,7 +2247,6 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2223,12 +2259,10 @@ void intel_psr_pause(struct intel_dp *intel_dp)
return;
}
- /* If we ever hit this, we will need to add refcount to pause/resume */
- drm_WARN_ON(display->drm, psr->paused);
-
- intel_psr_exit(intel_dp);
- intel_psr_wait_exit_locked(intel_dp);
- psr->paused = true;
+ if (intel_dp->psr.pause_counter++ == 0) {
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
+ }
mutex_unlock(&psr->lock);
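With the paused flag replaced by pause_counter, nested pause/resume pairs from
independent call paths become legal. A hypothetical sequence:

    intel_psr_pause(intel_dp);  /* 0 -> 1: PSR exits, waits for idle */
    intel_psr_pause(intel_dp);  /* 1 -> 2: no HW access */
    intel_psr_resume(intel_dp); /* 2 -> 1: still paused */
    intel_psr_resume(intel_dp); /* 1 -> 0: PSR re-activated */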
@@ -2244,6 +2278,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
*/
void intel_psr_resume(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2251,28 +2286,36 @@ void intel_psr_resume(struct intel_dp *intel_dp)
mutex_lock(&psr->lock);
- if (!psr->paused)
- goto unlock;
+ if (!psr->enabled)
+ goto out;
- psr->paused = false;
- intel_psr_activate(intel_dp);
+ if (!psr->pause_counter) {
+ drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
+ goto out;
+ }
-unlock:
+ if (--intel_dp->psr.pause_counter == 0)
+ intel_psr_activate(intel_dp);
+
+out:
mutex_unlock(&psr->lock);
}
/**
- * intel_psr_needs_block_dc_vblank - Check if block dc entry is needed
+ * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
+ * notification.
* @crtc_state: CRTC status
*
* We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
* prevent it in case of Panel Replay. Panel Replay switches main link off on
* DC entry. This means vblank interrupts are not fired and is a problem if
- * user-space is polling for vblank events.
+ * user-space is polling for vblank events. Wa_16025596647 also needs to
+ * know when vblank is enabled or disabled.
*/
-bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
+bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
@@ -2283,8 +2326,15 @@ bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp_is_edp(intel_dp) &&
- CAN_PANEL_REPLAY(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
+ continue;
+
+ if (CAN_PANEL_REPLAY(intel_dp))
+ return true;
+
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ CAN_PSR(intel_dp))
return true;
}
@@ -2312,37 +2362,76 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
CURSURFLIVE(display, crtc->pipe), 0);
}
-static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
+/**
+ * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
+ * @crtc_state: the crtc state
+ *
+ * Return minimum vblank delay needed by PSR.
+ */
+int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->has_psr)
+ return 0;
+
+ /* Wa_14015401596 */
+ if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
+ return 1;
+
+ /* Rest is for SRD_STATUS needed on LunarLake and onwards */
+ if (DISPLAY_VER(display) < 20)
+ return 0;
+
+ /*
+ * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
+ *
+ * To deterministically capture the transition of the state machine
+ * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
+ * one line after the non-delayed V. Blank.
+ *
+ * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
+ * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
+ * - TRANS_VTOTAL[ Vertical Active ])
+ *
+ * SRD_STATUS is used only by PSR1 on PantherLake.
+ * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
+ */
+
+ if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
+ crtc_state->has_sel_update))
+ return 0;
+ else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
+ intel_crtc_has_type(crtc_state,
+ INTEL_OUTPUT_EDP)))
+ return 0;
+ else
+ return 1;
+}
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
+static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
+{
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
PSR2_MAN_TRK_CTL_ENABLE;
}
static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
@@ -2405,8 +2494,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
bool full_update)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = man_trk_ctl_enable_bit_get(display);
/* SF partial frame enable has to be set even on full update */
@@ -2420,7 +2507,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
@@ -2474,13 +2561,12 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
- (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
+ (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
y_alignment = vdsc_cfg->slice_height;
else
y_alignment = crtc_state->su_y_granularity;
@@ -2601,12 +2687,11 @@ static void
intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
/* Wa_14014971492 */
if (!crtc_state->has_panel_replay &&
((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) &&
+ display->platform.alderlake_p || display->platform.tigerlake)) &&
crtc_state->splitter.enable)
crtc_state->psr2_su_area.y1 = 0;
@@ -2803,11 +2888,30 @@ skip_sel_fetch_set_loop:
return 0;
}
+void intel_psr2_panic_force_full_update(struct intel_display *display,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = man_trk_ctl_enable_bit_get(display);
+
+ /* SF partial frame enable has to be set even on full update */
+ val |= man_trk_ctl_partial_frame_bit_get(display);
+ val |= man_trk_ctl_continuos_full_frame(display);
+
+ /* Directly write the register */
+ intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);
+
+ if (!crtc_state->enable_psr2_su_region_et)
+ return;
+
+ intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
+}
+
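This helper presumably serves the drm panic path, which cannot take locks or
wait, hence the raw intel_de_write_fw() accesses. What it programs, decoded as
a reading aid (not part of the patch):

    /* PSR2_MAN_TRK_CTL value:
     *   man_trk_ctl_enable_bit_get()  - 0 on ADL-P and display ver 14+,
     *                                   PSR2_MAN_TRK_CTL_ENABLE otherwise
     *   ..._SF_PARTIAL_FRAME_UPDATE   - required even for full updates
     *   ..._SF_CONTINUOS_FULL_FRAME   - keep fetching whole frames
     * PIPE_SRCSZ_ERLY_TPT is zeroed when SU region early transport is in
     * use, presumably so a stale early-transport size cannot clip the
     * panic image.
     */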
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -2839,7 +2943,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
new_crtc_state->has_sel_update != psr->sel_update_enabled ||
new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
- (DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled))
+ (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
intel_psr_disable_locked(intel_dp);
else if (new_crtc_state->wm_level_disabled)
/* Wa_14015648006 */
@@ -3171,7 +3275,9 @@ static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
intel_psr_configure_full_frame_update(intel_dp);
@@ -3235,7 +3341,6 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
!intel_dp->psr.active)
@@ -3250,16 +3355,15 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
return;
tgl_psr2_enable_dc3co(intel_dp);
- mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
+ mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
intel_dp->psr.dc3co_exit_delay);
}
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0)
@@ -3276,13 +3380,15 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* existing SU configuration
*/
intel_psr_configure_full_frame_update(intel_dp);
- }
- intel_psr_force_update(intel_dp);
+ intel_psr_force_update(intel_dp);
+ } else {
+ intel_psr_exit(intel_dp);
+ }
- if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
+ if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) &&
!intel_dp->psr.busy_frontbuffer_bits)
- queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
+ queue_work(display->wq.unordered, &intel_dp->psr.work);
}
/**
@@ -3322,7 +3428,7 @@ void intel_psr_flush(struct intel_display *display,
* we have to ensure that the PSR is not activated until
* intel_psr_resume() is called.
*/
- if (intel_dp->psr.paused)
+ if (intel_dp->psr.pause_counter)
goto unlock;
if (origin == ORIGIN_FLIP ||
@@ -3419,29 +3525,14 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
static void psr_alpm_check(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
- struct drm_dp_aux *aux = &intel_dp->aux;
struct intel_psr *psr = &intel_dp->psr;
- u8 val;
- int r;
if (!psr->sel_update_enabled)
return;
- r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
- if (r != 1) {
- drm_err(display->drm, "Error reading ALPM status\n");
- return;
- }
-
- if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ if (intel_alpm_get_error(intel_dp)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(display->drm,
- "ALPM lock timeout error, disabling PSR\n");
-
- /* Clearing error */
- drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
}
}
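psr_alpm_check() now delegates the DPCD poll to intel_alpm_get_error(). A
sketch of the assumed contract, reconstructed from the code removed above (the
real helper lives in intel_alpm.c):

    static bool intel_alpm_get_error_sketch(struct intel_dp *intel_dp)
    {
        struct drm_dp_aux *aux = &intel_dp->aux;
        u8 val;

        if (drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val) != 1)
            return false; /* read failure logged, not treated as a sink error */

        if (!(val & DP_ALPM_LOCK_TIMEOUT_ERROR))
            return false;

        /* write back to clear the sticky error, as the old code did */
        drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
        return true;
    }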
@@ -3626,6 +3717,168 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
}
}
+/* Wa_16025596647 */
+static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ bool dc5_dc6_blocked;
+
+ if (!intel_dp->psr.active)
+ return;
+
+ dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
+
+ if (intel_dp->psr.sel_update_enabled)
+ psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
+ psr_compute_idle_frames(intel_dp));
+ else
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ dc5_dc6_blocked);
+}
+
+static void psr_dc5_dc6_wa_work(struct work_struct *work)
+{
+ struct intel_display *display = container_of(work, typeof(*display),
+ psr_dc5_dc6_wa_work);
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
+/**
+ * intel_psr_notify_dc5_dc6 - Notify PSR about dc5/dc6 being enabled/disabled
+ * @display: intel display struct
+ *
+ * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647): it
+ * schedules psr_dc5_dc6_wa_work, which applies or removes the workaround.
+ */
+void intel_psr_notify_dc5_dc6(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ schedule_work(&display->psr_dc5_dc6_wa_work);
+}
+
+/**
+ * intel_psr_dc5_dc6_wa_init - Init work for the underrun on idle PSR HW bug wa
+ * @display: intel display struct
+ *
+ * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647): it
+ * initializes psr_dc5_dc6_wa_work, which is used to apply the workaround.
+ */
+void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
+}
+
+/**
+ * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
+ * @state: intel atomic state
+ * @crtc: intel crtc
+ * @enable: enable/disable
+ *
+ * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647): apply
+ * or remove the workaround when a pipe is enabled or disabled.
+ */
+void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, bool enable)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u8 active_non_psr_pipes;
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
+ goto unlock;
+
+ active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
+
+ if (enable)
+ active_non_psr_pipes |= BIT(crtc->pipe);
+ else
+ active_non_psr_pipes &= ~BIT(crtc->pipe);
+
+ if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
+ goto unlock;
+
+ if ((enable && intel_dp->psr.active_non_psr_pipes) ||
+ (!enable && !intel_dp->psr.active_non_psr_pipes)) {
+ intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
+ goto unlock;
+ }
+
+ intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
+
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+unlock:
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
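The handler above re-evaluates the workaround only on edge transitions. Worked
through for a PSR panel on pipe A (hypothetical sequence):

    /*
     * enable pipe B,  old mask 0      -> falls through, WA re-applied
     * enable pipe C,  old mask BIT(B) -> recorded only, WA already in place
     * disable pipe C, old mask B|C    -> re-evaluated, WA stays applied
     * disable pipe B, old mask BIT(B) -> re-evaluated, mask now 0, WA dropped
     */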
+/**
+ * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
+ * @display: intel display struct
+ * @enable: enable/disable
+ *
+ * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647): apply
+ * or remove the workaround when vblank is enabled or disabled.
+ */
+void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
+ bool enable)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (intel_dp->psr.panel_replay_enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ break;
+ }
+
+ if (intel_dp->psr.enabled)
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ return;
+ }
+
+ /*
+ * NOTE: intel_display_power_set_target_dc_state is used
+ * only by PSR code for DC3CO handling. The DC3CO target
+ * state is currently disabled in PSR code. If DC3CO is
+ * taken into use, we need to take that into account here
+ * as well.
+ */
+ intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
+ DC_STATE_EN_UPTO_DC6);
+}
+
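A sketch of an assumed call site for this notification, presumably in the
vblank enable/disable path and gated by the renamed predicate (hypothetical
wiring; the real caller lands elsewhere in this series):

    if (intel_psr_needs_vblank_notification(crtc_state))
        intel_psr_notify_vblank_enable_disable(display, true);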
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
@@ -3634,8 +3887,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
const char *status = "unknown";
u32 val, status_val;
- if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
- intel_dp->psr.panel_replay_enabled)) {
+ if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
+ (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
@@ -3690,7 +3943,8 @@ static void intel_psr_sink_capability(struct intel_dp *intel_dp,
seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
seq_printf(m, ", Panel Replay Selective Update = %s",
str_yes_no(psr->sink_panel_replay_su_support));
- if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
seq_printf(m, " (Early Transport)");
seq_printf(m, "\n");
}
@@ -3728,10 +3982,9 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool enabled;
u32 val, psr2_ctl;
@@ -3740,7 +3993,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&psr->lock);
intel_psr_print_mode(intel_dp, m);
@@ -3796,24 +4049,30 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
int frame;
/*
- * Reading all 3 registers before hand to minimize crossing a
- * frame boundary between register reads
+ * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
+ * (it returns zeros only) and it has been removed on Xe2_LPD.
*/
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(display,
- PSR2_SU_STATUS(display, cpu_transcoder, frame));
- su_frames_val[frame / 3] = val;
- }
+ if (DISPLAY_VER(display) < 13) {
+ /*
+ * Reading all 3 registers beforehand to minimize crossing a
+ * frame boundary between register reads.
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(display,
+ PSR2_SU_STATUS(display, cpu_transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
- seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
- u32 su_blocks;
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
- su_blocks = su_frames_val[frame / 3] &
- PSR2_SU_STATUS_MASK(frame);
- su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
- seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
}
seq_printf(m, "PSR2 selective fetch: %s\n",
@@ -3822,7 +4081,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
unlock:
mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -3853,9 +4112,7 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct intel_display *display = data;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- intel_wakeref_t wakeref;
int ret = -ENODEV;
if (!HAS_PSR(display))
@@ -3866,12 +4123,9 @@ i915_edp_psr_debug_set(void *data, u64 val)
drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
// TODO: split to each transcoder's PSR debug state
- ret = intel_psr_debug_set(intel_dp, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ ret = intel_psr_debug_set(intel_dp, val);
}
return ret;
@@ -4004,3 +4258,19 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
+
+bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * eDP Panel Replay always uses ALPM;
+ * PSR2 uses ALPM but PSR1 doesn't.
+ */
+ return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
+ crtc_state->has_panel_replay);
+}
+
+bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
+}
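Taken together, the two new predicates give this truth table for an eDP sink
(a reading aid, not part of the patch):

    /*
     * PSR1 only               -> needs_alpm = false, needs_alpm_aux_less = false
     * PSR2/selective update   -> needs_alpm = true,  needs_alpm_aux_less = false
     * Panel Replay            -> needs_alpm = true,  needs_alpm_aux_less = true
     * non-eDP DP              -> both false
     */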
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index a43a374cff55..9b061a22361f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -57,9 +57,17 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
+void intel_psr2_panic_force_full_update(struct intel_display *display,
+ struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
-bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state);
+bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state);
+void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, bool enable);
+void intel_psr_notify_dc5_dc6(struct intel_display *display);
+void intel_psr_dc5_dc6_wa_init(struct intel_display *display);
+void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
+ bool enable);
bool intel_psr_link_ok(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
@@ -67,7 +75,11 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
+bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state);
+bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 795e6b9cc575..8afbf5a38335 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -266,6 +266,16 @@
#define _PIPE_SRCSZ_ERLY_TPT_B 0x71074
#define PIPE_SRCSZ_ERLY_TPT(pipe) _MMIO_PIPE((pipe), _PIPE_SRCSZ_ERLY_TPT_A, _PIPE_SRCSZ_ERLY_TPT_B)
+#define _PR_ALPM_CTL_A 0x60948
+#define PR_ALPM_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PR_ALPM_CTL_A)
+#define PR_ALPM_CTL_ALLOW_LINK_OFF_BETWEEN_AS_SDP_AND_SU BIT(6)
+#define PR_ALPM_CTL_RFB_UPDATE_CONTROL BIT(5)
+#define PR_ALPM_CTL_AS_SDP_TRANSMISSION_IN_ACTIVE_DISABLE BIT(4)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK REG_GENMASK(1, 0)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1_OR_T2 REG_FIELD_PREP(PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK, 0)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1 REG_FIELD_PREP(PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK, 1)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T2 REG_FIELD_PREP(PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK, 2)
+
#define _ALPM_CTL_A 0x60950
#define ALPM_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL_A)
#define ALPM_CTL_ALPM_ENABLE REG_BIT(31)
@@ -325,8 +335,8 @@
#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(20, 16)
#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(12, 8)
-#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK, val)
#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(4, 0)
-#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK, val)
#endif /* __INTEL_PSR_REGS_H__ */
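The two-line change above fixes a copy-paste bug: the FIRST/LAST half-cycle
values were packed with the plain LFPS_HALF_CYCLE mask (bits 20:16) and landed
in the wrong field. Worked encoding with a hypothetical value of 3:

    /* Before: FIRST_LFPS_HALF_CYCLE_DURATION(3)
     *   = REG_FIELD_PREP(REG_GENMASK(20, 16), 3) = 0x30000 (wrong field)
     * After:  FIRST_LFPS_HALF_CYCLE_DURATION(3)
     *   = REG_FIELD_PREP(REG_GENMASK(12, 8), 3)  = 0x300   (bits 12:8)
     * LAST_LFPS_HALF_CYCLE_DURATION(3) likewise moves from bits 20:16 down
     * to its own field, bits 4:0.
     */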
diff --git a/drivers/gpu/drm/i915/display/intel_sbi.c b/drivers/gpu/drm/i915/display/intel_sbi.c
new file mode 100644
index 000000000000..dfcff924f0ed
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sbi.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ *
+ * LPT/WPT IOSF sideband.
+ */
+
+#include <drm/drm_print.h>
+
+#include "intel_de.h"
+#include "intel_display_core.h"
+#include "intel_sbi.h"
+#include "intel_sbi_regs.h"
+
+/* SBI access */
+static int intel_sbi_rw(struct intel_display *display, u16 reg,
+ enum intel_sbi_destination destination,
+ u32 *val, bool is_read)
+{
+ u32 cmd;
+
+ lockdep_assert_held(&display->sbi.lock);
+
+ if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) {
+ drm_err(display->drm, "timeout waiting for SBI to become ready\n");
+ return -EBUSY;
+ }
+
+ intel_de_write_fw(display, SBI_ADDR, SBI_ADDR_VALUE(reg));
+ intel_de_write_fw(display, SBI_DATA, is_read ? 0 : *val);
+
+ if (destination == SBI_ICLK)
+ cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ if (!is_read)
+ cmd |= SBI_CTL_OP_WR;
+ intel_de_write_fw(display, SBI_CTL_STAT, cmd | SBI_STATUS_BUSY);
+
+ if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) {
+ drm_err(display->drm, "timeout waiting for SBI to complete read\n");
+ return -ETIMEDOUT;
+ }
+
+ if (cmd & SBI_RESPONSE_FAIL) {
+ drm_err(display->drm, "error during SBI read of reg %x\n", reg);
+ return -ENXIO;
+ }
+
+ if (is_read)
+ *val = intel_de_read_fw(display, SBI_DATA);
+
+ return 0;
+}
+
+void intel_sbi_lock(struct intel_display *display)
+{
+ mutex_lock(&display->sbi.lock);
+}
+
+void intel_sbi_unlock(struct intel_display *display)
+{
+ mutex_unlock(&display->sbi.lock);
+}
+
+u32 intel_sbi_read(struct intel_display *display, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 result = 0;
+
+ intel_sbi_rw(display, reg, destination, &result, true);
+
+ return result;
+}
+
+void intel_sbi_write(struct intel_display *display, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
+{
+ intel_sbi_rw(display, reg, destination, &value, false);
+}
+
+void intel_sbi_init(struct intel_display *display)
+{
+ mutex_init(&display->sbi.lock);
+}
+
+void intel_sbi_fini(struct intel_display *display)
+{
+ mutex_destroy(&display->sbi.lock);
+}
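A minimal usage sketch for the new SBI helpers: a locked read-modify-write on
an iCLK register (register and bit chosen for illustration from
intel_sbi_regs.h below):

    u32 tmp;

    intel_sbi_lock(display);
    tmp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
    tmp |= SBI_SSCCTL_DISABLE;
    intel_sbi_write(display, SBI_SSCCTL6, tmp, SBI_ICLK);
    intel_sbi_unlock(display);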
diff --git a/drivers/gpu/drm/i915/display/intel_sbi.h b/drivers/gpu/drm/i915/display/intel_sbi.h
new file mode 100644
index 000000000000..841f77a142a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sbi.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _INTEL_SBI_H_
+#define _INTEL_SBI_H_
+
+#include <linux/types.h>
+
+struct intel_display;
+
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
+void intel_sbi_init(struct intel_display *display);
+void intel_sbi_fini(struct intel_display *display);
+void intel_sbi_lock(struct intel_display *display);
+void intel_sbi_unlock(struct intel_display *display);
+u32 intel_sbi_read(struct intel_display *display, u16 reg,
+ enum intel_sbi_destination destination);
+void intel_sbi_write(struct intel_display *display, u16 reg, u32 value,
+ enum intel_sbi_destination destination);
+
+#endif /* _INTEL_SBI_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_sbi_regs.h b/drivers/gpu/drm/i915/display/intel_sbi_regs.h
new file mode 100644
index 000000000000..ec76652de02d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sbi_regs.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_SBI_REGS_H__
+#define __INTEL_SBI_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/*
+ * Sideband Interface (SBI) is programmed indirectly, via SBI_ADDR, which
+ * contains the register offset; and SBI_DATA, which contains the payload.
+ */
+#define SBI_ADDR _MMIO(0xC6000)
+#define SBI_ADDR_MASK REG_GENMASK(31, 16)
+#define SBI_ADDR_VALUE(addr) REG_FIELD_PREP(SBI_ADDR_MASK, (addr))
+
+#define SBI_DATA _MMIO(0xC6004)
+
+#define SBI_CTL_STAT _MMIO(0xC6008)
+#define SBI_CTL_DEST_MASK REG_GENMASK(16, 16)
+#define SBI_CTL_DEST_ICLK REG_FIELD_PREP(SBI_CTL_DEST_MASK, 0)
+#define SBI_CTL_DEST_MPHY REG_FIELD_PREP(SBI_CTL_DEST_MASK, 1)
+#define SBI_CTL_OP_MASK REG_GENMASK(15, 8)
+#define SBI_CTL_OP_IORD REG_FIELD_PREP(SBI_CTL_OP_MASK, 2)
+#define SBI_CTL_OP_IOWR REG_FIELD_PREP(SBI_CTL_OP_MASK, 3)
+#define SBI_CTL_OP_CRRD REG_FIELD_PREP(SBI_CTL_OP_MASK, 6)
+#define SBI_CTL_OP_CRWR REG_FIELD_PREP(SBI_CTL_OP_MASK, 7)
+#define SBI_CTL_OP_WR REG_BIT(8)
+#define SBI_RESPONSE_MASK REG_GENMASK(2, 1)
+#define SBI_RESPONSE_FAIL REG_FIELD_PREP(SBI_RESPONSE_MASK, 1)
+#define SBI_RESPONSE_SUCCESS REG_FIELD_PREP(SBI_RESPONSE_MASK, 0)
+#define SBI_STATUS_MASK REG_GENMASK(0, 0)
+#define SBI_STATUS_BUSY REG_FIELD_PREP(SBI_STATUS_MASK, 1)
+#define SBI_STATUS_READY REG_FIELD_PREP(SBI_STATUS_MASK, 0)
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE 0x0200
+
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
+#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0)
+
+#define SBI_SSCDITHPHASE 0x0204
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1 << 3)
+#define SBI_SSCCTL_DISABLE (1 << 0)
+
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
+#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4)
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4)
+
+#define SBI_DBUFF0 0x2a00
+
+#define SBI_GEN0 0x1f00
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0)
+
+#endif /* __INTEL_SBI_REGS_H__ */
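The fields above compose into a single command word written to
SBI_CTL_STAT; a rough sketch of an indirect read, assuming a poll
helper such as intel_de_wait_for_clear() (the real sequencing lives in
intel_sbi.c, with locking and error handling omitted here):

	static u32 sbi_read_sketch(struct intel_display *display, u16 reg,
				   enum intel_sbi_destination destination)
	{
		u32 cmd;

		/* Wait out any in-flight transaction. */
		intel_de_wait_for_clear(display, SBI_CTL_STAT, SBI_STATUS_BUSY, 100);

		intel_de_write(display, SBI_ADDR, SBI_ADDR_VALUE(reg));

		if (destination == SBI_ICLK)
			cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
		else
			cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;

		/* Setting the busy bit kicks off the transaction. */
		intel_de_write(display, SBI_CTL_STAT, cmd | SBI_STATUS_BUSY);
		intel_de_wait_for_clear(display, SBI_CTL_STAT, SBI_STATUS_BUSY, 100);

		return intel_de_read(display, SBI_DATA);
	}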
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 6e2d9929b4d7..87aff2754f69 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -36,16 +36,16 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
@@ -214,18 +214,17 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
struct intel_display *display = to_intel_display(&intel_sdvo->base);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bval = val, cval = val;
int i;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_de_write(display, intel_sdvo->sdvo_reg, val);
intel_de_posting_read(display, intel_sdvo->sdvo_reg);
/*
* HW workaround: this needs to be written twice, or the
* first write may get masked.
*/
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
intel_de_write(display, intel_sdvo->sdvo_reg, val);
intel_de_posting_read(display, intel_sdvo->sdvo_reg);
}
@@ -1360,14 +1359,13 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_display_mode *mode = &pipe_config->hw.mode;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
pipe_config->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(pipe_config))
return -EINVAL;
@@ -1527,7 +1525,6 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_encoder);
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
@@ -1634,7 +1631,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1670,13 +1667,12 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
bool intel_sdvo_port_enabled(struct intel_display *display,
i915_reg_t sdvo_reg, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
val = intel_de_read(display, sdvo_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
*pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
else if (display->platform.cherryview)
*pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
@@ -1841,7 +1837,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
@@ -1861,7 +1856,7 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -2036,7 +2031,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
struct intel_display *display = to_intel_display(&intel_sdvo->base);
u16 hotplug;
- if (!I915_HAS_HOTPLUG(display))
+ if (!HAS_HOTPLUG(display))
return 0;
/*
@@ -3367,9 +3362,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
static bool is_sdvo_port_valid(struct intel_display *display, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
return port == PORT_B;
else
return port == PORT_B || port == PORT_C;
@@ -3384,7 +3377,6 @@ static bool assert_sdvo_port_valid(struct intel_display *display, enum port port
bool intel_sdvo_init(struct intel_display *display,
i915_reg_t sdvo_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
@@ -3427,7 +3419,7 @@ bool intel_sdvo_init(struct intel_display *display,
}
intel_encoder->compute_config = intel_sdvo_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index c6321dafef4f..7fe6b4a18213 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -41,12 +41,12 @@ static s64 interp(s64 x, s64 x1, s64 x2, s64 y1, s64 y2)
{
s64 dydx;
- dydx = DIV_ROUND_UP_ULL((y2 - y1) * 100000, (x2 - x1));
+ dydx = DIV64_U64_ROUND_UP((y2 - y1) * 100000, (x2 - x1));
- return (y1 + DIV_ROUND_UP_ULL(dydx * (x - x1), 100000));
+ return (y1 + DIV64_U64_ROUND_UP(dydx * (x - x1), 100000));
}
-static void get_ana_cp_int_prop(u32 vco_clk,
+static void get_ana_cp_int_prop(u64 vco_clk,
u32 refclk_postscalar,
int mpll_ana_v2i,
int c, int a,
@@ -103,10 +103,10 @@ static void get_ana_cp_int_prop(u32 vco_clk,
DIV_ROUND_DOWN_ULL(curve_1_interpolated, CURVE0_MULTIPLIER)));
ana_cp_int_temp =
- DIV_ROUND_CLOSEST_ULL(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
- CURVE2_MULTIPLIER);
+ DIV64_U64_ROUND_CLOSEST(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
+ CURVE2_MULTIPLIER);
- *ana_cp_int = max(1, min(ana_cp_int_temp, 127));
+ *ana_cp_int = clamp(ana_cp_int_temp, 1, 127);
curve_2_scaled_int = curve_2_scaled1 * (*ana_cp_int);
@@ -115,17 +115,17 @@ static void get_ana_cp_int_prop(u32 vco_clk,
CURVE0_MULTIPLIER));
scaled_interpolated_sqrt =
- int_sqrt(DIV_ROUND_UP_ULL(interpolated_product, vco_div_refclk_float) *
+ int_sqrt(DIV64_U64_ROUND_UP(interpolated_product, vco_div_refclk_float) *
DIV_ROUND_DOWN_ULL(1000000000000ULL, 55));
/* Scale vco_div_refclk for ana_cp_int */
scaled_vco_div_refclk2 = DIV_ROUND_UP_ULL(vco_div_refclk_float, 1000000);
- adjusted_vco_clk2 = 1460281 * DIV_ROUND_UP_ULL(scaled_interpolated_sqrt *
+ adjusted_vco_clk2 = 1460281 * DIV64_U64_ROUND_UP(scaled_interpolated_sqrt *
scaled_vco_div_refclk2,
curve_1_interpolated);
- *ana_cp_prop = DIV_ROUND_UP_ULL(adjusted_vco_clk2, curve_2_scaled2);
- *ana_cp_prop = max(1, min(*ana_cp_prop, 127));
+ *ana_cp_prop = DIV64_U64_ROUND_UP(adjusted_vco_clk2, curve_2_scaled2);
+ *ana_cp_prop = clamp(*ana_cp_prop, 1, 127);
}
static void compute_hdmi_tmds_pll(u64 pixel_clock, u32 refclk,
@@ -165,10 +165,10 @@ static void compute_hdmi_tmds_pll(u64 pixel_clock, u32 refclk,
/* Select appropriate v2i point */
if (datarate <= INTEL_SNPS_PHY_HDMI_9999MHZ) {
mpll_ana_v2i = 2;
- tx_clk_div = ilog2(DIV_ROUND_DOWN_ULL(INTEL_SNPS_PHY_HDMI_9999MHZ, datarate));
+ tx_clk_div = ilog2(div64_u64(INTEL_SNPS_PHY_HDMI_9999MHZ, datarate));
} else {
mpll_ana_v2i = 3;
- tx_clk_div = ilog2(DIV_ROUND_DOWN_ULL(INTEL_SNPS_PHY_HDMI_16GHZ, datarate));
+ tx_clk_div = ilog2(div64_u64(INTEL_SNPS_PHY_HDMI_16GHZ, datarate));
}
vco_clk = (datarate << tx_clk_div) >> 1;
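The helper swap above matters because DIV_ROUND_UP_ULL() only accepts a
32-bit divisor, while DIV64_U64_ROUND_UP() divides by a full 64-bit
value; with vco_clk widened to u64, the divisors can exceed 32 bits. A
minimal sketch of the linear interpolation interp() implements, using
the signed 64-bit divide from <linux/math64.h> (the name interp_sketch
is hypothetical):

	/*
	 * y(x) on the line through (x1,y1) and (x2,y2); the slope is
	 * scaled by 1e5 to keep fractional precision in integer math.
	 */
	static s64 interp_sketch(s64 x, s64 x1, s64 x2, s64 y1, s64 y2)
	{
		s64 slope_scaled = div64_s64((y2 - y1) * 100000, x2 - x1);

		return y1 + div64_s64(slope_scaled * (x - x1), 100000);
	}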
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index b9acd9fe160c..b2dd69a11124 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,11 +5,13 @@
#include <linux/math.h>
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_snps_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 1ad6c8a94b3d..e6844df837af 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -36,15 +36,16 @@
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "i9xx_plane.h"
-#include "intel_atomic_plane.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_sprite.h"
#include "intel_sprite_regs.h"
@@ -1365,8 +1366,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
}
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- min_scale, max_scale, true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
if (ret)
return ret;
@@ -1420,10 +1421,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
index 1d0b84b464c1..4981cc34da05 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
@@ -3,21 +3,21 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_sprite_uapi.h"
-static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+static bool has_dst_key_in_primary_plane(struct intel_display *display)
{
- return DISPLAY_VER(dev_priv) >= 9;
+ return DISPLAY_VER(display) >= 9;
}
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
const struct drm_intel_sprite_colorkey *set)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
*key = *set;
@@ -34,7 +34,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
* On SKL+ we want dst key enabled on
* the primary and not on the sprite.
*/
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ if (DISPLAY_VER(display) >= 9 && plane->id != PLANE_PRIMARY &&
set->flags & I915_SET_COLORKEY_DESTINATION)
key->flags = 0;
}
@@ -43,7 +43,6 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
@@ -61,7 +60,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -74,7 +73,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
* Also multiple planes can't do destination keying on the same
* pipe simultaneously.
*/
- if (DISPLAY_VER(dev_priv) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
to_intel_plane(plane)->id >= PLANE_3 &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -99,7 +98,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
* On some platforms we have to configure
* the dst colorkey on the primary plane.
*/
- if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ if (!ret && has_dst_key_in_primary_plane(display)) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display,
to_intel_plane(plane)->pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b8d14ed8a56e..3bc57579fe53 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -3,8 +3,10 @@
* Copyright © 2019 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -12,6 +14,7 @@
#include "intel_display.h"
#include "intel_display_driver.h"
#include "intel_display_power_map.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
@@ -92,11 +95,6 @@ static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
return dig_port->tc;
}
-static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
-{
- return to_i915(tc->dig_port->base.base.dev);
-}
-
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
enum tc_port_mode mode)
{
@@ -219,10 +217,11 @@ __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain doma
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
+ struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
- drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
+ drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
#endif
__tc_cold_unblock(tc, domain, wakeref);
}
@@ -266,13 +265,13 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc)
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 lane_mask;
- lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
- drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
@@ -281,13 +280,13 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 pin_mask;
- pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
+ pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));
- drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
@@ -297,13 +296,12 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
intel_wakeref_t wakeref;
u32 val, pin_assignment;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
pin_assignment =
REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
@@ -369,7 +367,7 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
@@ -377,10 +375,10 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
assert_tc_cold_blocked(tc);
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return lnl_tc_port_get_max_lane_count(dig_port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return mtl_tc_port_get_max_lane_count(dig_port);
return intel_tc_port_get_max_lane_count(dig_port);
@@ -389,20 +387,20 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool lane_reversal = dig_port->lane_reversal;
u32 val;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
lane_reversal && tc->mode != TC_PORT_LEGACY);
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);
switch (required_lanes) {
@@ -423,16 +421,16 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
MISSING_CASE(required_lanes);
}
- intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
u32 live_status_mask)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 valid_hpd_mask;
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
if (hweight32(live_status_mask) != 1)
return;
@@ -447,7 +445,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
return;
/* If live status mismatches the VBT flag, trust the live status. */
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
tc->port_name, live_status_mask, valid_hpd_mask);
@@ -490,21 +488,20 @@ icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
- u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
intel_wakeref_t wakeref;
u32 fia_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
- fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
- pch_isr = intel_de_read(i915, SDEISR);
+ fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ pch_isr = intel_de_read(display, SDEISR);
}
if (fia_isr == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, nothing connected\n",
tc->port_name);
return mask;
@@ -531,14 +528,14 @@ static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -550,14 +547,14 @@ static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, can't %s ownership\n",
tc->port_name, take ? "take" : "release");
@@ -568,21 +565,21 @@ static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
if (take)
val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
- intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
return true;
}
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assume not owned\n",
tc->port_name);
return false;
@@ -619,30 +616,30 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
int max_lanes;
max_lanes = intel_tc_port_max_lane_count(dig_port);
if (tc->mode == TC_PORT_LEGACY) {
- drm_WARN_ON(&i915->drm, max_lanes != 4);
+ drm_WARN_ON(display->drm, max_lanes != 4);
return true;
}
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);
/*
* Now we have to re-check the live state, in case the port recently
* became disconnected. Not necessary for legacy mode.
*/
if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+ drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
tc->port_name);
return false;
}
if (max_lanes < required_lanes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY max lanes %d < required lanes %d\n",
tc->port_name,
max_lanes, required_lanes);
@@ -655,7 +652,7 @@ static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
tc->lock_wakeref = tc_cold_block(tc);
@@ -664,8 +661,8 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
if ((!tc_phy_is_ready(tc) ||
!icl_tc_phy_take_ownership(tc, true)) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
tc->port_name,
str_yes_no(tc_phy_is_ready(tc)));
goto out_unblock_tc_cold;
@@ -733,14 +730,13 @@ tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
intel_wakeref_t wakeref;
u32 val;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
- val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
+ val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));
- drm_WARN_ON(&i915->drm, val == 0xffffffff);
+ drm_WARN_ON(display->drm, val == 0xffffffff);
tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
@@ -775,19 +771,18 @@ adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 cpu_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
@@ -810,15 +805,15 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
u32 val;
assert_display_core_power_enabled(tc);
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -830,12 +825,12 @@ static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
assert_tc_port_power_enabled(tc);
- intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
return true;
@@ -843,13 +838,13 @@ static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
u32 val;
assert_tc_port_power_enabled(tc);
- val = intel_de_read(i915, DDI_BUF_CTL(port));
+ val = intel_de_read(display, DDI_BUF_CTL(port));
return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -872,7 +867,6 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
@@ -885,15 +879,15 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
port_wakeref = intel_display_power_get(display, port_power_domain);
if (!adlp_tc_phy_take_ownership(tc, true) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
tc->port_name);
goto out_put_port_power;
}
if (!tc_phy_is_ready(tc) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
tc->port_name);
goto out_release_phy;
}
@@ -965,19 +959,18 @@ static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 pica_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
@@ -994,22 +987,22 @@ static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
+ return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
}
static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: timeout waiting for TCSS power to get %s\n",
tc->port_name,
str_enabled_disabled(enabled));
@@ -1069,7 +1062,7 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
__xelpdp_tc_phy_enable_tcss_power(tc, enable);
@@ -1082,7 +1075,7 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl
return true;
out_disable:
- if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
+ if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
return false;
if (!enable)
@@ -1096,35 +1089,35 @@ out_disable:
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, reg);
+ val = intel_de_read(display, reg);
if (take)
val |= XELPDP_TC_PHY_OWNERSHIP;
else
val &= ~XELPDP_TC_PHY_OWNERSHIP;
- intel_de_write(i915, reg, val);
+ intel_de_write(display, reg, val);
}
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
+ return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
}
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
intel_wakeref_t tc_cold_wref;
enum intel_display_power_domain domain;
@@ -1134,7 +1127,7 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
if (tc->mode != TC_PORT_DISCONNECTED)
tc->lock_wakeref = tc_cold_block(tc);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
(tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
!xelpdp_tc_phy_tcss_power_is_enabled(tc));
@@ -1207,13 +1200,13 @@ tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 mask;
mask = tc->phy_ops->hpd_live_status(tc);
/* The sink can be connected only in a single mode. */
- drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
+ drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);
return mask;
}
@@ -1236,9 +1229,9 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
bool phy_is_ready, bool phy_is_owned)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
- drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
+ drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
return phy_is_ready && phy_is_owned;
}
@@ -1246,8 +1239,7 @@ static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
static bool tc_phy_is_connected(struct intel_tc_port *tc,
enum icl_port_dpll_id port_pll_type)
{
- struct intel_encoder *encoder = &tc->dig_port->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(tc->dig_port);
bool phy_is_ready = tc_phy_is_ready(tc);
bool phy_is_owned = tc_phy_is_owned(tc);
bool is_connected;
@@ -1257,7 +1249,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
else
is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
tc->port_name,
str_yes_no(is_connected),
@@ -1270,10 +1262,10 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(tc_phy_is_ready(tc), 500)) {
- drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
+ drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
tc->port_name);
return false;
@@ -1343,7 +1335,7 @@ get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
bool phy_is_ready;
bool phy_is_owned;
@@ -1363,11 +1355,11 @@ tc_phy_get_current_mode(struct intel_tc_port *tc)
if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
} else {
- drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
+ drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
tc->port_name,
tc_port_mode_name(mode),
@@ -1407,7 +1399,7 @@ tc_phy_get_target_mode(struct intel_tc_port *tc)
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 live_status_mask = tc_phy_hpd_live_status(tc);
bool connected;
@@ -1421,7 +1413,7 @@ static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
connected = tc->phy_ops->connect(tc, required_lanes);
}
- drm_WARN_ON(&i915->drm, !connected);
+ drm_WARN_ON(display->drm, !connected);
}
static void tc_phy_disconnect(struct intel_tc_port *tc)
@@ -1491,12 +1483,12 @@ static void __intel_tc_port_put_link(struct intel_tc_port *tc)
static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
assert_tc_port_power_enabled(tc);
- return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
+ return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
DDI_BUF_CTL_ENABLE;
}
@@ -1509,15 +1501,15 @@ static bool tc_port_is_enabled(struct intel_tc_port *tc)
*/
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool update_mode = false;
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->lock_wakeref);
- drm_WARN_ON(&i915->drm, tc->link_refcount);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->lock_wakeref);
+ drm_WARN_ON(display->drm, tc->link_refcount);
tc_phy_get_hw_state(tc);
/*
@@ -1540,8 +1532,8 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
if (!tc_port_is_enabled(tc)) {
update_mode = true;
} else if (tc->mode == TC_PORT_DISCONNECTED) {
- drm_WARN_ON(&i915->drm, !tc->legacy_port);
- drm_err(&i915->drm,
+ drm_WARN_ON(display->drm, !tc->legacy_port);
+ drm_err(display->drm,
"Port %s: PHY disconnected on enabled port, connecting it\n",
tc->port_name);
update_mode = true;
@@ -1556,28 +1548,28 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
mutex_unlock(&tc->lock);
}
-static bool tc_port_has_active_links(struct intel_tc_port *tc,
- const struct intel_crtc_state *crtc_state)
+static bool tc_port_has_active_streams(struct intel_tc_port *tc,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
- int active_links = 0;
+ int active_streams = 0;
if (dig_port->dp.is_mst) {
/* TODO: get the PLL type for MST, once HW readout is done for it. */
- active_links = intel_dp_mst_encoder_active_links(dig_port);
+ active_streams = intel_dp_mst_active_streams(&dig_port->dp);
} else if (crtc_state && crtc_state->hw.active) {
pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
- active_links = 1;
+ active_streams = 1;
}
- if (active_links && !tc_phy_is_connected(tc, pll_type))
- drm_err(&i915->drm,
- "Port %s: PHY disconnected with %d active link(s)\n",
- tc->port_name, active_links);
+ if (active_streams && !tc_phy_is_connected(tc, pll_type))
+ drm_err(display->drm,
+ "Port %s: PHY disconnected with %d active stream(s)\n",
+ tc->port_name, active_streams);
- return active_links;
+ return active_streams;
}
/**
@@ -1595,13 +1587,13 @@ static bool tc_port_has_active_links(struct intel_tc_port *tc,
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
- if (!tc_port_has_active_links(tc, crtc_state)) {
+ drm_WARN_ON(display->drm, tc->link_refcount != 1);
+ if (!tc_port_has_active_streams(tc, crtc_state)) {
/*
* TBT-alt is the default mode whenever PHY ownership is not
* held (regardless of the sink's connected live state), so
@@ -1610,7 +1602,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
if (tc->init_mode != TC_PORT_TBT_ALT &&
tc->init_mode != TC_PORT_DISCONNECTED)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
tc->port_name,
tc_port_mode_name(tc->init_mode));
@@ -1618,7 +1610,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
__intel_tc_port_put_link(tc);
}
- drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
tc->port_name,
tc_port_mode_name(tc->mode));
@@ -1637,12 +1629,12 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 mask = ~0;
- drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
if (tc->mode != TC_PORT_DISCONNECTED)
mask = BIT(tc->mode);
@@ -1677,14 +1669,14 @@ static int reset_link_commit(struct intel_tc_port *tc,
struct intel_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
struct intel_crtc *crtc;
u8 pipe_mask;
int ret;
- ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
if (ret)
return ret;
@@ -1695,7 +1687,7 @@ static int reset_link_commit(struct intel_tc_port *tc,
if (!pipe_mask)
return 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
@@ -1713,13 +1705,13 @@ static int reset_link_commit(struct intel_tc_port *tc,
static int reset_link(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *_state;
struct intel_atomic_state *state;
int ret;
- _state = drm_atomic_state_alloc(&i915->drm);
+ _state = drm_atomic_state_alloc(display->drm);
if (!_state)
return -ENOMEM;
@@ -1738,21 +1730,21 @@ static void intel_tc_port_link_reset_work(struct work_struct *work)
{
struct intel_tc_port *tc =
container_of(work, struct intel_tc_port, link_reset_work.work);
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
int ret;
if (!__intel_tc_port_link_needs_reset(tc))
return;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: TypeC DP-alt sink disconnected, resetting link\n",
tc->port_name);
ret = reset_link(tc);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
}
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
@@ -1780,7 +1772,7 @@ void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
static void __intel_tc_port_lock(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
mutex_lock(&tc->lock);
@@ -1790,9 +1782,8 @@ static void __intel_tc_port_lock(struct intel_tc_port *tc,
intel_tc_port_update_mode(tc, required_lanes,
false);
- drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
- !tc_phy_is_owned(tc));
+ drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
void intel_tc_port_lock(struct intel_digital_port *dig_port)
@@ -1885,12 +1876,12 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc;
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
- if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
+ if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
return -EINVAL;
tc = kzalloc(sizeof(*tc), GFP_KERNEL);
@@ -1900,11 +1891,11 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
dig_port->tc = tc;
tc->dig_port = dig_port;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
tc->phy_ops = &xelpdp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
tc->phy_ops = &adlp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
tc->phy_ops = &tgl_tc_phy_ops;
else
tc->phy_ops = &icl_tc_phy_ops;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 5dbe857ea85b..e3ab49815a3c 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,15 +33,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_display_irq.h"
#include "intel_display_driver.h"
+#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
@@ -1585,19 +1585,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
- i915_disable_pipestat(dev_priv, 0,
+ spin_lock_irq(&display->irq.lock);
+ i915_disable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
save_tv_dac = tv_dac = intel_de_read(display, TV_DAC);
@@ -1668,11 +1666,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, 0,
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
return type;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 7b240ce681a0..70ba7aa26bf4 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -6,10 +6,10 @@
#include <drm/drm_vblank.h>
#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
@@ -224,12 +224,13 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
*/
if (DISPLAY_VER(display) >= 20 || display->platform.battlemage)
return 1;
- else if (DISPLAY_VER(display) == 2)
- return -1;
- else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return 2;
- else
+ else if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
+ return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ? 2 : 1;
+ else if (DISPLAY_VER(display) >= 3)
return 1;
+ else
+ return -1;
}
/*
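For context on the reordered conditions: the returned value corrects
the hardware scanline counter, which is offset by a platform-dependent
amount from the position the rest of the driver uses. Roughly how it
is consumed (a sketch; the real readout also handles VRR and
wrap-around):

	/*
	 * Fold the per-platform offset into the raw counter, keeping
	 * the result in [0, vtotal) even for the -1 case.
	 */
	static int scanline_hw_to_sw(int hw_scanline, int scanline_offset,
				     int vtotal)
	{
		return (hw_scanline + scanline_offset + vtotal) % vtotal;
	}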
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index e9b809568cd4..92c04811aa28 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -446,7 +446,7 @@ enum vbt_gmbus_ddi {
* basically any of the fields to ensure the correct interpretation for the BDB
* version in question.
*
- * When we copy the child device configs to dev_priv->display.vbt.child_dev, we
+ * When we copy the child device configs to display->vbt.child_dev, we
* reserve space for the full structure below, and initialize the tail not
* actually present in VBT to zeros. Accessing those fields is fine, as long as
* the default zero is taken into account, again according to the BDB version.
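The copy that comment describes amounts to zero-allocating the full
structure and copying only the bytes this VBT version actually
provides; schematically (variable and size names assumed from
intel_bios.c):

	/*
	 * Tail fields missing from an older VBT stay zeroed, so
	 * version-gated readers see benign defaults.
	 */
	devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
	if (devdata)
		memcpy(&devdata->child, child,
		       min_t(size_t, child_dev_size, sizeof(devdata->child)));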
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 3ed64c17bdff..8e799e225af1 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -9,6 +9,7 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "i915_utils.h"
#include "intel_crtc.h"
@@ -259,6 +260,15 @@ static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config
return 0;
}
+static bool is_dsi_dsc_1_1(struct intel_crtc_state *crtc_state)
+{
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+
+ return vdsc_cfg->dsc_version_major == 1 &&
+ vdsc_cfg->dsc_version_minor == 1 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI);
+}
+
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(pipe_config);
@@ -317,8 +327,19 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
* From XE_LPD onwards we support compression bpps in steps of 1
* up to uncompressed bpp-1, hence add calculations for all the rc
* parameters
+ *
+ * We don't want to calculate all rc parameters when the panel
+ * is MIPI DSI and uses DSC 1.1, because some DSI panel vendors
+ * have hardcoded PPS params in the VBT, causing the parameters
+ * sent from the source (derived through interpolation) to differ
+ * from the params the panel expects. This causes visible noise
+ * in the display.
+ * Furthermore, for DSI panels we currently use the bits_per_pixel
+ * (compressed bpp) hardcoded in the VBT (unlike other encoders,
+ * where we find the optimum compressed bpp), so we don't need to
+ * rely on interpolation; we can get the required rc parameters
+ * from the tables.
*/
- if (DISPLAY_VER(display) >= 13) {
+ if (DISPLAY_VER(display) >= 13 && !is_dsi_dsc_1_1(pipe_config)) {
calculate_rc_params(vdsc_cfg);
} else {
if ((compressed_bpp == 8 ||
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 684b5d1bc87c..6e125564db34 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -4,15 +4,19 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include <video/vga.h>
+
#include "soc/intel_gmch.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display.h"
#include "intel_vga.h"
+#include "intel_vga_regs.h"
static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
{
@@ -24,16 +28,42 @@ static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
return VGACNTRL;
}
+static bool has_vga_pipe_sel(struct intel_display *display)
+{
+ if (display->platform.i845g ||
+ display->platform.i865g)
+ return false;
+
+ if (display->platform.valleyview ||
+ display->platform.cherryview)
+ return true;
+
+ return DISPLAY_VER(display) < 7;
+}
+
/* Disable the VGA plane that we never use */
void intel_vga_disable(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
+ enum pipe pipe;
+ u32 tmp;
u8 sr1;
- if (intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)
+ tmp = intel_de_read(display, vga_reg);
+ if (tmp & VGA_DISP_DISABLE)
return;
+ if (display->platform.cherryview)
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp);
+ else if (has_vga_pipe_sel(display))
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK, tmp);
+ else
+ pipe = PIPE_A;
+
+ drm_dbg_kms(display->drm, "Disabling VGA plane on pipe %c\n",
+ pipe_name(pipe));
+
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(0x01, VGA_SEQ_I);
@@ -46,39 +76,6 @@ void intel_vga_disable(struct intel_display *display)
intel_de_posting_read(display, vga_reg);
}
-void intel_vga_redisable_power_on(struct intel_display *display)
-{
- i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
-
- if (!(intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)) {
- drm_dbg_kms(display->drm,
- "Something enabled VGA plane, disabling it\n");
- intel_vga_disable(display);
- }
-}
-
-void intel_vga_redisable(struct intel_display *display)
-{
- intel_wakeref_t wakeref;
-
- /*
- * This function can be called both from intel_modeset_setup_hw_state or
- * at a very early point in our resume sequence, where the power well
- * structures are not yet restored. Since this function is at a very
- * paranoid "someone might have enabled VGA while we were not looking"
- * level, just check if the power well is enabled instead of trying to
- * follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses.
- */
- wakeref = intel_display_power_get_if_enabled(display, POWER_DOMAIN_VGA);
- if (!wakeref)
- return;
-
- intel_vga_redisable_power_on(display);
-
- intel_display_power_put(display, POWER_DOMAIN_VGA, wakeref);
-}
-
void intel_vga_reset_io_mem(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
index 824dfc32a199..16d699f3b641 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.h
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -10,8 +10,6 @@ struct intel_display;
void intel_vga_reset_io_mem(struct intel_display *display);
void intel_vga_disable(struct intel_display *display);
-void intel_vga_redisable(struct intel_display *display);
-void intel_vga_redisable_power_on(struct intel_display *display);
int intel_vga_register(struct intel_display *display);
void intel_vga_unregister(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_vga_regs.h b/drivers/gpu/drm/i915/display/intel_vga_regs.h
new file mode 100644
index 000000000000..cbacced1a69f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga_regs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_VGA_REGS_H__
+#define __INTEL_VGA_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define VGACNTRL _MMIO(0x71400)
+#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
+#define CPU_VGACNTRL _MMIO(0x41000)
+#define VGA_DISP_DISABLE REG_BIT(31)
+#define VGA_2X_MODE REG_BIT(30) /* pre-ilk */
+#define VGA_PIPE_SEL_MASK REG_BIT(29) /* pre-ivb */
+#define VGA_PIPE_SEL(pipe) REG_FIELD_PREP(VGA_PIPE_SEL_MASK, (pipe))
+#define VGA_PIPE_SEL_MASK_CHV REG_GENMASK(29, 28) /* chv */
+#define VGA_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(VGA_PIPE_SEL_MASK_CHV, (pipe))
+#define VGA_BORDER_ENABLE REG_BIT(26)
+#define VGA_PIPE_CSC_ENABLE REG_BIT(24) /* ilk+ */
+#define VGA_CENTERING_ENABLE_MASK REG_GENMASK(25, 24) /* pre-ilk */
+#define VGA_PALETTE_READ_SEL REG_BIT(23) /* pre-ivb */
+#define VGA_PALETTE_A_WRITE_DISABLE REG_BIT(22) /* pre-ivb */
+#define VGA_PALETTE_B_WRITE_DISABLE REG_BIT(21) /* pre-ivb */
+#define VGA_LEGACY_8BIT_PALETTE_ENABLE REG_BIT(20)
+#define VGA_PALETTE_BYPASS REG_BIT(19)
+#define VGA_NINE_DOT_DISABLE REG_BIT(18)
+#define VGA_PALETTE_READ_SEL_HI_CHV REG_BIT(15) /* chv */
+#define VGA_PALETTE_C_WRITE_DISABLE_CHV REG_BIT(14) /* chv */
+#define VGA_ACTIVE_THROTTLING_MASK REG_GENMASK(15, 12) /* ilk+ */
+#define VGA_BLANK_THROTTLING_MASK REG_GENMASK(11, 8) /* ilk+ */
+#define VGA_BLINK_DUTY_CYCLE_MASK REG_GENMASK(7, 6)
+#define VGA_VSYNC_BLINK_RATE_MASK REG_GENMASK(5, 0)
+
+#endif /* __INTEL_VGA_REGS_H__ */
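
The masks above are consumed through REG_FIELD_PREP() when building register values; a small sketch of the encode side follows, with FIELD_PREP32() as a hypothetical user-space equivalent.

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK32(h, l)         (((~0u) << (l)) & (~0u >> (31 - (h))))
    /* Shift the value up to the field's lowest bit, then clamp to the mask */
    #define FIELD_PREP32(mask, val) (((uint32_t)(val) * ((mask) & -(mask))) & (mask))

    #define VGA_PIPE_SEL_MASK_CHV GENMASK32(29, 28)

    int main(void)
    {
        /* Encode pipe C (enum value 2) into the CHV pipe-select field */
        printf("0x%08x\n", FIELD_PREP32(VGA_PIPE_SEL_MASK_CHV, 2)); /* 0x20000000 */
        return 0;
    }
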
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index cac49319026d..3eed37f271b0 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -4,8 +4,10 @@
*
*/
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_vrr.h"
@@ -32,6 +34,8 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
fallthrough;
case DRM_MODE_CONNECTOR_DisplayPort:
+ if (connector->mst.dp)
+ return false;
intel_dp = intel_attached_dp(connector);
if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
@@ -182,7 +186,8 @@ is_cmrr_frac_required(struct intel_crtc_state *crtc_state)
int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- if (!HAS_CMRR(display))
+	/* Avoid CMRR for now until we have VRR with fixed timings working */
+ if (!HAS_CMRR(display) || true)
return false;
actual_refresh_k =
@@ -222,6 +227,121 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
return vtotal;
}
+static
+void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->cmrr.enable = true;
+ /*
+ * TODO: Compute precise target refresh rate to determine
+ * if video_mode_required should be true. Currently set to
+ * false due to uncertainty about the precise target
+	 * refresh rate.
+ */
+ crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
+ crtc_state->vrr.vmin = crtc_state->vrr.vmax;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+static
+void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->vrr.enable = true;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+/*
+ * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to
+ * Vtotal value.
+ */
+static
+int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal;
+
+ if (DISPLAY_VER(display) >= 13)
+ return crtc_vtotal;
+ else
+ return crtc_vtotal -
+ intel_vrr_real_vblank_delay(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_vrr_fixed_rr_vtotal(crtc_state) -
+ intel_vrr_flipline_offset(display);
+}
+
+static
+int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmin(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmax(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ intel_vrr_fixed_rr_flipline(crtc_state) - 1);
+}
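
Note the `- 1` on every write: the VRR timing registers are 0-based, so a line count of N is programmed as N - 1 and recovered with + 1 on readout (as intel_vrr_get_config() does). A trivial sketch of the round trip, using an arbitrary example vtotal:

    #include <stdio.h>

    int main(void)
    {
        int vmin = 1125;             /* example vtotal in lines */
        unsigned int reg = vmin - 1; /* value written to TRANS_VRR_VMIN */
        int readback = reg + 1;      /* value recovered on readout */

        printf("programmed 0x%x, readout %d lines\n", reg, readback);
        return 0;
    }
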
+
+static
+void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
+{
+ /*
+ * For fixed rr, vmin = vmax = flipline.
+	 * vmin is already set to crtc_vtotal; set vmax and flipline the same.
+ */
+ crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
+static
+int intel_vrr_compute_vmin(struct intel_crtc_state *crtc_state)
+{
+ /*
+	 * To make fixed rr and vrr work seamlessly, the guardband/pipeline full
+	 * should be set such that it satisfies both the fixed and variable
+	 * timings.
+	 * For this, set vmin to crtc_vtotal. With this, the guardband never
+	 * needs to change.
+ */
+ return crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
+static
+int intel_vrr_compute_vmax(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmax;
+
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ return vmax;
+}
+
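
A worked example of the vmax computation above, using a hypothetical 1080p timing; crtc_clock is in kHz and min_vfreq comes from the panel's EDID range descriptor:

    #include <stdio.h>

    int main(void)
    {
        int crtc_clock = 148500;  /* pixel clock in kHz */
        int crtc_htotal = 2200;
        int crtc_vtotal = 1125;
        int min_vfreq = 48;       /* panel's lowest refresh rate */

        /* vmax = clock / (htotal * min_vfreq): the longest frame, in
         * lines, that still meets the panel's minimum refresh rate. */
        int vmax = crtc_clock * 1000 / (crtc_htotal * min_vfreq);

        if (vmax < crtc_vtotal)
            vmax = crtc_vtotal;   /* never shorter than the base mode */

        /* prints: vmax = 1406 lines (~48.01 Hz floor) */
        printf("vmax = %d lines (~%.2f Hz floor)\n", vmax,
               (double)crtc_clock * 1000 / (crtc_htotal * vmax));
        return 0;
    }
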
void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -232,14 +352,9 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct intel_dp *intel_dp = intel_attached_dp(connector);
bool is_edp = intel_dp_is_edp(intel_dp);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
- /*
- * FIXME all joined pipes share the same transcoder.
- * Need to account for that during VRR toggle/push/etc.
- */
- if (crtc_state->joiner_pipes)
+ if (!HAS_VRR(display))
return;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -247,28 +362,40 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->vrr.in_range =
intel_vrr_is_in_range(connector, drm_mode_vrefresh(adjusted_mode));
- if (!crtc_state->vrr.in_range)
- return;
-
- if (HAS_LRR(display))
- crtc_state->update_lrr = true;
- vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
- adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
- vmax = adjusted_mode->crtc_clock * 1000 /
- (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ /*
+ * Allow fixed refresh rate with VRR Timing Generator.
+	 * For now set vrr.in_range to false, which allows fixed_rr but skips
+	 * actual VRR and LRR.
+	 * TODO: For actual VRR with joiner, we need to figure out how to
+	 * correctly sequence transcoder level vs. pipe level programming
+	 * in the commit.
+ */
+ if (crtc_state->joiner_pipes)
+ crtc_state->vrr.in_range = false;
- vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
- vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+ vmin = intel_vrr_compute_vmin(crtc_state);
- if (vmin >= vmax)
- return;
+ if (crtc_state->vrr.in_range) {
+ if (HAS_LRR(display))
+ crtc_state->update_lrr = true;
+ vmax = intel_vrr_compute_vmax(connector, adjusted_mode);
+ } else {
+ vmax = vmin;
+ }
crtc_state->vrr.vmin = vmin;
crtc_state->vrr.vmax = vmax;
crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ if (crtc_state->uapi.vrr_enabled && vmin < vmax)
+ intel_vrr_compute_vrr_timings(crtc_state);
+ else if (is_cmrr_frac_required(crtc_state) && is_edp)
+ intel_vrr_compute_cmrr_timings(crtc_state);
+ else
+ intel_vrr_compute_fixed_rr_timings(crtc_state);
+
/*
* flipline determines the min vblank length the hardware will
* generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
@@ -276,29 +403,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
*/
crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
- /*
- * When panel is VRR capable and userspace has
- * not enabled adaptive sync mode then Fixed Average
- * Vtotal mode should be enabled.
- */
- if (crtc_state->uapi.vrr_enabled) {
- crtc_state->vrr.enable = true;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- } else if (is_cmrr_frac_required(crtc_state) && is_edp) {
- crtc_state->vrr.enable = true;
- crtc_state->cmrr.enable = true;
- /*
- * TODO: Compute precise target refresh rate to determine
- * if video_mode_required should be true. Currently set to
- * false due to uncertainty about the precise target
- * refresh Rate.
- */
- crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
- crtc_state->vrr.vmin = crtc_state->vrr.vmax;
- crtc_state->vrr.flipline = crtc_state->vrr.vmin;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- }
-
if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
@@ -340,7 +444,10 @@ static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(display) >= 13)
+ if (DISPLAY_VER(display) >= 14)
+ return VRR_CTL_FLIP_LINE_EN |
+ XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
+ else if (DISPLAY_VER(display) >= 13)
return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
else
@@ -380,14 +487,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
lower_32_bits(crtc_state->cmrr.cmrr_n));
}
- intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- crtc_state->vrr.vmin - 1);
- intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- crtc_state->vrr.vmax - 1);
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(crtc_state));
- intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- crtc_state->vrr.flipline - 1);
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+
+ if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable)
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
if (HAS_AS_SDP(display))
intel_de_write(display,
@@ -461,6 +565,36 @@ bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND;
}
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
+{
+ if (!HAS_VRR(display))
+ return false;
+
+ if (DISPLAY_VER(display) >= 30)
+ return true;
+
+ return false;
+}
+
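
A sketch of the policy this helper encodes, assuming only the version threshold visible above (display version 30 and newer always drive the transcoder through the VRR timing generator, even for a fixed refresh rate):

    #include <stdbool.h>
    #include <stdio.h>

    static bool always_use_vrr_tg(int display_ver, bool has_vrr)
    {
        return has_vrr && display_ver >= 30;
    }

    int main(void)
    {
        int vers[] = { 13, 14, 20, 30 };

        for (unsigned int i = 0; i < sizeof(vers) / sizeof(vers[0]); i++)
            printf("ver %2d: %s\n", vers[i],
                   always_use_vrr_tg(vers[i], true) ?
                   "VRR TG always on (fixed rate via vmin=vmax=flipline)" :
                   "VRR TG toggled only when VRR is enabled");
        return 0;
    }
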
+static
+void intel_vrr_set_db_point_and_transmission_line(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ /*
+	 * For BMG and LNL+ onwards, EMP_AS_SDP_TL is used to program the
+	 * double buffering point and transmission line for VRR packets on
+	 * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
+	 * Since we currently support VRR only on DP/eDP, this is programmed
+	 * for the Adaptive Sync SDP to Vsync start.
+ */
+ if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
+ intel_de_write(display,
+ EMP_AS_SDP_TL(display, cpu_transcoder),
+ EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
+}
+
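
The `== 1401` check relies on DISPLAY_VERx100() packing the display IP version as major * 100 + minor, so it matches display 14.01 (BMG) while the `>= 20` arm covers LNL and later. A small illustration of the packing:

    #include <stdio.h>

    static int verx100(int major, int minor)
    {
        return major * 100 + minor; /* same packing as DISPLAY_VERx100() */
    }

    int main(void)
    {
        printf("display 14.01 -> %d (BMG, programs EMP_AS_SDP_TL)\n", verx100(14, 1));
        printf("display 20.00 -> %d (LNL+, covered by the >= 20 arm)\n", verx100(20, 0));
        return 0;
    }
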
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -469,16 +603,27 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
if (!crtc_state->vrr.enable)
return;
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ crtc_state->vrr.vmin - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ crtc_state->vrr.vmax - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ crtc_state->vrr.flipline - 1);
+
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
- if (crtc_state->cmrr.enable) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
- trans_vrr_ctl(crtc_state));
- } else {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_vrr_set_db_point_and_transmission_line(crtc_state);
+
+ if (crtc_state->cmrr.enable) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
+ trans_vrr_ctl(crtc_state));
+ } else {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ }
}
}
@@ -490,24 +635,79 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->vrr.enable)
return;
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
+ intel_de_wait_for_clear(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+ }
+
+ intel_vrr_set_fixed_rr_timings(old_crtc_state);
+}
+
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
+ return;
+ }
+
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_EN);
+
+ intel_vrr_set_db_point_and_transmission_line(crtc_state);
+
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(display,
- TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+}
+
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0);
+
+ intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
}
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.flipline &&
+ crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
+ crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state);
+}
+
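
A user-space mirror of this predicate, with the vmin_flipline offset simplified to plain vmin for illustration: fixed refresh means the VRR window has collapsed to a single line count.

    #include <stdbool.h>
    #include <stdio.h>

    struct vrr { int vmin, vmax, flipline; };

    /* Simplified intel_vrr_is_fixed_rr(): the flipline offset handling
     * of the real helper is elided here. */
    static bool is_fixed_rr(const struct vrr *v)
    {
        return v->flipline && v->flipline == v->vmax && v->flipline == v->vmin;
    }

    int main(void)
    {
        struct vrr fixed    = { 1125, 1125, 1125 };
        struct vrr variable = { 1125, 1406, 1125 };

        printf("fixed: %d, variable: %d\n",
               is_fixed_rr(&fixed), is_fixed_rr(&variable));
        return 0;
    }
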
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 trans_vrr_ctl, trans_vrr_vsync;
+ bool vrr_enable;
trans_vrr_ctl = intel_de_read(display,
TRANS_VRR_CTL(display, cpu_transcoder));
- crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
if (HAS_CMRR(display))
crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE);
@@ -536,6 +736,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
+ /*
+ * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
+	 * bits are not filled. Since on these platforms TRANS_VRR_VMIN is
+	 * always filled with crtc_vtotal, use it to get the vtotal for
+ * adjusted_mode.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->hw.adjusted_mode.crtc_vtotal =
+ intel_vrr_vmin_vtotal(crtc_state);
+
if (HAS_AS_SDP(display)) {
trans_vrr_vsync =
intel_de_read(display,
@@ -547,6 +757,18 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
}
}
+ vrr_enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->vrr.enable = vrr_enable && !intel_vrr_is_fixed_rr(crtc_state);
+ else
+ crtc_state->vrr.enable = vrr_enable;
+
+ /*
+	 * TODO: For both VRR and CMRR, I915_MODE_FLAG_VRR is set in mode_flags.
+	 * Since CMRR is currently disabled, set this flag only for VRR for now.
+	 * Keep this in mind when re-enabling CMRR.
+ */
if (crtc_state->vrr.enable)
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 514822577e8a..38bf9996b883 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -13,6 +13,7 @@ struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_dsb;
+struct intel_display;
bool intel_vrr_is_capable(struct intel_connector *connector);
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
@@ -35,5 +36,10 @@ int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr_regs.h b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
index 6ed0e0dc97e7..ba9b9215dc11 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
@@ -8,120 +8,119 @@
#include "intel_display_reg_defs.h"
-/* VRR registers */
#define _TRANS_VRR_CTL_A 0x60420
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_CTL_A)
-#define VRR_CTL_VRR_ENABLE REG_BIT(31)
-#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
-#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
-#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
-#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
-#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
-#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0)
-#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x))
+#define TRANS_VRR_CTL(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_CMRR_ENABLE REG_BIT(27)
+#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
+#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x))
#define _TRANS_VRR_VMAX_A 0x60424
#define _TRANS_VRR_VMAX_B 0x61424
#define _TRANS_VRR_VMAX_C 0x62424
#define _TRANS_VRR_VMAX_D 0x63424
-#define TRANS_VRR_VMAX(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMAX_A)
-#define VRR_VMAX_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_VMAX(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMAX_A)
+#define VRR_VMAX_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_VMIN_A 0x60434
#define _TRANS_VRR_VMIN_B 0x61434
#define _TRANS_VRR_VMIN_C 0x62434
#define _TRANS_VRR_VMIN_D 0x63434
-#define TRANS_VRR_VMIN(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMIN_A)
-#define VRR_VMIN_MASK REG_GENMASK(15, 0)
+#define TRANS_VRR_VMIN(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMIN_A)
+#define VRR_VMIN_MASK REG_GENMASK(15, 0)
#define _TRANS_VRR_VMAXSHIFT_A 0x60428
#define _TRANS_VRR_VMAXSHIFT_B 0x61428
#define _TRANS_VRR_VMAXSHIFT_C 0x62428
#define _TRANS_VRR_VMAXSHIFT_D 0x63428
-#define TRANS_VRR_VMAXSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \
- _TRANS_VRR_VMAXSHIFT_A)
-#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
-#define VRR_VMAXSHIFT_DEC REG_BIT(16)
-#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0)
+#define TRANS_VRR_VMAXSHIFT(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMAXSHIFT_A)
+#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
+#define VRR_VMAXSHIFT_DEC REG_BIT(16)
+#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0)
#define _TRANS_VRR_STATUS_A 0x6042c
#define _TRANS_VRR_STATUS_B 0x6142c
#define _TRANS_VRR_STATUS_C 0x6242c
#define _TRANS_VRR_STATUS_D 0x6342c
-#define TRANS_VRR_STATUS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS_A)
-#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
-#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
-#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
-#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28)
-#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27)
-#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26)
-#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20)
-#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
-#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
-#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
-#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
-#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
-#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
-#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
+#define TRANS_VRR_STATUS(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_STATUS_A)
+#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
+#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
+#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
+#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28)
+#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27)
+#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26)
+#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20)
+#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
+#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
+#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
+#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
+#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
+#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
+#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
#define _TRANS_VRR_VTOTAL_PREV_A 0x60480
#define _TRANS_VRR_VTOTAL_PREV_B 0x61480
#define _TRANS_VRR_VTOTAL_PREV_C 0x62480
#define _TRANS_VRR_VTOTAL_PREV_D 0x63480
-#define TRANS_VRR_VTOTAL_PREV(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \
- _TRANS_VRR_VTOTAL_PREV_A)
-#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
-#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
-#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
-#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_VTOTAL_PREV(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VTOTAL_PREV_A)
+#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
+#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
+#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
+#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_FLIPLINE_A 0x60438
#define _TRANS_VRR_FLIPLINE_B 0x61438
#define _TRANS_VRR_FLIPLINE_C 0x62438
#define _TRANS_VRR_FLIPLINE_D 0x63438
-#define TRANS_VRR_FLIPLINE(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \
- _TRANS_VRR_FLIPLINE_A)
-#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_FLIPLINE(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_FLIPLINE_A)
+#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_STATUS2_A 0x6043c
#define _TRANS_VRR_STATUS2_B 0x6143c
#define _TRANS_VRR_STATUS2_C 0x6243c
#define _TRANS_VRR_STATUS2_D 0x6343c
-#define TRANS_VRR_STATUS2(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS2_A)
-#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_STATUS2(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_STATUS2_A)
+#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
#define _TRANS_PUSH_A 0x60a70
#define _TRANS_PUSH_B 0x61a70
#define _TRANS_PUSH_C 0x62a70
#define _TRANS_PUSH_D 0x63a70
-#define TRANS_PUSH(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_PUSH_A)
-#define TRANS_PUSH_EN REG_BIT(31)
-#define TRANS_PUSH_SEND REG_BIT(30)
+#define TRANS_PUSH(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_PUSH_A)
+#define TRANS_PUSH_EN REG_BIT(31)
+#define TRANS_PUSH_SEND REG_BIT(30)
#define _TRANS_VRR_VSYNC_A 0x60078
-#define TRANS_VRR_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VSYNC_A)
-#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16)
-#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end))
-#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0)
-#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start))
-
-/*CMRR Registers*/
+#define TRANS_VRR_VSYNC(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VSYNC_A)
+#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16)
+#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end))
+#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0)
+#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start))
+
+/* Common register for HDMI EMP and DP AS SDP */
+#define _EMP_AS_SDP_TL_A 0x60204
+#define EMP_AS_SDP_TL(display, trans) _MMIO_TRANS2((display), (trans), _EMP_AS_SDP_TL_A)
+#define EMP_AS_SDP_DB_TL_MASK REG_GENMASK(12, 0)
+#define EMP_AS_SDP_DB_TL(db_transmit_line) REG_FIELD_PREP(EMP_AS_SDP_DB_TL_MASK, (db_transmit_line))
#define _TRANS_CMRR_M_LO_A 0x604F0
-#define TRANS_CMRR_M_LO(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_M_LO_A)
+#define TRANS_CMRR_M_LO(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_M_LO_A)
#define _TRANS_CMRR_M_HI_A 0x604F4
-#define TRANS_CMRR_M_HI(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_M_HI_A)
+#define TRANS_CMRR_M_HI(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_M_HI_A)
#define _TRANS_CMRR_N_LO_A 0x604F8
-#define TRANS_CMRR_N_LO(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_N_LO_A)
+#define TRANS_CMRR_N_LO(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_N_LO_A)
#define _TRANS_CMRR_N_HI_A 0x604FC
-#define TRANS_CMRR_N_HI(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_N_HI_A)
-
-#define VRR_CTL_CMRR_ENABLE REG_BIT(27)
+#define TRANS_CMRR_N_HI(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_N_HI_A)
#endif /* __INTEL_VRR_REGS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index f00f4cfc58e5..bba82e888db2 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -5,15 +5,18 @@
#include <linux/debugfs.h>
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i9xx_wm.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
- * @i915: i915 device
+ * @display: display device
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
@@ -44,10 +47,10 @@
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_i915_private *i915)
+void intel_update_watermarks(struct intel_display *display)
{
- if (i915->display.funcs.wm->update_wm)
- i915->display.funcs.wm->update_wm(i915);
+ if (display->funcs.wm->update_wm)
+ display->funcs.wm->update_wm(display);
}
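
The dispatch pattern used throughout this file, re-created in a standalone sketch; the struct and function names here are illustrative, not the kernel's. Every callback is optional, so callers NULL-check before dispatching:

    #include <stdio.h>

    struct wm_funcs {
        void (*update_wm)(void *display);
    };

    static void skl_update_wm(void *display)
    {
        (void)display;
        puts("skl update_wm");
    }

    static void update_watermarks(const struct wm_funcs *funcs, void *display)
    {
        if (funcs->update_wm)          /* optional hook, guard before calling */
            funcs->update_wm(display);
    }

    int main(void)
    {
        struct wm_funcs skl  = { .update_wm = skl_update_wm };
        struct wm_funcs none = { 0 };

        update_watermarks(&skl, NULL);  /* dispatches */
        update_watermarks(&none, NULL); /* safely a no-op */
        return 0;
    }
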
int intel_wm_compute(struct intel_atomic_state *state,
@@ -64,10 +67,10 @@ int intel_wm_compute(struct intel_atomic_state *state,
bool intel_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->initial_watermarks) {
- i915->display.funcs.wm->initial_watermarks(state, crtc);
+ if (display->funcs.wm->initial_watermarks) {
+ display->funcs.wm->initial_watermarks(state, crtc);
return true;
}
@@ -77,41 +80,41 @@ bool intel_initial_watermarks(struct intel_atomic_state *state,
void intel_atomic_update_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->atomic_update_watermarks)
- i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
+ if (display->funcs.wm->atomic_update_watermarks)
+ display->funcs.wm->atomic_update_watermarks(state, crtc);
}
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->optimize_watermarks)
- i915->display.funcs.wm->optimize_watermarks(state, crtc);
+ if (display->funcs.wm->optimize_watermarks)
+ display->funcs.wm->optimize_watermarks(state, crtc);
}
int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->compute_global_watermarks)
- return i915->display.funcs.wm->compute_global_watermarks(state);
+ if (display->funcs.wm->compute_global_watermarks)
+ return display->funcs.wm->compute_global_watermarks(state);
return 0;
}
-void intel_wm_get_hw_state(struct drm_i915_private *i915)
+void intel_wm_get_hw_state(struct intel_display *display)
{
- if (i915->display.funcs.wm->get_hw_state)
- return i915->display.funcs.wm->get_hw_state(i915);
+ if (display->funcs.wm->get_hw_state)
+ return display->funcs.wm->get_hw_state(display);
}
-void intel_wm_sanitize(struct drm_i915_private *i915)
+void intel_wm_sanitize(struct intel_display *display)
{
- if (i915->display.funcs.wm->sanitize)
- return i915->display.funcs.wm->sanitize(i915);
+ if (display->funcs.wm->sanitize)
+ return display->funcs.wm->sanitize(display);
}
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
@@ -137,16 +140,16 @@ bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
return plane_state->uapi.visible;
}
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[])
{
int level;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency not provided\n",
name, level);
continue;
@@ -156,43 +159,43 @@ void intel_print_wm_latency(struct drm_i915_private *dev_priv,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
latency *= 10;
else if (level > 0)
latency *= 5;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency %u (%u.%u usec)\n", name, level,
wm[level], latency / 10, latency % 10);
}
}
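
A worked example of the unit handling above: raw values are 1 us units on display version 9+, and 0.5 us units for WM1+ before that; both are scaled to tenths of a microsecond before printing.

    #include <stdio.h>

    int main(void)
    {
        unsigned int raw = 7;
        unsigned int gen9 = raw * 10;    /* 7 -> 7.0 us */
        unsigned int pre9_wm1 = raw * 5; /* 7 -> 3.5 us */

        printf("gen9+:         %u.%u usec\n", gen9 / 10, gen9 % 10);
        printf("pre-gen9 WM1+: %u.%u usec\n", pre9_wm1 / 10, pre9_wm1 % 10);
        return 0;
    }
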
-void intel_wm_init(struct drm_i915_private *i915)
+void intel_wm_init(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
- skl_wm_init(i915);
+ if (DISPLAY_VER(display) >= 9)
+ skl_wm_init(display);
else
- i9xx_wm_init(i915);
+ i9xx_wm_init(display);
}
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
int level;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
/*
* - WM1+ latency values in 0.5us units
* - latencies are in us on gen9/vlv/chv
*/
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.valleyview ||
+ display->platform.cherryview ||
+ display->platform.g4x)
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -201,18 +204,18 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
level, wm[level], latency / 10, latency % 10);
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
wm_latency_show(m, latencies);
@@ -221,13 +224,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
wm_latency_show(m, latencies);
@@ -236,13 +239,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
wm_latency_show(m, latencies);
@@ -251,39 +254,39 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x)
return -ENODEV;
- return single_open(file, pri_wm_latency_show, dev_priv);
+ return single_open(file, pri_wm_latency_show, display);
}
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, spr_wm_latency_show, dev_priv);
+ return single_open(file, spr_wm_latency_show, display);
}
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, cur_wm_latency_show, dev_priv);
+ return single_open(file, cur_wm_latency_show, display);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp, u16 wm[8])
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 new[8] = {};
int level;
int ret;
@@ -300,15 +303,15 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
&new[0], &new[1], &new[2], &new[3],
&new[4], &new[5], &new[6], &new[7]);
- if (ret != dev_priv->display.wm.num_levels)
+ if (ret != display->wm.num_levels)
return -EINVAL;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
wm[level] = new[level];
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
return len;
}
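
The parse step in isolation, as a sketch: the input string mimics what a user would write to the debugfs file (e.g. i915_pri_wm_latency under the DRM debugfs root), and it is rejected unless exactly num_levels values are supplied.

    #include <stdio.h>

    int main(void)
    {
        const char *input = "2 4 4 4 4 4 4 4"; /* hypothetical latency values */
        unsigned short new[8] = { 0 };
        int num_levels = 8;

        int ret = sscanf(input, "%hu %hu %hu %hu %hu %hu %hu %hu",
                         &new[0], &new[1], &new[2], &new[3],
                         &new[4], &new[5], &new[6], &new[7]);

        printf("%s\n", ret == num_levels ? "accepted" : "rejected");
        return 0;
    }
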
@@ -317,13 +320,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -332,13 +335,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -347,13 +350,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -385,18 +388,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
.write = cur_wm_latency_write
};
-void intel_wm_debugfs_register(struct drm_i915_private *i915)
+void intel_wm_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_pri_wm_latency_fops);
+ display, &i915_pri_wm_latency_fops);
debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_spr_wm_latency_fops);
+ display, &i915_spr_wm_latency_fops);
debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_cur_wm_latency_fops);
+ display, &i915_cur_wm_latency_fops);
- skl_watermark_debugfs_register(i915);
+ skl_watermark_debugfs_register(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
index 7d3a447054b3..9ad4e9eae5ca 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.h
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -8,13 +8,13 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
-void intel_update_watermarks(struct drm_i915_private *i915);
+void intel_update_watermarks(struct intel_display *display);
int intel_wm_compute(struct intel_atomic_state *state,
struct intel_crtc *crtc);
bool intel_initial_watermarks(struct intel_atomic_state *state,
@@ -24,13 +24,13 @@ void intel_atomic_update_watermarks(struct intel_atomic_state *state,
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_compute_global_watermarks(struct intel_atomic_state *state);
-void intel_wm_get_hw_state(struct drm_i915_private *i915);
-void intel_wm_sanitize(struct drm_i915_private *i915);
+void intel_wm_get_hw_state(struct intel_display *display);
+void intel_wm_sanitize(struct intel_display *display);
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_print_wm_latency(struct drm_i915_private *i915,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[]);
-void intel_wm_init(struct drm_i915_private *i915);
-void intel_wm_debugfs_register(struct drm_i915_private *i915);
+void intel_wm_init(struct intel_display *display);
+void intel_wm_debugfs_register(struct intel_display *display);
#endif /* __INTEL_WM_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index ee81220a7c88..d77798499c57 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -3,9 +3,11 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -693,15 +695,14 @@ static void glk_program_nearest_filter_coefs(struct intel_display *display,
GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
-static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter)
{
- if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
+ if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR)
return (PS_FILTER_PROGRAMMED |
- PS_Y_VERT_FILTER_SELECT(set) |
- PS_Y_HORZ_FILTER_SELECT(set) |
- PS_UV_VERT_FILTER_SELECT(set) |
- PS_UV_HORZ_FILTER_SELECT(set));
- }
+ PS_Y_VERT_FILTER_SELECT(0) |
+ PS_Y_HORZ_FILTER_SELECT(0) |
+ PS_UV_VERT_FILTER_SELECT(0) |
+ PS_UV_HORZ_FILTER_SELECT(0));
return PS_FILTER_MEDIUM;
}
@@ -759,7 +760,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
id = scaler_state->scaler_id;
ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
- skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
+ skl_scaler_get_filter_select(crtc_state->hw.scaling_filter);
trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);
@@ -825,7 +826,7 @@ skl_program_plane_scaler(struct intel_dsb *dsb,
}
ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
- skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
+ skl_scaler_get_filter_select(plane_state->hw.scaling_filter);
trace_intel_plane_scaler_update_arm(plane, scaler_id,
crtc_x, crtc_y, crtc_w, crtc_h);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 70e550539bb2..e20972ddfa09 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -8,24 +8,24 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include "pxp/intel_pxp.h"
#include "i915_drv.h"
-#include "i915_reg.h"
-#include "intel_atomic_plane.h"
#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
-#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
DRM_FORMAT_C8,
@@ -601,7 +601,7 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane,
* Figure out what's going on here...
*/
if (display->platform.alderlake_p &&
- intel_plane_can_async_flip(plane, fb->modifier))
+ intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return mult * 16 * 1024;
switch (fb->modifier) {
@@ -2327,8 +2327,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
max_scale = skl_plane_max_scale(display, fb);
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- min_scale, max_scale, true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
if (ret)
return ret;
@@ -2666,6 +2666,7 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = skl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs icl_plane_funcs = {
@@ -2675,6 +2676,7 @@ static const struct drm_plane_funcs icl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = icl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs tgl_plane_funcs = {
@@ -2684,28 +2686,29 @@ static const struct drm_plane_funcs tgl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = tgl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static void
skl_plane_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_enable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&display->irq.lock);
}
static void
skl_plane_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_disable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&display->irq.lock);
}
static bool skl_plane_has_rc_ccs(struct intel_display *display,
@@ -2788,6 +2791,32 @@ static u8 tgl_plane_caps(struct intel_display *display,
return caps;
}
+static void skl_disable_tiling(struct intel_plane *plane)
+{
+ struct intel_plane_state *state = to_intel_plane_state(plane->base.state);
+ struct intel_display *display = to_intel_display(plane);
+ const struct drm_framebuffer *fb = state->hw.fb;
+ u32 plane_ctl;
+
+ plane_ctl = intel_de_read(display, PLANE_CTL(plane->pipe, plane->id));
+
+ if (intel_fb_uses_dpt(fb)) {
+ /* if DPT is enabled, keep tiling, but disable compression */
+ plane_ctl &= ~PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ } else {
+ /* if DPT is not supported, disable tiling, and update stride */
+ u32 stride = state->view.color_plane[0].scanout_stride / 64;
+
+ plane_ctl &= ~PLANE_CTL_TILED_MASK;
+ intel_de_write_fw(display, PLANE_STRIDE(plane->pipe, plane->id),
+ PLANE_STRIDE_(stride));
+ }
+ intel_de_write_fw(display, PLANE_CTL(plane->pipe, plane->id), plane_ctl);
+
+ intel_de_write_fw(display, PLANE_SURF(plane->pipe, plane->id),
+ skl_plane_surf(state, 0));
+}
+
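
The branch above in miniature: with DPT the framebuffer must stay tiled and only render decompression is dropped, while without DPT the plane falls back to linear and the stride is reprogrammed in 64-byte units. The stride value below is a hypothetical 1920 x 4 bpp scanline:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool uses_dpt = false;
        unsigned int scanout_stride = 7680; /* bytes per scanline */

        if (uses_dpt)
            puts("keep tiling, clear PLANE_CTL_RENDER_DECOMPRESSION_ENABLE");
        else
            printf("go linear, PLANE_STRIDE = %u (stride / 64)\n",
                   scanout_stride / 64);
        return 0;
    }
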
struct intel_plane *
skl_universal_plane_create(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
@@ -2834,6 +2863,7 @@ skl_universal_plane_create(struct intel_display *display,
plane->max_height = skl_plane_max_height;
plane->min_cdclk = skl_plane_min_cdclk;
}
+ plane->disable_tiling = skl_disable_tiling;
if (DISPLAY_VER(display) >= 13)
plane->max_stride = adl_plane_max_stride;
@@ -3006,7 +3036,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
return;
}
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_bo_alloc_framebuffer();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 621e97943542..222c069fdadb 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -6,12 +6,14 @@
#include <linux/debugfs.h>
#include <drm/drm_blend.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "soc/intel_dram.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
@@ -19,22 +21,40 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fixed.h"
+#include "intel_flipq.h"
#include "intel_pcode.h"
+#include "intel_plane.h"
#include "intel_wm.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
-/*It is expected that DSB can do posted writes to every register in
- * the pipe and planes within 100us. For flip queue use case, the
- * recommended DSB execution time is 100us + one SAGV block time.
- */
-#define DSB_EXE_TIME 100
+struct intel_dbuf_state {
+ struct intel_global_state base;
+
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+ u8 enabled_slices;
+ u8 active_pipes;
+ u8 mdclk_cdclk_ratio;
+ bool joined_mbus;
+};
+
+#define to_intel_dbuf_state(global_state) \
+ container_of_const((global_state), struct intel_dbuf_state, base)
-static void skl_sagv_disable(struct drm_i915_private *i915);
+#define intel_atomic_get_old_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
+#define intel_atomic_get_new_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
+
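
The dbuf state recovers its wrapper from the embedded base via container_of_const(); a user-space re-creation of the idiom follows, with simplified types standing in for the kernel's global-state structures.

    #include <stddef.h>
    #include <stdio.h>

    struct global_state { int serial; };

    struct dbuf_state {
        struct global_state base; /* embedded base, first member */
        unsigned char enabled_slices;
    };

    /* Same pointer arithmetic as the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct dbuf_state s = { .base = { 1 }, .enabled_slices = 0x3 };
        struct global_state *b = &s.base;
        struct dbuf_state *back = container_of(b, struct dbuf_state, base);

        printf("slices 0x%x\n", back->enabled_slices);
        return 0;
    }
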
+static void skl_sagv_disable(struct intel_display *display);
/* Stores plane specific WM parameters */
struct skl_wm_params {
@@ -69,24 +89,20 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display)
* FIXME: We still don't have the proper code detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
*/
-static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
+static bool skl_needs_memory_bw_wa(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9;
+ return DISPLAY_VER(display) == 9;
}
bool
-intel_has_sagv(struct drm_i915_private *i915)
+intel_has_sagv(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
}
static u32
-intel_sagv_block_time(struct drm_i915_private *i915)
+intel_sagv_block_time(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (DISPLAY_VER(display) >= 14) {
u32 val;
@@ -97,9 +113,9 @@ intel_sagv_block_time(struct drm_i915_private *i915)
u32 val = 0;
int ret;
- ret = snb_pcode_read(&i915->uncore,
- GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
- &val, NULL);
+ ret = intel_pcode_read(display->drm,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
if (ret) {
drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
return 0;
@@ -115,10 +131,8 @@ intel_sagv_block_time(struct drm_i915_private *i915)
}
}
-static void intel_sagv_init(struct drm_i915_private *i915)
+static void intel_sagv_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (!HAS_SAGV(display))
display->sagv.status = I915_SAGV_NOT_CONTROLLED;
@@ -127,14 +141,14 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* For icl+ this was already determined by intel_bw_init_hw().
*/
if (DISPLAY_VER(display) < 11)
- skl_sagv_disable(i915);
+ skl_sagv_disable(display);
drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
- display->sagv.block_time_us = intel_sagv_block_time(i915);
+ display->sagv.block_time_us = intel_sagv_block_time(display);
drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us);
+ str_yes_no(intel_has_sagv(display)), display->sagv.block_time_us);
/* avoid overflow when adding with wm0 latency/etc. */
if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
@@ -142,7 +156,7 @@ static void intel_sagv_init(struct drm_i915_private *i915)
display->sagv.block_time_us))
display->sagv.block_time_us = 0;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
display->sagv.block_time_us = 0;
}
@@ -157,19 +171,19 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* - All planes can enable watermarks for latencies >= SAGV engine block time
* - We're not using an interlaced display configuration
*/
-static void skl_sagv_enable(struct drm_i915_private *i915)
+static void skl_sagv_enable(struct intel_display *display)
{
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_ENABLED)
+ if (display->sagv.status == I915_SAGV_ENABLED)
return;
- drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
- ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_ENABLE);
+ drm_dbg_kms(display->drm, "Enabling SAGV\n");
+ ret = intel_pcode_write(display->drm, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
@@ -177,143 +191,78 @@ static void skl_sagv_enable(struct drm_i915_private *i915)
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to enable SAGV\n");
+ drm_err(display->drm, "Failed to enable SAGV\n");
return;
}
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
}
-static void skl_sagv_disable(struct drm_i915_private *i915)
+static void skl_sagv_disable(struct intel_display *display)
{
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_DISABLED)
+ if (display->sagv.status == I915_SAGV_DISABLED)
return;
- drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
+ drm_dbg_kms(display->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DISABLE,
- GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
- 1);
+ ret = intel_pcode_request(display->drm, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1);
/*
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
+ drm_err(display->drm, "Failed to disable SAGV (%d)\n", ret);
return;
}
- i915->display.sagv.status = I915_SAGV_DISABLED;
+ display->sagv.status = I915_SAGV_DISABLED;
}
static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
if (!new_bw_state)
return;
- if (!intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_disable(i915);
+ if (!intel_bw_can_enable_sagv(display, new_bw_state))
+ skl_sagv_disable(display);
}
static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
-
- if (!new_bw_state)
- return;
-
- if (intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_enable(i915);
-}
-
-static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask;
- new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Restrict required qgv points before updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(i915, new_mask);
-}
-
-static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
if (!new_bw_state)
return;
- old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
- new_mask = new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Allow required qgv points after updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(i915, new_mask);
+ if (intel_bw_can_enable_sagv(display, new_bw_state))
+ skl_sagv_enable(display);
}
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -322,10 +271,10 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
* disabled in a BIOS, we are not even allowed to send a PCode request,
* as it will throw an error. So we have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_pre_plane_update(state);
else
skl_sagv_pre_plane_update(state);
@@ -333,7 +282,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -342,10 +291,10 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
* disabled in a BIOS, we are not even allowed to send a PCode request,
* as it will throw an error. So we have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_post_plane_update(state);
else
skl_sagv_post_plane_update(state);
@@ -353,12 +302,12 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
int max_level = INT_MAX;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return false;
if (!crtc_state->hw.active)
@@ -377,7 +326,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = i915->display.wm.num_levels - 1;
+ for (level = display->wm.num_levels - 1;
!wm->wm[level].enable; --level)
{ }
@@ -423,104 +372,27 @@ static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
return true;
}
-static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (!i915->display.params.enable_sagv)
+ if (!display->params.enable_sagv)
return false;
- if (DISPLAY_VER(i915) >= 12)
+ /*
+ * SAGV is initially forced off because its current
+ * state can't be queried from pcode. Allow SAGV to
+ * be enabled upon the first real commit.
+ */
+ if (crtc_state->inherited)
+ return false;
+
+ if (DISPLAY_VER(display) >= 12)
return tgl_crtc_can_enable_sagv(crtc_state);
else
return skl_crtc_can_enable_sagv(crtc_state);
}
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
- const struct intel_bw_state *bw_state)
-{
- if (DISPLAY_VER(i915) < 11 &&
- bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
- return false;
-
- return bw_state->pipe_sagv_reject == 0;
-}
-
-static int intel_compute_sagv_mask(struct intel_atomic_state *state)
-{
- struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- int ret;
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case).
- *
- * drm_atomic_check_only() gets upset if we pull more crtcs
- * into the state, so we have to calculate this based on the
- * individual intel_crtc_can_enable_sagv() rather than
- * the overall intel_can_enable_sagv(). Otherwise the
- * crtcs not included in the commit would not switch to the
- * SAGV watermarks when we are about to enable SAGV, and that
- * would lead to underruns. This does mean extra power draw
- * when only a subset of the crtcs are blocking SAGV as the
- * other crtcs can't be allowed to use the more optimal
- * normal (ie. non-SAGV) watermarks.
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
- DISPLAY_VER(i915) >= 12 &&
- intel_crtc_can_enable_sagv(new_crtc_state);
-
- if (intel_crtc_can_enable_sagv(new_crtc_state))
- new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
- else
- new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
- }
-
- if (!new_bw_state)
- return 0;
-
- new_bw_state->active_pipes =
- intel_calc_active_pipes(state, old_bw_state->active_pipes);
-
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- if (intel_can_enable_sagv(i915, new_bw_state) !=
- intel_can_enable_sagv(i915, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
u16 start, u16 end)
{
@@ -530,17 +402,17 @@ static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
return end;
}
-static int intel_dbuf_slice_size(struct drm_i915_private *i915)
+static int intel_dbuf_slice_size(struct intel_display *display)
{
- return DISPLAY_INFO(i915)->dbuf.size /
- hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask);
+ return DISPLAY_INFO(display)->dbuf.size /
+ hweight8(DISPLAY_INFO(display)->dbuf.slice_mask);
}
static void
-skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+skl_ddb_entry_for_slices(struct intel_display *display, u8 slice_mask,
struct skl_ddb_entry *ddb)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
if (!slice_mask) {
ddb->start = 0;
@@ -552,10 +424,10 @@ skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
ddb->end = fls(slice_mask) * slice_size;
WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size);
+ WARN_ON(ddb->end > DISPLAY_INFO(display)->dbuf.size);
}
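/*
 * Worked example of the slice mask -> ddb range mapping, assuming a
 * hypothetical 2048 block dbuf split across four slices (512 blocks
 * each) and the start being derived from ffs() the same way the end
 * is derived from fls():
 *
 *   slice_mask = BIT(DBUF_S2) | BIT(DBUF_S3) = 0b0110
 *   slice_size = 2048 / 4 = 512
 *   ddb->start = (ffs(0b0110) - 1) * 512 = 512
 *   ddb->end   = fls(0b0110) * 512      = 1536
 */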
-static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+static unsigned int mbus_ddb_offset(struct intel_display *display, u8 slice_mask)
{
struct skl_ddb_entry ddb;
@@ -564,15 +436,15 @@ static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask
else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
slice_mask = BIT(DBUF_S3);
- skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+ skl_ddb_entry_for_slices(display, slice_mask, &ddb);
return ddb.start;
}
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
enum dbuf_slice start_slice, end_slice;
u8 slice_mask = 0;
@@ -618,15 +490,14 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
unsigned int *weight_end,
unsigned int *weight_total)
{
- struct drm_i915_private *i915 =
- to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
enum pipe pipe;
*weight_start = 0;
*weight_end = 0;
*weight_total = 0;
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
int weight = dbuf_state->weight[pipe];
/*
@@ -652,7 +523,7 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
unsigned int weight_total, weight_start, weight_end;
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
@@ -674,8 +545,8 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
dbuf_slice_mask = new_dbuf_state->slices[pipe];
- skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
- mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
+ skl_ddb_entry_for_slices(display, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(display, dbuf_slice_mask);
ddb_range_size = skl_ddb_entry_size(&ddb_slices);
intel_crtc_dbuf_weights(new_dbuf_state, pipe,
@@ -709,7 +580,7 @@ out:
crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
crtc->base.base.id, crtc->base.name,
old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
@@ -734,10 +605,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
-static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
+static unsigned int skl_wm_latency(struct intel_display *display, int level,
const struct skl_wm_params *wp)
{
- unsigned int latency = i915->display.wm.skl_latency[level];
+ unsigned int latency = display->wm.skl_latency[level];
if (latency == 0)
return 0;
@@ -746,11 +617,11 @@ static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
- skl_watermark_ipc_enabled(i915))
+ if ((display->platform.kabylake || display->platform.coffeelake ||
+ display->platform.cometlake) && skl_watermark_ipc_enabled(display))
latency += 4;
- if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
+ if (skl_needs_memory_bw_wa(display) && wp && wp->x_tiled)
latency += 15;
return latency;
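/*
 * Worked example of the latency adjustments above, with assumed
 * numbers: a raw level 1 latency of 8 us on a Kabylake part with IPC
 * enabled becomes 8 + 4 = 12 us, and if the memory bandwidth w/a
 * applies to an X-tiled plane a further +15 gives 27 us.
 */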
@@ -760,8 +631,8 @@ static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
@@ -772,10 +643,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0, 0);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- for (level = 0; level < i915->display.wm.num_levels; level++) {
- unsigned int latency = skl_wm_latency(i915, level, &wp);
+ for (level = 0; level < display->wm.num_levels; level++) {
+ unsigned int latency = skl_wm_latency(display, level, &wp);
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
@@ -797,14 +668,13 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
}
static void
-skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
+skl_ddb_get_hw_plane_state(struct intel_display *display,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_entry *ddb,
struct skl_ddb_entry *ddb_y,
u16 *min_ddb, u16 *interim_ddb)
{
- struct intel_display *display = &i915->display;
u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
@@ -837,7 +707,6 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
u16 *min_ddb, u16 *interim_ddb)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -849,7 +718,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
return;
for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(i915, pipe,
+ skl_ddb_get_hw_plane_state(display, pipe,
plane_id,
&ddb[plane_id],
&ddb_y[plane_id],
@@ -1367,16 +1236,16 @@ static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbu
static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (IS_DG2(i915))
+ if (display->platform.dg2)
return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 12)
+ else if (DISPLAY_VER(display) == 12)
return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
/*
* For anything else just return one slice for now.
@@ -1416,8 +1285,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
u64 data_rate = 0;
@@ -1427,7 +1296,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
data_rate += crtc_state->rel_data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->rel_data_rate_y[plane_id];
}
@@ -1489,7 +1358,7 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
-static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
+static bool skl_need_wm_copy_wa(struct intel_display *display, int level,
const struct skl_plane_wm *wm)
{
/*
@@ -1543,7 +1412,6 @@ static int
skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_dbuf_state *dbuf_state =
@@ -1585,7 +1453,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
*/
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
@@ -1596,7 +1464,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
@@ -1615,9 +1483,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
if (level < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Requested display configuration exceeds system DDB limitations");
- drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
+ drm_dbg_kms(display->drm, "minimum required %d/%d\n",
blocks, iter.size);
return -EINVAL;
}
@@ -1645,7 +1513,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
if (plane_id == PLANE_CURSOR)
continue;
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
crtc_state->rel_data_rate_y[plane_id]);
@@ -1661,7 +1529,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
*interim_ddb = wm->sagv.wm0.min_ddb_alloc;
}
}
- drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
+ drm_WARN_ON(display->drm, iter.size != 0 || iter.data_rate != 0);
/*
* When we calculated watermark values we didn't know how high
@@ -1669,7 +1537,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* all levels as "enabled." Go back now and disable the ones
* that aren't actually possible.
*/
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1678,7 +1546,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id))
skl_check_nv12_wm_level(&wm->wm[level],
&wm->uv_wm[level],
@@ -1686,7 +1554,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
else
skl_check_wm_level(&wm->wm[level], ddb);
- if (skl_need_wm_copy_wa(i915, level, wm)) {
+ if (skl_need_wm_copy_wa(display, level, wm)) {
wm->wm[level].blocks = wm->wm[level - 1].blocks;
wm->wm[level].lines = wm->wm[level - 1].lines;
wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
@@ -1708,7 +1576,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_check_wm_level(&wm->trans_wm, ddb_y);
} else {
@@ -1734,7 +1602,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+skl_wm_method1(struct intel_display *display, u32 pixel_rate,
u8 cpp, u32 latency, u32 dbuf_block_size)
{
u32 wm_intermediate_val;
@@ -1746,7 +1614,7 @@ skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
wm_intermediate_val = latency * pixel_rate * cpp;
ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
ret = add_fixed16_u32(ret, 1);
return ret;
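/*
 * Worked example for method 1, with assumed inputs: pixel_rate =
 * 300000 kHz, cpp = 4, latency = 8 us, dbuf_block_size = 512:
 *
 *   8 * 300000 * 4 / (1000 * 512) = 18.75 blocks
 *
 * The fixed16 result keeps the fraction; on display ver >= 10 one
 * extra block is added before the caller rounds up.
 */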
@@ -1772,7 +1640,7 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
@@ -1782,7 +1650,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
pixel_rate = crtc_state->pixel_rate;
- if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+ if (drm_WARN_ON(display->drm, pixel_rate == 0))
return u32_to_fixed16(0);
crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
@@ -1798,15 +1666,13 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane, unsigned int pan_x)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 interm_pbpl;
/* only planar format has two planes */
if (color_plane == 1 &&
!intel_format_info_is_yuv_semiplanar(format, modifier)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Non planar format have single plane\n");
return -EINVAL;
}
@@ -1824,7 +1690,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->cpp = format->cpp[color_plane];
wp->plane_pixel_rate = plane_pixel_rate;
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
@@ -1849,7 +1715,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines = 4;
}
- if (skl_needs_memory_bw_wa(i915))
+ if (skl_needs_memory_bw_wa(display))
wp->y_min_scanlines *= 2;
wp->plane_bytes_per_line = wp->width * wp->cpp;
@@ -1860,7 +1726,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(display) >= 30)
interm_pbpl += (pan_x != 0);
- else if (DISPLAY_VER(i915) >= 10)
+ else if (DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
@@ -1869,7 +1735,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
wp->dbuf_block_size);
- if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+ if (!wp->x_tiled || DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -1906,18 +1772,18 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
plane_state->uapi.src.x1);
}
-static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+static bool skl_wm_has_lines(struct intel_display *display, int level)
{
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
return true;
/* The number of lines is ignored for the level 0 watermark. */
return level > 0;
}
-static int skl_wm_max_lines(struct drm_i915_private *i915)
+static int skl_wm_max_lines(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 255;
else
return 31;
@@ -1938,7 +1804,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
u32 blocks, lines, min_ddb_alloc = 0;
@@ -1950,7 +1816,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
return;
}
- method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+ method1 = skl_wm_method1(display, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
crtc_state->hw.pipe_mode.crtc_htotal,
@@ -1965,7 +1831,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
selected_result = min_fixed16(method1, method2);
else
selected_result = method2;
@@ -1975,7 +1841,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
blocks = fixed16_to_u32_round_up(selected_result);
- if (DISPLAY_VER(i915) < 30)
+ if (DISPLAY_VER(display) < 30)
blocks++;
/*
@@ -1994,13 +1860,13 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* channels' impact on the level 0 memory latency and the relevant
* wm calculations.
*/
- if (skl_wm_has_lines(i915, level))
+ if (skl_wm_has_lines(display, level))
blocks = max(blocks,
fixed16_to_u32_round_up(wp->plane_blocks_per_line));
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
- if (DISPLAY_VER(i915) == 9) {
+ if (DISPLAY_VER(display) == 9) {
/* Display WA #1125: skl,bxt,kbl */
if (level == 0 && wp->rc_surface)
blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
@@ -2025,7 +1891,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (DISPLAY_VER(i915) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (wp->y_tiled) {
int extra_lines;
@@ -2042,10 +1908,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (!skl_wm_has_lines(i915, level))
+ if (!skl_wm_has_lines(display, level))
lines = 0;
- if (lines > skl_wm_max_lines(i915)) {
+ if (lines > skl_wm_max_lines(display)) {
/* reject it */
result->min_ddb_alloc = U16_MAX;
return;
@@ -2064,8 +1930,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
result->enable = true;
result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
- if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
- result->can_sagv = latency >= i915->display.sagv.block_time_us;
+ if (DISPLAY_VER(display) < 12 && display->sagv.block_time_us)
+ result->can_sagv = latency >= display->sagv.block_time_us;
}
static void
@@ -2074,13 +1940,13 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *result_prev = &levels[0];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
- unsigned int latency = skl_wm_latency(i915, level, wm_params);
+ unsigned int latency = skl_wm_latency(display, level, wm_params);
skl_compute_plane_wm(crtc_state, plane, level, latency,
wm_params, result_prev, result);
@@ -2094,21 +1960,21 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_plane_wm *plane_wm)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
struct skl_wm_level *levels = plane_wm->wm;
unsigned int latency = 0;
- if (i915->display.sagv.block_time_us)
- latency = i915->display.sagv.block_time_us +
- skl_wm_latency(i915, 0, wm_params);
+ if (display->sagv.block_time_us)
+ latency = display->sagv.block_time_us +
+ skl_wm_latency(display, 0, wm_params);
skl_compute_plane_wm(crtc_state, plane, 0, latency,
wm_params, &levels[0],
sagv_wm);
}
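/*
 * Worked example of the SAGV latency above, with assumed numbers:
 * sagv.block_time_us = 20 and a level 0 latency of 4 us (after the
 * skl_wm_latency() adjustments) give a 24 us effective latency for
 * the SAGV wm0 computation; with a zero block time the effective
 * latency stays 0.
 */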
-static void skl_compute_transition_wm(struct drm_i915_private *i915,
+static void skl_compute_transition_wm(struct intel_display *display,
struct skl_wm_level *trans_wm,
const struct skl_wm_level *wm0,
const struct skl_wm_params *wp)
@@ -2117,23 +1983,23 @@ static void skl_compute_transition_wm(struct drm_i915_private *i915,
u16 wm0_blocks, trans_offset, blocks;
/* Transition WMs don't make any sense if IPC is disabled */
- if (!skl_watermark_ipc_enabled(i915))
+ if (!skl_watermark_ipc_enabled(display))
return;
/*
* WaDisableTWM:skl,kbl,cfl,bxt
* Transition WMs are not recommended by the HW team for GEN9
*/
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
trans_min = 4;
else
trans_min = 14;
/* Display WA #1140: glk,cnl */
- if (DISPLAY_VER(i915) == 10)
+ if (DISPLAY_VER(display) == 10)
trans_amount = 0;
else
trans_amount = 10; /* This is configurable amount */
@@ -2175,8 +2041,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct intel_plane *plane, int color_plane)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
struct skl_wm_params wm_params;
int ret;
@@ -2188,13 +2053,13 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
- skl_compute_transition_wm(i915, &wm->trans_wm,
+ skl_compute_transition_wm(display, &wm->trans_wm,
&wm->wm[0], &wm_params);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
- skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
+ skl_compute_transition_wm(display, &wm->sagv.trans_wm,
&wm->sagv.wm0, &wm_params);
}
@@ -2254,8 +2119,8 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;
@@ -2269,9 +2134,9 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!intel_wm_plane_visible(crtc_state, plane_state));
- drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
+ drm_WARN_ON(display->drm, !fb->format->is_yuv ||
fb->format->num_planes == 1);
ret = skl_build_plane_wm_single(crtc_state, plane_state,
@@ -2308,7 +2173,7 @@ cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
}
return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
- 2 * cdclk_state->logical.cdclk));
+ 2 * intel_cdclk_logical(cdclk_state)));
}
static int
@@ -2411,15 +2276,14 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
int wm0_lines)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
int level;
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
int latency;
/* FIXME should we care about the latency w/a's? */
- latency = skl_wm_latency(i915, level, NULL);
+ latency = skl_wm_latency(display, level, NULL);
if (latency == 0)
continue;
@@ -2436,8 +2300,8 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int wm0_lines, level;
if (!crtc_state->hw.active)
@@ -2453,9 +2317,9 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
* PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
* based on whether we're limited by the vblank duration.
*/
- crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1;
+ crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2471,10 +2335,10 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
}
}
- if (DISPLAY_VER(i915) >= 12 &&
- i915->display.sagv.block_time_us &&
+ if (DISPLAY_VER(display) >= 12 &&
+ display->sagv.block_time_us &&
skl_is_vblank_too_short(crtc_state, wm0_lines,
- i915->display.sagv.block_time_us)) {
+ display->sagv.block_time_us)) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2492,7 +2356,7 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
static int skl_build_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *plane_state;
@@ -2508,7 +2372,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
if (plane->pipe != crtc->pipe)
continue;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state);
else
ret = skl_build_plane_wm(crtc_state, plane_state);
@@ -2531,11 +2395,10 @@ static bool skl_wm_level_equals(const struct skl_wm_level *l1,
l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
}
-static bool skl_plane_wm_equals(struct drm_i915_private *i915,
+static bool skl_plane_wm_equals(struct intel_display *display,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
- struct intel_display *display = &i915->display;
int level;
for (level = 0; level < display->wm.num_levels; level++) {
@@ -2590,14 +2453,14 @@ static int
skl_ddb_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2608,7 +2471,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -2627,7 +2490,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
u8 enabled_slices;
enum pipe pipe;
@@ -2637,7 +2500,7 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
*/
enabled_slices = BIT(DBUF_S1);
- for_each_pipe(i915, pipe)
+ for_each_pipe(display, pipe)
enabled_slices |= dbuf_state->slices[pipe];
return enabled_slices;
@@ -2647,7 +2510,6 @@ static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
struct intel_dbuf_state *new_dbuf_state = NULL;
struct intel_crtc_state *new_crtc_state;
@@ -2686,7 +2548,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
}
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
new_dbuf_state->slices[pipe] =
@@ -2709,11 +2571,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (ret)
return ret;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- DISPLAY_INFO(i915)->dbuf.slice_mask,
+ DISPLAY_INFO(display)->dbuf.slice_mask,
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus));
}
@@ -2731,7 +2593,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
ret = skl_crtc_allocate_ddb(state, crtc);
if (ret)
return ret;
@@ -2755,10 +2617,101 @@ static char enast(bool enable)
return enable ? '*' : ' ';
}
+static noinline_for_stack void
+skl_print_plane_changes(struct intel_display *display,
+ struct intel_plane *plane,
+ const struct skl_plane_wm *old_wm,
+ const struct skl_plane_wm *new_wm)
+{
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
+ enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
+ enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
+ enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
+ enast(old_wm->trans_wm.enable),
+ enast(old_wm->sagv.wm0.enable),
+ enast(old_wm->sagv.trans_wm.enable),
+ enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
+ enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
+ enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
+ enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
+ enast(new_wm->trans_wm.enable),
+ enast(new_wm->sagv.wm0.enable),
+ enast(new_wm->sagv.trans_wm.enable));
+
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
+ enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
+ enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
+ enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
+ enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
+
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].blocks, old_wm->wm[1].blocks,
+ old_wm->wm[2].blocks, old_wm->wm[3].blocks,
+ old_wm->wm[4].blocks, old_wm->wm[5].blocks,
+ old_wm->wm[6].blocks, old_wm->wm[7].blocks,
+ old_wm->trans_wm.blocks,
+ old_wm->sagv.wm0.blocks,
+ old_wm->sagv.trans_wm.blocks,
+ new_wm->wm[0].blocks, new_wm->wm[1].blocks,
+ new_wm->wm[2].blocks, new_wm->wm[3].blocks,
+ new_wm->wm[4].blocks, new_wm->wm[5].blocks,
+ new_wm->wm[6].blocks, new_wm->wm[7].blocks,
+ new_wm->trans_wm.blocks,
+ new_wm->sagv.wm0.blocks,
+ new_wm->sagv.trans_wm.blocks);
+
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ old_wm->sagv.wm0.min_ddb_alloc,
+ old_wm->sagv.trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc,
+ new_wm->sagv.wm0.min_ddb_alloc,
+ new_wm->sagv.trans_wm.min_ddb_alloc);
+}
+
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
struct intel_plane *plane;
@@ -2775,7 +2728,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
old_pipe_wm = &old_crtc_state->wm.skl.optimal;
new_pipe_wm = &new_crtc_state->wm.skl.optimal;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
@@ -2784,107 +2737,24 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
-
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name,
old->start, old->end, new->start, new->end,
skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_plane_wm *old_wm, *new_wm;
old_wm = &old_pipe_wm->planes[plane_id];
new_wm = &new_pipe_wm->planes[plane_id];
- if (skl_plane_wm_equals(i915, old_wm, new_wm))
+ if (skl_plane_wm_equals(display, old_wm, new_wm))
continue;
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
- enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
- enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
- enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
- enast(old_wm->trans_wm.enable),
- enast(old_wm->sagv.wm0.enable),
- enast(old_wm->sagv.trans_wm.enable),
- enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
- enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
- enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
- enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
- enast(new_wm->trans_wm.enable),
- enast(new_wm->sagv.wm0.enable),
- enast(new_wm->sagv.trans_wm.enable));
-
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
- enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
- enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
- enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
- enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
-
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].blocks, old_wm->wm[1].blocks,
- old_wm->wm[2].blocks, old_wm->wm[3].blocks,
- old_wm->wm[4].blocks, old_wm->wm[5].blocks,
- old_wm->wm[6].blocks, old_wm->wm[7].blocks,
- old_wm->trans_wm.blocks,
- old_wm->sagv.wm0.blocks,
- old_wm->sagv.trans_wm.blocks,
- new_wm->wm[0].blocks, new_wm->wm[1].blocks,
- new_wm->wm[2].blocks, new_wm->wm[3].blocks,
- new_wm->wm[4].blocks, new_wm->wm[5].blocks,
- new_wm->wm[6].blocks, new_wm->wm[7].blocks,
- new_wm->trans_wm.blocks,
- new_wm->sagv.wm0.blocks,
- new_wm->sagv.trans_wm.blocks);
-
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
- old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
- old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
- old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
- old_wm->trans_wm.min_ddb_alloc,
- old_wm->sagv.wm0.min_ddb_alloc,
- old_wm->sagv.trans_wm.min_ddb_alloc,
- new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
- new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
- new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
- new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc,
- new_wm->sagv.wm0.min_ddb_alloc,
- new_wm->sagv.trans_wm.min_ddb_alloc);
+ skl_print_plane_changes(display, plane, old_wm, new_wm);
}
}
}
@@ -2945,14 +2815,14 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2971,7 +2841,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -2988,73 +2858,85 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return 0;
}
-/*
- * If Fixed Refresh Rate or For VRR case Vmin = Vmax = Flipline:
- * Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from
- * watermark level1 and up and above. If watermark level 1 is
- * invalid program it with all 1's.
- * Program PKG_C_LATENCY Added Wake Time = DSB execution time
- * If Variable Refresh Rate where Vmin != Vmax != Flipline:
- * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
- * Program PKG_C_LATENCY Added Wake Time = 0
- */
+static int pkgc_max_linetime(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i, max_linetime;
+
+ /*
+ * Apparently the hardware uses WM_LINETIME internally for
+ * this stuff, compute everything based on that.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ display->pkgc.disable[crtc->pipe] = crtc_state->vrr.enable;
+ display->pkgc.linetime[crtc->pipe] = DIV_ROUND_UP(crtc_state->linetime, 8);
+ }
+
+ max_linetime = 0;
+ for_each_intel_crtc(display->drm, crtc) {
+ if (display->pkgc.disable[crtc->pipe])
+ return 0;
+
+ max_linetime = max(display->pkgc.linetime[crtc->pipe], max_linetime);
+ }
+
+ return max_linetime;
+}
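/*
 * Worked example, assuming crtc_state->linetime is stored in the
 * WM_LINETIME register's 1/8 us units: a pipe with a 14.8 us line
 * time stores linetime ~= 118, so its pkgc linetime becomes
 * DIV_ROUND_UP(118, 8) = 15. Any pipe with VRR enabled makes the
 * function return 0, which disables the latency programming.
 */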
+
void
intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- u32 latency = LNL_PKG_C_LATENCY_MASK;
- u32 added_wake_time = 0;
- u32 max_linetime = 0;
- u32 clear, val;
- bool fixed_refresh_rate = false;
- int i;
+ int max_linetime, latency, added_wake_time = 0;
if (DISPLAY_VER(display) < 20)
return;
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->vrr.enable ||
- (new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
- new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline))
- fixed_refresh_rate = true;
+ mutex_lock(&display->wm.wm_mutex);
- max_linetime = max(new_crtc_state->linetime, max_linetime);
- }
+ latency = skl_watermark_max_latency(display, 1);
- if (fixed_refresh_rate) {
- added_wake_time = DSB_EXE_TIME +
- display->sagv.block_time_us;
+ /* FIXME runtime changes to enable_flipq are racy */
+ if (display->params.enable_flipq)
+ added_wake_time = intel_flipq_exec_time_us(display);
- latency = skl_watermark_max_latency(i915, 1);
+ /*
+ * Wa_22020432604
+ * "PKG_C_LATENCY Added Wake Time field is not working"
+ */
+ if (latency && IS_DISPLAY_VER(display, 20, 30)) {
+ latency += added_wake_time;
+ added_wake_time = 0;
+ }
- /* Wa_22020432604 */
- if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
- latency += added_wake_time;
- added_wake_time = 0;
- }
+ max_linetime = pkgc_max_linetime(state);
- /* Wa_22020299601 */
- if ((latency && max_linetime) &&
- (DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30)) {
- latency = max_linetime * DIV_ROUND_UP(latency, max_linetime);
- } else if (!latency) {
- latency = LNL_PKG_C_LATENCY_MASK;
- }
+ if (max_linetime == 0 || latency == 0) {
+ latency = REG_FIELD_GET(LNL_PKG_C_LATENCY_MASK,
+ LNL_PKG_C_LATENCY_MASK);
+ added_wake_time = 0;
+ } else {
+ /*
+ * Wa_22020299601
+ * "Increase the latency programmed in PKG_C_LATENCY Pkg C Latency to be a
+ * multiple of the pipeline time from WM_LINETIME"
+ */
+ latency = roundup(latency, max_linetime);
}
- clear = LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
- val = REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency) |
- REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+ intel_de_write(display, LNL_PKG_C_LATENCY,
+ REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time) |
+ REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency));
- intel_de_rmw(display, LNL_PKG_C_LATENCY, clear, val);
+ mutex_unlock(&display->wm.wm_mutex);
}
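/*
 * Worked example of the Wa_22020299601 rounding above, with assumed
 * values: latency = 20 us and max_linetime = 17 give
 * roundup(20, 17) = 34, i.e. the smallest multiple of the pipeline
 * time that still covers the real memory latency.
 */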
static int
skl_compute_wm(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
@@ -3069,16 +2951,35 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = intel_compute_sagv_mask(state);
- if (ret)
- return ret;
-
/*
* skl_compute_ddb() will have adjusted the final watermarks
* based on how much ddb is available. Now we can actually
* check if the final watermarks changed.
*/
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case).
+ *
+ * drm_atomic_check_only() gets upset if we pull more crtcs
+ * into the state, so we have to calculate this based on the
+ * individual intel_crtc_can_enable_sagv() rather than
+ * the overall intel_bw_can_enable_sagv(). Otherwise the
+ * crtcs not included in the commit would not switch to the
+ * SAGV watermarks when we are about to enable SAGV, and that
+ * would lead to underruns. This does mean extra power draw
+ * when only a subset of the crtcs are blocking SAGV as the
+ * other crtcs can't be allowed to use the more optimal
+ * normal (ie. non-SAGV) watermarks.
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
+ DISPLAY_VER(display) >= 12 &&
+ intel_crtc_can_enable_sagv(new_crtc_state);
+
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
@@ -3149,11 +3050,10 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
}
}
-static void skl_wm_get_hw_state(struct drm_i915_private *i915)
+static void skl_wm_get_hw_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct intel_crtc *crtc;
if (HAS_MBUS_JOINING(display))
@@ -3193,7 +3093,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
if (!crtc_state->hw.active)
continue;
- skl_ddb_get_hw_plane_state(i915, crtc->pipe,
+ skl_ddb_get_hw_plane_state(display, crtc->pipe,
plane_id, ddb, ddb_y,
min_ddb, interim_ddb);
@@ -3209,13 +3109,13 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
*/
slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
dbuf_state->joined_mbus);
- mbus_offset = mbus_ddb_offset(i915, slices);
+ mbus_offset = mbus_ddb_offset(display, slices);
crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
/* The slices actually used by the planes on the pipe */
dbuf_state->slices[pipe] =
- skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
+ skl_ddb_dbuf_slice_mask(display, &crtc_state->wm.skl.ddb);
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
@@ -3228,50 +3128,53 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->enabled_slices = display->dbuf.enabled_slices;
}
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
+bool skl_watermark_ipc_enabled(struct intel_display *display)
{
- return i915->display.wm.ipc_enabled;
+ return display->wm.ipc_enabled;
}
-void skl_watermark_ipc_update(struct drm_i915_private *i915)
+void skl_watermark_ipc_update(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
- skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(display) ? DISP_IPC_ENABLE : 0);
}
-static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
+static bool skl_watermark_ipc_can_enable(struct intel_display *display)
{
/* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(i915))
+ if (display->platform.skylake)
return false;
/* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915))
- return i915->dram_info.symmetric_memory;
+ if (display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake) {
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
+
+ return dram_info->symmetric_memory;
+ }
return true;
}
-void skl_watermark_ipc_init(struct drm_i915_private *i915)
+void skl_watermark_ipc_init(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
+ display->wm.ipc_enabled = skl_watermark_ipc_can_enable(display);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
}
static void
-adjust_wm_latency(struct drm_i915_private *i915,
+adjust_wm_latency(struct intel_display *display,
u16 wm[], int num_levels, int read_latency)
{
- bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
int i, level;
/*
@@ -3307,43 +3210,43 @@ adjust_wm_latency(struct drm_i915_private *i915,
	 * any underrun. If we can't get the DIMM info, assume a 16GB DIMM
	 * to avoid any underrun.
*/
- if (wm_lv_0_adjust_needed)
+ if (!display->platform.dg2 && dram_info->wm_lv_0_adjust_needed)
wm[0] += 1;
}
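
adjust_wm_latency() ends with the level-0 bump shown above: one extra microsecond of latency when the DIMM configuration requires it, now skipped on DG2. A hedged sketch of just that step, with stand-in parameters in place of the driver's dram_info plumbing:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void adjust_wm_lv0(uint16_t wm[], bool is_dg2, bool lv0_adjust_needed)
{
	/* one extra usec of level-0 latency headroom, except on DG2 */
	if (!is_dg2 && lv0_adjust_needed)
		wm[0] += 1;
}

int main(void)
{
	uint16_t wm[8] = { 2, 4, 8, 16 };

	adjust_wm_lv0(wm, false, true);
	printf("WM0 after adjust: %u usec\n", wm[0]);	/* 3 */
	return 0;
}
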
-static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
+ int num_levels = display->wm.num_levels;
u32 val;
- val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
+ val = intel_de_read(display, MTL_LATENCY_LP0_LP1);
wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
+ val = intel_de_read(display, MTL_LATENCY_LP2_LP3);
wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
+ val = intel_de_read(display, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- adjust_wm_latency(i915, wm, num_levels, 6);
+ adjust_wm_latency(display, wm, num_levels, 6);
}
-static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
- int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
- int mult = IS_DG2(i915) ? 2 : 1;
+ int num_levels = display->wm.num_levels;
+ int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
+ int mult = display->platform.dg2 ? 2 : 1;
u32 val;
int ret;
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3354,9 +3257,9 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3365,24 +3268,22 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
- adjust_wm_latency(i915, wm, num_levels, read_latency);
+ adjust_wm_latency(display, wm, num_levels, read_latency);
}
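
Each pcode mailbox read above returns four latency levels packed into one u32, which the GEN9_MEM_LATENCY_LEVEL_*_MASK fields unpack; the DG2 multiplier then doubles each value. A sketch of that unpacking, assuming the plausible 8-bit-per-level layout rather than the driver's actual REG_FIELD_GET() masks:

#include <stdint.h>
#include <stdio.h>

static void unpack_latency(uint32_t val, uint16_t wm[4], int mult)
{
	/* four consecutive 8-bit latency fields; mult = 2 on DG2 */
	for (int i = 0; i < 4; i++)
		wm[i] = ((val >> (8 * i)) & 0xff) * mult;
}

int main(void)
{
	uint16_t wm[4];

	unpack_latency(0x20100a02, wm, 1);
	printf("levels: %u %u %u %u usec\n", wm[0], wm[1], wm[2], wm[3]);
	return 0;	/* prints: 2 10 16 32 */
}
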
-static void skl_setup_wm_latency(struct drm_i915_private *i915)
+static void skl_setup_wm_latency(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (HAS_HW_SAGV_WM(display))
display->wm.num_levels = 6;
else
display->wm.num_levels = 8;
if (DISPLAY_VER(display) >= 14)
- mtl_read_wm_latency(i915, display->wm.skl_latency);
+ mtl_read_wm_latency(display, display->wm.skl_latency);
else
- skl_read_wm_latency(i915, display->wm.skl_latency);
+ skl_read_wm_latency(display, display->wm.skl_latency);
- intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency);
+ intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3410,19 +3311,18 @@ static const struct intel_global_state_funcs intel_dbuf_funcs = {
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *dbuf_state;
- dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
+ dbuf_state = intel_atomic_get_global_obj_state(state, &display->dbuf.obj);
if (IS_ERR(dbuf_state))
return ERR_CAST(dbuf_state);
return to_intel_dbuf_state(dbuf_state);
}
-int intel_dbuf_init(struct drm_i915_private *i915)
+int intel_dbuf_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state;
dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
@@ -3457,34 +3357,34 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 val = 0;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= MBUS_DBOX_I_CREDIT(2);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
}
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
/* Wa_22010947358:adl-p */
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
else
val |= MBUS_DBOX_A_CREDIT(2);
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
val |= MBUS_DBOX_B_CREDIT(0xA);
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(8);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(12);
} else {
@@ -3492,7 +3392,7 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
val |= MBUS_DBOX_B_CREDIT(8);
}
- if (DISPLAY_VERx100(i915) == 1400) {
+ if (DISPLAY_VERx100(display) == 1400) {
if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
val |= MBUS_DBOX_BW_8CREDITS_MTL;
else
@@ -3502,22 +3402,22 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
return val;
}
-static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915,
+static void pipe_mbus_dbox_ctl_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_crtc *crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes)
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe),
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, dbuf_state->active_pipes)
+ intel_de_write(display, PIPE_MBUS_DBOX_CTL(crtc->pipe),
pipe_mbus_dbox_ctl(crtc, dbuf_state));
}
static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return;
new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
@@ -3527,7 +3427,7 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
return;
- pipe_mbus_dbox_ctl_update(i915, new_dbuf_state);
+ pipe_mbus_dbox_ctl_update(display, new_dbuf_state);
}
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -3544,10 +3444,9 @@ int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
return intel_atomic_lock_global_state(&dbuf_state->base);
}
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus)
{
- struct intel_display *display = &i915->display;
enum dbuf_slice slice;
if (!HAS_MBUS_JOINING(display))
@@ -3571,7 +3470,7 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
@@ -3586,7 +3485,7 @@ static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state
mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
}
- intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+ intel_dbuf_mdclk_cdclk_ratio_update(display, mdclk_cdclk_ratio,
new_dbuf_state->joined_mbus);
}
@@ -3594,13 +3493,12 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
- drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+ drm_WARN_ON(display->drm, !dbuf_state->joined_mbus);
+ drm_WARN_ON(display->drm, !is_power_of_2(dbuf_state->active_pipes));
crtc = intel_crtc_for_pipe(display, pipe);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -3611,7 +3509,7 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
return INVALID_PIPE;
}
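
intel_mbus_joined_pipe() relies on the invariant the two warnings assert: with mbus joined, exactly one pipe is active, so ffs() of the mask minus one yields its index. A standalone sketch of that selection (the pipe numbering is illustrative):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int active_pipes = 1 << 2;	/* only pipe C active */

	/* nonzero power-of-2 check, as the drm_WARN_ON()s above expect */
	if (active_pipes && !(active_pipes & (active_pipes - 1)))
		printf("joined pipe index: %d\n", ffs(active_pipes) - 1); /* 2 */
	else
		printf("not a single-pipe mask\n");
	return 0;
}
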
-static void mbus_ctl_join_update(struct drm_i915_private *i915,
+static void mbus_ctl_join_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state,
enum pipe pipe)
{
@@ -3627,7 +3525,7 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
else
mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
- intel_de_rmw(i915, MBUS_CTL,
+ intel_de_rmw(display, MBUS_CTL,
MBUS_HASHING_MODE_MASK | MBUS_JOIN |
MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
}
@@ -3635,18 +3533,18 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
intel_atomic_get_new_dbuf_state(state);
- drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ drm_dbg_kms(display->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus),
pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
- mbus_ctl_join_update(i915, new_dbuf_state, pipe);
+ mbus_ctl_join_update(display, new_dbuf_state, pipe);
}
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
@@ -3751,9 +3649,40 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(display, new_slices);
}
-static void skl_mbus_sanitize(struct drm_i915_private *i915)
+int intel_dbuf_num_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ return hweight8(dbuf_state->enabled_slices);
+}
+
+int intel_dbuf_num_active_pipes(const struct intel_dbuf_state *dbuf_state)
+{
+ return hweight8(dbuf_state->active_pipes);
+}
+
+bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+
+ if (new_dbuf_state &&
+ new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
+ return true;
+
+ if (DISPLAY_VER(display) < 30) {
+ if (new_dbuf_state &&
+ new_dbuf_state->enabled_slices !=
+ old_dbuf_state->enabled_slices)
+ return true;
+ }
+
+ return false;
+}
+
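
The two new helpers are popcounts over small bitmasks (hweight8() in kernel terms). A userspace equivalent, for illustration only:

#include <stdint.h>
#include <stdio.h>

static int hweight8_sketch(uint8_t v)
{
	int n = 0;

	for (; v; v &= v - 1)	/* clear the lowest set bit each pass */
		n++;
	return n;
}

int main(void)
{
	printf("pipes active in 0x0b: %d\n", hweight8_sketch(0x0b));	/* 3 */
	return 0;
}
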
+static void skl_mbus_sanitize(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(display->dbuf.obj.state);
@@ -3768,28 +3697,28 @@ static void skl_mbus_sanitize(struct drm_i915_private *i915)
dbuf_state->active_pipes);
dbuf_state->joined_mbus = false;
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
dbuf_state->mdclk_cdclk_ratio,
dbuf_state->joined_mbus);
- pipe_mbus_dbox_ctl_update(i915, dbuf_state);
- mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE);
+ pipe_mbus_dbox_ctl_update(display, dbuf_state);
+ mbus_ctl_join_update(display, dbuf_state, INVALID_PIPE);
}
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+static bool skl_dbuf_is_misconfigured(struct intel_display *display)
{
const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
entries[crtc->pipe] = crtc_state->wm.skl.ddb;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
u8 slices;
@@ -3807,7 +3736,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
return false;
}
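
skl_dbuf_is_misconfigured() boils down to pairwise overlap tests between the per-pipe DDB ranges collected in entries[]. A minimal sketch of the underlying [start, end) interval test; the struct and helper names are stand-ins, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ddb_entry { uint16_t start, end; };

static bool ddb_overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	/* empty entries (start == end) never overlap anything */
	return a->start < b->end && b->start < a->end;
}

int main(void)
{
	struct ddb_entry a = { 0, 512 }, b = { 256, 1024 };

	printf("overlap: %s\n", ddb_overlaps(&a, &b) ? "yes" : "no"); /* yes */
	return 0;
}
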
-static void skl_dbuf_sanitize(struct drm_i915_private *i915)
+static void skl_dbuf_sanitize(struct intel_display *display)
{
struct intel_crtc *crtc;
@@ -3822,12 +3751,12 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
* all the planes so that skl_commit_modeset_enables() can
* simply ignore them.
*/
- if (!skl_dbuf_is_misconfigured(i915))
+ if (!skl_dbuf_is_misconfigured(display))
return;
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+ drm_dbg_kms(display->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -3837,16 +3766,16 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
if (plane_state->uapi.visible)
intel_plane_disable_noatomic(crtc, plane);
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+ drm_WARN_ON(display->drm, crtc_state->active_planes != 0);
memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
}
}
-static void skl_wm_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct intel_display *display)
{
- skl_mbus_sanitize(i915);
- skl_dbuf_sanitize(i915);
+ skl_mbus_sanitize(display);
+ skl_dbuf_sanitize(display);
}
void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3897,7 +3826,6 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct skl_hw_state {
@@ -3912,7 +3840,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
u8 hw_enabled_slices;
int level;
- if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ if (DISPLAY_VER(display) < 9 || !new_crtc_state->hw.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -3925,26 +3853,26 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
hw_enabled_slices = intel_enabled_dbuf_slices_mask(display);
- if (DISPLAY_VER(i915) >= 11 &&
- hw_enabled_slices != i915->display.dbuf.enabled_slices)
- drm_err(&i915->drm,
+ if (DISPLAY_VER(display) >= 11 &&
+ hw_enabled_slices != display->dbuf.enabled_slices)
+ drm_err(display->drm,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- i915->display.dbuf.enabled_slices,
+ display->dbuf.enabled_slices,
hw_enabled_slices);
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
hw_wm_level = &hw->wm.planes[plane->id].wm[level];
sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
continue;
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name, level,
sw_wm_level->enable,
@@ -3959,7 +3887,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3975,7 +3903,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3991,7 +3919,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -4007,7 +3935,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
plane->base.base.id, plane->base.name,
sw_ddb_entry->start, sw_ddb_entry->end,
@@ -4024,29 +3952,29 @@ static const struct intel_wm_funcs skl_wm_funcs = {
.sanitize = skl_wm_sanitize,
};
-void skl_wm_init(struct drm_i915_private *i915)
+void skl_wm_init(struct intel_display *display)
{
- intel_sagv_init(i915);
+ intel_sagv_init(display);
- skl_setup_wm_latency(i915);
+ skl_setup_wm_latency(display);
- i915->display.funcs.wm = &skl_wm_funcs;
+ display->funcs.wm = &skl_wm_funcs;
}
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Isochronous Priority Control: %s\n",
- str_yes_no(skl_watermark_ipc_enabled(i915)));
+ str_yes_no(skl_watermark_ipc_enabled(display)));
return 0;
}
static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *i915 = inode->i_private;
+ struct intel_display *display = inode->i_private;
- return single_open(file, skl_watermark_ipc_status_show, i915);
+ return single_open(file, skl_watermark_ipc_status_show, display);
}
static ssize_t skl_watermark_ipc_status_write(struct file *file,
@@ -4054,8 +3982,7 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *i915 = m->private;
- intel_wakeref_t wakeref;
+ struct intel_display *display = m->private;
bool enable;
int ret;
@@ -4063,12 +3990,12 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
if (ret < 0)
return ret;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- if (!skl_watermark_ipc_enabled(i915) && enable)
- drm_info(&i915->drm,
+ with_intel_display_rpm(display) {
+ if (!skl_watermark_ipc_enabled(display) && enable)
+ drm_info(display->drm,
"Enabling IPC: WM will be proper only after next commit\n");
- i915->display.wm.ipc_enabled = enable;
- skl_watermark_ipc_update(i915);
+ display->wm.ipc_enabled = enable;
+ skl_watermark_ipc_update(display);
}
return len;
@@ -4085,7 +4012,7 @@ static const struct file_operations skl_watermark_ipc_status_fops = {
static int intel_sagv_status_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
static const char * const sagv_status[] = {
[I915_SAGV_UNKNOWN] = "unknown",
[I915_SAGV_DISABLED] = "disabled",
@@ -4093,37 +4020,36 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused)
[I915_SAGV_NOT_CONTROLLED] = "not controlled",
};
- seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
+ seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(display)));
seq_printf(m, "SAGV modparam: %s\n",
- str_enabled_disabled(i915->display.params.enable_sagv));
- seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
- seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
+ str_enabled_disabled(display->params.enable_sagv));
+ seq_printf(m, "SAGV status: %s\n", sagv_status[display->sagv.status]);
+ seq_printf(m, "SAGV block time: %d usec\n", display->sagv.block_time_us);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915)
+void skl_watermark_debugfs_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct drm_minor *minor = display->drm->primary;
if (HAS_IPC(display))
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
- &skl_watermark_ipc_status_fops);
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root,
+ display, &skl_watermark_ipc_status_fops);
if (HAS_SAGV(display))
- debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
- &intel_sagv_status_fops);
+ debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root,
+ display, &intel_sagv_status_fops);
}
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
+unsigned int skl_watermark_max_latency(struct intel_display *display, int initial_wm_level)
{
int level;
- for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
- unsigned int latency = skl_wm_latency(i915, level, NULL);
+ for (level = display->wm.num_levels - 1; level >= initial_wm_level; level--) {
+ unsigned int latency = skl_wm_latency(display, level, NULL);
if (latency)
return latency;
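
skl_watermark_max_latency() walks down from the deepest watermark level and returns the first non-zero latency, on the premise that unused levels report 0. A standalone sketch with made-up latency values:

#include <stdio.h>

static unsigned int max_latency(const unsigned int *lat, int num_levels,
				int initial_level)
{
	/* deepest enabled level wins; disabled levels read back as 0 */
	for (int level = num_levels - 1; level >= initial_level; level--)
		if (lat[level])
			return lat[level];
	return 0;
}

int main(void)
{
	unsigned int lat[8] = { 2, 4, 8, 16, 24, 0, 0, 0 };

	printf("max usable latency: %u usec\n", max_latency(lat, 8, 1)); /* 24 */
	return 0;
}
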
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index d9cff6c54310..62790816f030 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -8,18 +8,15 @@
#include <linux/types.h>
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
-#include "intel_wm_types.h"
-
-struct drm_i915_private;
+enum plane_id;
struct intel_atomic_state;
-struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dbuf_state;
struct intel_display;
struct intel_plane;
struct intel_plane_state;
+struct skl_ddb_entry;
struct skl_pipe_wm;
struct skl_wm_level;
@@ -27,11 +24,10 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
- const struct intel_bw_state *bw_state);
-bool intel_has_sagv(struct drm_i915_private *i915);
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state);
+bool intel_has_sagv(struct intel_display *display);
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
@@ -45,14 +41,14 @@ void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc);
void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane);
-void skl_watermark_ipc_init(struct drm_i915_private *i915);
-void skl_watermark_ipc_update(struct drm_i915_private *i915);
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915);
+void skl_watermark_ipc_init(struct intel_display *display);
+void skl_watermark_ipc_update(struct intel_display *display);
+bool skl_watermark_ipc_enabled(struct intel_display *display);
+void skl_watermark_debugfs_register(struct intel_display *display);
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915,
+unsigned int skl_watermark_max_latency(struct intel_display *display,
int initial_wm_level);
-void skl_wm_init(struct drm_i915_private *i915);
+void skl_wm_init(struct intel_display *display);
const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
@@ -63,40 +59,25 @@ unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_st
struct intel_plane *plane, int width,
int height, int cpp);
-struct intel_dbuf_state {
- struct intel_global_state base;
-
- struct skl_ddb_entry ddb[I915_MAX_PIPES];
- unsigned int weight[I915_MAX_PIPES];
- u8 slices[I915_MAX_PIPES];
- u8 enabled_slices;
- u8 active_pipes;
- u8 mdclk_cdclk_ratio;
- bool joined_mbus;
-};
-
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
-#define to_intel_dbuf_state(global_state) \
- container_of_const((global_state), struct intel_dbuf_state, base)
+int intel_dbuf_num_enabled_slices(const struct intel_dbuf_state *dbuf_state);
+int intel_dbuf_num_active_pipes(const struct intel_dbuf_state *dbuf_state);
-#define intel_atomic_get_old_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
-#define intel_atomic_get_new_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
-
-int intel_dbuf_init(struct drm_i915_private *i915);
+int intel_dbuf_init(struct intel_display *display);
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
int ratio);
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus);
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
void intel_program_dpkgc_latency(struct intel_atomic_state *state);
+bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state);
+
#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index af717df83197..6d9f3312de7e 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -30,15 +30,17 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
@@ -251,31 +253,31 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
return 0;
}
-static void band_gap_reset(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct intel_display *display)
{
- vlv_flisdsi_get(dev_priv);
+ vlv_flisdsi_get(display->drm);
- vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
- vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
- vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+ vlv_flisdsi_write(display->drm, 0x08, 0x0001);
+ vlv_flisdsi_write(display->drm, 0x0F, 0x0005);
+ vlv_flisdsi_write(display->drm, 0x0F, 0x0025);
udelay(150);
- vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
- vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
+ vlv_flisdsi_write(display->drm, 0x0F, 0x0000);
+ vlv_flisdsi_write(display->drm, 0x08, 0x0000);
- vlv_flisdsi_put(dev_priv);
+ vlv_flisdsi_put(display->drm);
}
static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -298,7 +300,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
else
pipe_config->pipe_bpp = 18;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/* Enable Frame time stamp based scanline reporting */
pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
@@ -455,20 +457,19 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
- vlv_flisdsi_get(dev_priv);
+ vlv_flisdsi_get(display->drm);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
	 * needed every time after power gating */
- vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
- vlv_flisdsi_put(dev_priv);
+ vlv_flisdsi_write(display->drm, 0x04, 0x0004);
+ vlv_flisdsi_put(display->drm);
	/* bandgap reset is needed every time after we power gate */
- band_gap_reset(dev_priv);
+ band_gap_reset(display);
for_each_dsi_port(port, intel_dsi->ports) {
@@ -495,11 +496,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_device_ready(encoder);
else
vlv_dsi_device_ready(encoder);
@@ -559,23 +560,22 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
glk_dsi_disable_mipi_io(encoder);
}
-static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
+static i915_reg_t port_ctrl_reg(struct intel_display *display, enum port port)
{
- return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
+ return display->platform.geminilake || display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A);
intel_de_write(display, MIPI_DEVICE_READY(display, port),
@@ -594,7 +594,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
	 * On VLV/CHV, wait until the clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
- if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
+ if ((display->platform.broxton || port == PORT_A) &&
intel_de_wait_for_clear(display, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(display->drm, "DSI LP not going Low\n");
@@ -612,7 +612,6 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -620,7 +619,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp = intel_dsi->pixel_overlap;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIXEL_OVERLAP_CNT_MASK,
@@ -633,7 +632,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
u32 temp;
temp = intel_de_read(display, port_ctrl);
@@ -644,7 +643,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
- if (IS_BROXTON(dev_priv))
+ if (display->platform.broxton)
temp |= LANE_CONFIGURATION_DUAL_LINK_A;
else
temp |= crtc->pipe ?
@@ -664,12 +663,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
/* de-assert ip_tg_enable signal */
intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0);
@@ -730,7 +728,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
bool glk_cold_boot = false;
@@ -745,7 +742,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
bxt_dsi_pll_enable(encoder, pipe_config);
} else {
@@ -753,7 +750,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
vlv_dsi_pll_enable(encoder, pipe_config);
}
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Add MIPI IO reset programming for modeset */
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
@@ -762,13 +759,13 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
- if (!IS_GEMINILAKE(dev_priv))
+ if (!display->platform.geminilake)
intel_dsi_prepare(encoder, pipe_config);
	/* Give the panel time to power on and then deassert its reset */
@@ -776,7 +773,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
msleep(intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
glk_cold_boot = glk_dsi_enable_io(encoder);
/* Prepare port in cold boot(s3/s4) scenario */
@@ -788,7 +785,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_device_ready(encoder);
/* Prepare port in normal boot scenario */
- if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot)
+ if (display->platform.geminilake && !glk_cold_boot)
intel_dsi_prepare(encoder, pipe_config);
/* Send initialization commands in LP mode */
@@ -836,11 +833,11 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
@@ -860,9 +857,9 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_clear_device_ready(encoder);
else
vlv_dsi_clear_device_ready(encoder);
@@ -874,13 +871,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
intel_crtc_vblank_off(old_crtc_state);
skl_scaler_disable(old_crtc_state);
@@ -907,7 +903,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
/* Transition to LP-00 */
intel_dsi_clear_device_ready(encoder);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Power down DSI regulator to save power */
intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL,
@@ -917,12 +913,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
@@ -939,7 +935,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
@@ -957,13 +952,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- !bxt_dsi_pll_is_enabled(dev_priv))
+ if ((display->platform.geminilake || display->platform.broxton) &&
+ !bxt_dsi_pll_is_enabled(display))
goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE;
/*
@@ -971,10 +966,10 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
port == PORT_C)
enabled = intel_de_read(display,
- TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE;
+ TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
@@ -989,7 +984,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY))
continue;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -1024,7 +1019,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
enum port port;
- u16 hactive, hfp, hsync, hbp, vfp, vsync;
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
u16 hfp_sw, hsync_sw, hbp_sw;
u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
crtc_hblank_start_sw, crtc_hblank_end_sw;
@@ -1060,7 +1055,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
intel_de_read(display,
- BXT_MIPI_TRANS_VTOTAL(port));
+ BXT_MIPI_TRANS_VTOTAL(port)) + 1;
hactive = adjusted_mode->crtc_hdisplay;
hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port));
@@ -1088,6 +1083,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* vertical values are in terms of lines */
vfp = intel_de_read(display, MIPI_VFP_COUNT(display, port));
+ vbp = intel_de_read(display, MIPI_VBP_COUNT(display, port));
vsync = intel_de_read(display, MIPI_VSYNC_PADDING_COUNT(display, port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
@@ -1096,6 +1092,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
+ drm_WARN_ON(display->drm, adjusted_mode->crtc_vdisplay +
+ vfp + vsync + vbp != adjusted_mode->crtc_vtotal);
adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
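
The paired +1 here (on the BXT_MIPI_TRANS_VTOTAL read) and -1 in set_dsi_timings() below keep the software crtc_vtotal consistent with the register's vtotal-minus-one encoding. A trivial round-trip check, modeling the register as a plain variable:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t crtc_vtotal = 1250;
	uint16_t reg = crtc_vtotal - 1;		/* what set_dsi_timings() writes */
	uint16_t readback = reg + 1;		/* what get_pipe_config() reads */

	assert(readback == crtc_vtotal);
	printf("vtotal round-trips: %u\n", readback);
	return 0;
}
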
@@ -1177,15 +1175,15 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 pclk;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = bxt_dsi_get_pclk(encoder, pipe_config);
} else {
@@ -1218,7 +1216,6 @@ static void set_dsi_timings(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1253,7 +1250,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1265,7 +1262,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port),
adjusted_mode->crtc_vdisplay);
intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port),
- adjusted_mode->crtc_vtotal);
+ adjusted_mode->crtc_vtotal - 1);
}
intel_de_write(display, MIPI_HACTIVE_AREA_COUNT(display, port),
@@ -1307,7 +1304,6 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1327,7 +1323,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -1342,7 +1338,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
intel_de_write(display, MIPI_CTRL(display, port),
tmp | READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
enum pipe pipe = crtc->pipe;
intel_de_rmw(display, MIPI_CTRL(display, port),
@@ -1377,7 +1373,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
tmp |= BXT_DPHY_DEFEATURE_EN;
if (!is_cmd_mode(intel_dsi))
tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1424,7 +1420,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_INIT_COUNT(display, port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
!intel_dsi->dual_link) {
/*
* BXT spec says write MIPI_INIT_COUNT for
@@ -1461,7 +1457,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_LP_BYTECLK(display, port),
intel_dsi->lp_byte_clk);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port),
intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
@@ -1513,18 +1509,17 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return;
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
@@ -1596,7 +1591,7 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
@@ -1645,7 +1640,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
	 * For GEMINILAKE, dphy_param_reg is programmed in terms of the HS
	 * byte clock count; for other platforms, in the HS DDR clock count
*/
- mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
+ mul = display->platform.geminilake ? 8 : 2;
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
@@ -1653,7 +1648,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+ drm_dbg_kms(display->drm, "prepare count too high %u\n",
prepare_cnt);
prepare_cnt = PREPARE_CNT_MAX;
}
@@ -1674,7 +1669,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
exit_zero_cnt += 1;
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+ drm_dbg_kms(display->drm, "exit zero count too high %u\n",
exit_zero_cnt);
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
}
@@ -1685,7 +1680,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* ui_den, ui_num * mul);
if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+ drm_dbg_kms(display->drm, "clock zero count too high %u\n",
clk_zero_cnt);
clk_zero_cnt = CLK_ZERO_CNT_MAX;
}
@@ -1695,7 +1690,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (trail_cnt > TRAIL_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+ drm_dbg_kms(display->drm, "trail count too high %u\n",
trail_cnt);
trail_cnt = TRAIL_CNT_MAX;
}
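
Every D-PHY count in vlv_dphy_param_init() follows the same pattern: scale a nanosecond timing into clock ticks with a round-up divide, then clamp to the register field's maximum. A sketch with made-up inputs; PREPARE_CNT_MAX mirrors the clamp above (value assumed here), everything else is illustrative:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PREPARE_CNT_MAX		0x3f	/* assumed 6-bit field */

int main(void)
{
	unsigned int ths_prepare_ns = 65, ui_den = 8, ui_num = 3, mul = 2;
	unsigned int prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den,
						ui_num * mul);	/* 87 */

	if (prepare_cnt > PREPARE_CNT_MAX)
		prepare_cnt = PREPARE_CNT_MAX;	/* clamp, as the driver does */
	printf("prepare_cnt = %u\n", prepare_cnt);	/* 63 */
	return 0;
}
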
@@ -1761,7 +1756,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1770,7 +1765,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
	 * On Valleyview, some DSI panels lose (v|h)sync when the clock is lower
	 * than 320000 kHz.
*/
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return 320000;
/*
@@ -1778,7 +1773,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
* picture gets unstable, despite that values are
* correct for DSI PLL and DE PLL.
*/
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return 158400;
return 0;
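
A condensed sketch of the cdclk floor selection in vlv_dsi_min_cdclk(); the platform flags are stand-in booleans for the display->platform bits:

#include <stdbool.h>
#include <stdio.h>

static int dsi_min_cdclk_khz(bool is_vlv, bool is_glk)
{
	if (is_vlv)
		return 320000;	/* panels lose sync below this on VLV */
	if (is_glk)
		return 158400;	/* picture unstable below this on GLK */
	return 0;		/* no DSI-imposed floor elsewhere */
}

int main(void)
{
	printf("GLK DSI min cdclk: %d kHz\n", dsi_min_cdclk_khz(false, true));
	return 0;
}
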
@@ -1903,9 +1898,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
{ }
};
-void vlv_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -1914,16 +1908,16 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
enum port port;
enum pipe pipe;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
	/* There is no detection method for MIPI, so rely on VBT */
if (!intel_bios_is_dsi_present(display, &port))
return;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE;
+ if (display->platform.geminilake || display->platform.broxton)
+ display->dsi.mmio_base = BXT_MIPI_BASE;
else
- dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE;
+ display->dsi.mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
@@ -1938,12 +1932,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
encoder = &intel_dsi->base;
intel_dsi->attached_connector = connector;
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_dsi_funcs,
DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
encoder->compute_config = intel_dsi_compute_config;
encoder->pre_enable = intel_dsi_pre_enable;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->enable = bxt_dsi_enable;
encoder->disable = intel_dsi_disable;
encoder->post_disable = intel_dsi_post_disable;
@@ -1963,7 +1957,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->pipe_mask = ~0;
else if (port == PORT_A)
encoder->pipe_mask = BIT(PIPE_A);
@@ -1979,10 +1973,10 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
@@ -1998,18 +1992,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- drm_dbg_kms(&dev_priv->drm, "no device found\n");
+ drm_dbg_kms(display->drm, "no device found\n");
goto err;
}
/* Use clock read-back from current hw-state for fastboot */
current_mode = intel_encoder_current_mode(encoder);
if (current_mode) {
- drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+ drm_dbg_kms(display->drm, "Calculated pclk %d GOP %d\n",
intel_dsi->pclk, current_mode->clock);
if (intel_fuzzy_clock_check(intel_dsi->pclk,
current_mode->clock)) {
- drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
+ drm_dbg_kms(display->drm, "Using GOP pclk\n");
intel_dsi->pclk = current_mode->clock;
}
@@ -2021,7 +2015,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi_vbt_gpio_init(intel_dsi,
intel_dsi_get_hw_state(encoder, &pipe));
- drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs,
+ drm_connector_init(display->drm, &connector->base, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs);
@@ -2030,12 +2024,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(connector, encoder);
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(connector);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!intel_panel_preferred_fixed_mode(connector)) {
- drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
+ drm_dbg_kms(display->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
index 277bacfbc551..ff349b5876c2 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -7,14 +7,14 @@
#define __VLV_DSI_H__
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsi;
#ifdef I915
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state);
-void vlv_dsi_init(struct drm_i915_private *dev_priv);
+void vlv_dsi_init(struct intel_display *display);
#else
static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
@@ -23,7 +23,7 @@ static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void vlv_dsi_init(struct drm_i915_private *dev_priv)
+static inline void vlv_dsi_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 2ed47e7d1051..d42b61e6f076 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -28,7 +28,9 @@
#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -57,7 +59,7 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
return dsi_clk_khz;
}
-static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+static int dsi_calc_mnp(struct intel_display *display,
struct intel_crtc_state *config,
int target_dsi_clk)
{
@@ -68,11 +70,11 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
- drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
+ drm_err(display->drm, "DSI CLK Out of Range\n");
return -ECHRNG;
}
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
ref_clk = 100000;
n = 4;
m_min = 70;
@@ -116,13 +118,13 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
static int vlv_dsi_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
- int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+ int refclk = display->platform.cherryview ? 100000 : 25000;
int i;
pll_ctl = config->dsi_pll.ctrl;
@@ -147,7 +149,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
p--;
if (!p) {
- drm_err(&dev_priv->drm, "wrong P1 divisor\n");
+ drm_err(display->drm, "wrong P1 divisor\n");
return 0;
}
@@ -157,7 +159,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
}
if (i == ARRAY_SIZE(lfsr_converts)) {
- drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
+ drm_err(display->drm, "wrong m_seed programmed\n");
return 0;
}
@@ -175,16 +177,16 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pclk, dsi_clk, ret;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
- ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
+ ret = dsi_calc_mnp(display, config, dsi_clk);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
+ drm_dbg_kms(display->drm, "dsi_calc_mnp failed\n");
return ret;
}
@@ -196,7 +198,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
- drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+ drm_dbg_kms(display->drm, "dsi pll div %08x, ctrl %08x\n",
config->dsi_pll.div, config->dsi_pll.ctrl);
pclk = vlv_dsi_pclk(encoder, config);
@@ -213,15 +215,15 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
- vlv_cck_get(dev_priv);
+ vlv_cck_get(display->drm);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, 0);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL,
config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
/* wait at least 0.5 us after ungating before enabling VCO,
@@ -229,45 +231,45 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
*/
usleep_range(10, 50);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
- if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
+ if (wait_for(vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
- vlv_cck_put(dev_priv);
- drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
+ vlv_cck_put(display->drm);
+ drm_err(display->drm, "DSI PLL lock failed\n");
return;
}
- vlv_cck_put(dev_priv);
+ vlv_cck_put(display->drm);
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 tmp;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
- vlv_cck_get(dev_priv);
+ vlv_cck_get(display->drm);
- tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL);
tmp &= ~DSI_PLL_VCO_EN;
tmp |= DSI_PLL_LDO_GATE;
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, tmp);
- vlv_cck_put(dev_priv);
+ vlv_cck_put(display->drm);
}
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
bool enabled;
u32 val;
u32 mask;
mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(display, BXT_DSI_PLL_ENABLE);
enabled = (val & mask) == mask;
if (!enabled)
@@ -281,17 +283,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
- val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
- if (IS_GEMINILAKE(dev_priv)) {
+ val = intel_de_read(display, BXT_DSI_PLL_CTL);
+ if (display->platform.geminilake) {
if (!(val & BXT_DSIA_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
} else {
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
}
@@ -301,34 +303,34 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
/*
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for PLL lock deassertion\n");
}
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 pll_ctl, pll_div;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
- vlv_cck_get(dev_priv);
- pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
- vlv_cck_put(dev_priv);
+ vlv_cck_get(display->drm);
+ pll_ctl = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL);
+ pll_div = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_DIVIDER);
+ vlv_cck_put(display->drm);
config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
config->dsi_pll.div = pll_div;
@@ -352,14 +354,14 @@ static int bxt_dsi_pclk(struct intel_encoder *encoder,
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 pclk;
- config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
+ config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL);
pclk = bxt_dsi_pclk(encoder, config);
- drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
+ drm_dbg_kms(display->drm, "Calculated pclk=%u\n", pclk);
return pclk;
}
@@ -375,10 +377,9 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
-static void glk_dsi_program_esc_clock(struct drm_device *dev,
- const struct intel_crtc_state *config)
+static void glk_dsi_program_esc_clock(struct intel_display *display,
+ const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dsi_rate = 0;
u32 pll_ratio = 0;
u32 ddr_clk = 0;
@@ -415,17 +416,16 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
txesc2_div = min_t(u32, div2_value, 10);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV1,
(1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV2,
(1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
-static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+static void bxt_dsi_program_clocks(struct intel_display *display, enum port port,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
u32 dsi_rate = 0;
u32 pll_ratio = 0;
@@ -436,7 +436,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
u32 mipi_8by3_divider;
/* Clear old configurations */
- tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
@@ -472,13 +472,13 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
- intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp);
}
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
@@ -494,7 +494,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
} else {
@@ -503,11 +503,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
}
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Can't get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
} else
- drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
+ drm_dbg_kms(display->drm, "DSI PLL calculation is Done!!\n");
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@@ -519,7 +519,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
- if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
+ if (display->platform.broxton && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
pclk = bxt_dsi_pclk(encoder, config);
@@ -536,46 +536,45 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* Configure PLL values */
- intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
- intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
+ intel_de_write(display, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ intel_de_posting_read(display, BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
- bxt_dsi_program_clocks(encoder->base.dev, port, config);
+ bxt_dsi_program_clocks(display, port, config);
} else {
- glk_dsi_program_esc_clock(encoder->base.dev, config);
+ glk_dsi_program_esc_clock(display, config);
}
/* Enable DSI PLL */
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
- if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DSI PLL to lock\n");
return;
}
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
/* Clear old configurations */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
@@ -592,12 +591,11 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
static void assert_dsi_pll(struct intel_display *display, bool state)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
bool cur_state;
- vlv_cck_get(i915);
- cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
- vlv_cck_put(i915);
+ vlv_cck_get(display->drm);
+ cur_state = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
+ vlv_cck_put(display->drm);
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
index f975660fa609..f26e31a7dd69 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -9,7 +9,6 @@
#include <linux/types.h>
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
struct intel_display;
struct intel_encoder;
@@ -33,11 +32,11 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
#ifdef I915
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+bool bxt_dsi_pll_is_enabled(struct intel_display *display);
void assert_dsi_pll_enabled(struct intel_display *display);
void assert_dsi_pll_disabled(struct intel_display *display);
#else
-static inline bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+static inline bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
return false;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_sideband.c b/drivers/gpu/drm/i915/display/vlv_sideband.c
new file mode 100644
index 000000000000..e18045f2b89d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_sideband.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "intel_dpio_phy.h"
+#include "vlv_sideband.h"
+
+static enum vlv_iosf_sb_unit vlv_dpio_phy_to_unit(struct intel_display *display,
+ enum dpio_phy phy)
+{
+ /*
+ * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (display->platform.cherryview)
+ return phy == DPIO_PHY0 ? VLV_IOSF_SB_DPIO_2 : VLV_IOSF_SB_DPIO;
+ else
+ return VLV_IOSF_SB_DPIO;
+}
+
+u32 vlv_dpio_read(struct drm_device *drm, enum dpio_phy phy, int reg)
+{
+ struct intel_display *display = to_intel_display(drm);
+ enum vlv_iosf_sb_unit unit = vlv_dpio_phy_to_unit(display, phy);
+ u32 val;
+
+ val = vlv_iosf_sb_read(drm, unit, reg);
+
+ /*
+ * FIXME: There might be some registers where all 1's is a valid value,
+ * so ideally we should check the register offset instead...
+ */
+ drm_WARN(display->drm, val == 0xffffffff,
+ "DPIO PHY%d read reg 0x%x == 0x%x\n",
+ phy, reg, val);
+
+ return val;
+}
+
+void vlv_dpio_write(struct drm_device *drm,
+ enum dpio_phy phy, int reg, u32 val)
+{
+ struct intel_display *display = to_intel_display(drm);
+ enum vlv_iosf_sb_unit unit = vlv_dpio_phy_to_unit(display, phy);
+
+ vlv_iosf_sb_write(drm, unit, reg, val);
+}
diff --git a/drivers/gpu/drm/i915/display/vlv_sideband.h b/drivers/gpu/drm/i915/display/vlv_sideband.h
new file mode 100644
index 000000000000..2c240d81fead
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_sideband.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef _VLV_SIDEBAND_H_
+#define _VLV_SIDEBAND_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+#include "vlv_iosf_sb.h"
+#include "vlv_iosf_sb_reg.h"
+
+enum dpio_phy;
+struct drm_device;
+
+static inline void vlv_bunit_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+static inline u32 vlv_bunit_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_BUNIT, reg);
+}
+
+static inline void vlv_bunit_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_BUNIT, reg, val);
+}
+
+static inline void vlv_bunit_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+static inline void vlv_cck_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_CCK));
+}
+
+static inline u32 vlv_cck_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_CCK, reg);
+}
+
+static inline void vlv_cck_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_CCK, reg, val);
+}
+
+static inline void vlv_cck_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_CCK));
+}
+
+static inline void vlv_ccu_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_CCU));
+}
+
+static inline u32 vlv_ccu_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_CCU, reg);
+}
+
+static inline void vlv_ccu_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_CCU, reg, val);
+}
+
+static inline void vlv_ccu_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_CCU));
+}
+
+static inline void vlv_dpio_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_DPIO) | BIT(VLV_IOSF_SB_DPIO_2));
+}
+
+#ifdef I915
+u32 vlv_dpio_read(struct drm_device *drm, enum dpio_phy phy, int reg);
+void vlv_dpio_write(struct drm_device *drm,
+ enum dpio_phy phy, int reg, u32 val);
+#else
+static inline u32 vlv_dpio_read(struct drm_device *drm, int phy, int reg)
+{
+ return 0;
+}
+static inline void vlv_dpio_write(struct drm_device *drm,
+ int phy, int reg, u32 val)
+{
+}
+#endif
+
+static inline void vlv_dpio_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_DPIO) | BIT(VLV_IOSF_SB_DPIO_2));
+}
+
+static inline void vlv_flisdsi_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+static inline u32 vlv_flisdsi_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_FLISDSI, reg);
+}
+
+static inline void vlv_flisdsi_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_FLISDSI, reg, val);
+}
+
+static inline void vlv_flisdsi_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+static inline void vlv_nc_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_NC));
+}
+
+static inline u32 vlv_nc_read(struct drm_device *drm, u8 addr)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_NC, addr);
+}
+
+static inline void vlv_nc_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_NC));
+}
+
+static inline void vlv_punit_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+static inline u32 vlv_punit_read(struct drm_device *drm, u32 addr)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_PUNIT, addr);
+}
+
+static inline int vlv_punit_write(struct drm_device *drm, u32 addr, u32 val)
+{
+ return vlv_iosf_sb_write(drm, VLV_IOSF_SB_PUNIT, addr, val);
+}
+
+static inline void vlv_punit_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+#endif /* _VLV_SIDEBAND_H_ */
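A short usage sketch of the DPIO helpers declared above. Callers only name the PHY; vlv_dpio_phy_to_unit() in vlv_sideband.c routes CHV PHY0 to VLV_IOSF_SB_DPIO_2 internally. The register offset and set-mask here are illustrative, not real DPIO registers:

/* Read-modify-write one DPIO register; needs intel_dpio_phy.h for
 * enum dpio_phy. vlv_dpio_get() claims both DPIO IOSF units at once.
 */
static void example_dpio_rmw(struct drm_device *drm, enum dpio_phy phy,
			     int reg, u32 set)
{
	u32 val;

	vlv_dpio_get(drm);
	val = vlv_dpio_read(drm, phy, reg);
	vlv_dpio_write(drm, phy, reg, val | set);
	vlv_dpio_put(drm);
}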
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index ddda468241ef..6e4d0ce3952f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 7d97ea2a653e..c4854c5b4e0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
index e6c382973129..9d7ee1579900 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ab1af978911b..15835952352e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2011-2012 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index e5b0f66ea1fe..6e682a6a0574 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 67ac2586a0f3..0267c924634b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 9473050ac842..f4f1c979d1b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright 2012 Red Hat Inc
*/
@@ -106,7 +105,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
if (!obj->base.filp)
return -ENODEV;
- ret = call_mmap(obj->base.filp, vma);
+ ret = vfs_mmap(obj->base.filp, vma);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 75a143d996e0..ef3b14ae2e0d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
-#include "display/intel_display.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7796c4119ef5..ca7e9216934a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008,2010 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index ea7561ae6e13..232b984f60b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index 28d6526e32ab..8044d34707b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 388f90784d8a..f566191d843b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -48,8 +48,7 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
i915_gem_object_evictable(obj))
assert_object_held(obj);
#endif
- return mr && (mr->type == INTEL_MEMORY_LOCAL ||
- mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+ return mr && intel_memory_type_is_local(mr->type);
}
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c3dabb857960..75f5b0e871ef 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
#include <linux/anon_inodes.h>
#include <linux/mman.h>
-#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <drm/drm_cache.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 196417fd0f5c..946fb9825eb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 356530b599ce..1f38e367c60b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2017 Intel Corporation
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index a5f34542135c..565f8fa330db 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
@@ -17,7 +16,9 @@
#include "i915_gem_ww.h"
#include "i915_vma_types.h"
+struct drm_scanout_buffer;
enum intel_region_id;
+struct intel_framebuffer;
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
@@ -692,6 +693,10 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void);
+int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb);
+void i915_gem_object_panic_finish(struct intel_framebuffer *fb);
+
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
index 9fbf14867a2a..b6dc3d1b9bb1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -77,7 +77,7 @@ i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
* Set object's frontbuffer pointer. If frontbuffer is already set for the
 * object, keep it and return its pointer to the caller. Please note that RCU
* mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This
- * function is protected by i915->display.fb_tracking.lock
+ * function is protected by i915->display->fb_tracking.lock
*
* Return: pointer to frontbuffer which was set.
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 68413c05c812..64600aa8227f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8780aa243105..c16a57160b26 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -1,12 +1,14 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
#include <drm/drm_cache.h>
+#include <drm/drm_panic.h>
#include <linux/vmalloc.h>
+#include "display/intel_fb.h"
+#include "display/intel_display_types.h"
#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"
@@ -355,6 +357,145 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
return vaddr ?: ERR_PTR(-ENOMEM);
}
+struct i915_panic_data {
+ struct page **pages;
+ int page;
+ void *vaddr;
+};
+
+struct i915_framebuffer {
+ struct intel_framebuffer base;
+ struct i915_panic_data panic;
+};
+
+static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb)
+{
+ return &container_of_const(fb, struct i915_framebuffer, base)->panic;
+}
+
+static void i915_panic_kunmap(struct i915_panic_data *panic)
+{
+ if (panic->vaddr) {
+ drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+ kunmap_local(panic->vaddr);
+ panic->vaddr = NULL;
+ }
+}
+
+static struct page **i915_gem_object_panic_pages(struct drm_i915_gem_object *obj)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
+ struct page *page;
+ struct page **pages;
+ struct sgt_iter iter;
+
+ /* For a 3840x2160 32-bit framebuffer, this should require ~64K */
+ pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC);
+ if (!pages)
+ return NULL;
+
+ i = 0;
+ for_each_sgt_page(page, iter, obj->mm.pages)
+ pages[i++] = page;
+ return pages;
+}
+
+static void i915_gem_object_panic_map_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ unsigned int offset = fb->panic_tiling(sb->width, x, y);
+
+ iosys_map_wr(&sb->map[0], offset, u32, color);
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel use
+ * kmap_local_page_try_from_panic() to map the page and write the pixel.
+ * Keep the mapping from the previous pixel where possible, to avoid
+ * excessive map/unmap cycles.
+ */
+static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ unsigned int new_page;
+ unsigned int offset;
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct i915_panic_data *panic = to_i915_panic_data(fb);
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ i915_panic_kunmap(panic);
+ panic->page = new_page;
+ panic->vaddr =
+ kmap_local_page_try_from_panic(panic->pages[panic->page]);
+ }
+ if (panic->vaddr) {
+ u32 *pix = panic->vaddr + offset;
+ *pix = color;
+ }
+}
+
+struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
+{
+ struct i915_framebuffer *i915_fb;
+
+ i915_fb = kzalloc(sizeof(*i915_fb), GFP_KERNEL);
+ if (i915_fb)
+ return &i915_fb->base;
+ return NULL;
+}
+
+/*
+ * Set up the GEM framebuffer for drm_panic access.
+ * Use the current vaddr if it exists, otherwise set up a list of pages.
+ * pfn-mapped objects (no struct page) are not supported yet.
+ */
+int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
+{
+ enum i915_map_type has_type;
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct i915_panic_data *panic = to_i915_panic_data(fb);
+ struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+ void *ptr;
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (ptr) {
+ if (i915_gem_object_has_iomem(obj))
+ iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)ptr);
+ else
+ iosys_map_set_vaddr(&sb->map[0], ptr);
+
+ if (fb->panic_tiling)
+ sb->set_pixel = i915_gem_object_panic_map_set_pixel;
+ return 0;
+ }
+ if (i915_gem_object_has_struct_page(obj)) {
+ panic->pages = i915_gem_object_panic_pages(obj);
+ if (!panic->pages)
+ return -ENOMEM;
+ panic->page = -1;
+ sb->set_pixel = i915_gem_object_panic_page_set_pixel;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+void i915_gem_object_panic_finish(struct intel_framebuffer *fb)
+{
+ struct i915_panic_data *panic = to_i915_panic_data(fb);
+
+ i915_panic_kunmap(panic);
+ panic->page = -1;
+ kfree(panic->pages);
+ panic->pages = NULL;
+}
+
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
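A sketch of how the drm_panic path is expected to drive the helpers added above, assuming the usual scanout-buffer flow where sb->private carries the intel_framebuffer; the wiring shown here is illustrative:

/* panic_setup() picks a map- or page-based set_pixel writer; after
 * drawing, panic_finish() drops any live kmap and frees the page list.
 */
static void example_panic_draw(struct drm_scanout_buffer *sb,
			       struct intel_framebuffer *fb)
{
	if (i915_gem_object_panic_setup(sb))
		return;

	sb->set_pixel(sb, 0, 0, 0xffffffff);	/* one white pixel, top-left */

	i915_gem_object_panic_finish(fb);
}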
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index ef85c6dc9fd5..f9e7cab140f8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 900c08337942..f0857c5c96df 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index bedf1e95941a..bd5bd2c5e7f9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index ae3343c81a64..e3d188455f67 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -1,12 +1,12 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
+#include <linux/uio.h>
#include <drm/drm_cache.h>
@@ -303,38 +303,21 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
.nr_to_write = SWAP_CLUSTER_MAX,
.range_start = 0,
.range_end = LLONG_MAX,
- .for_reclaim = 1,
};
- unsigned long i;
+ struct folio *folio = NULL;
+ int error = 0;
/*
* Leave mmapings intact (GTT will have been revoked on unbinding,
- * leaving only CPU mmapings around) and add those pages to the LRU
+ * leaving only CPU mmapings around) and add those folios to the LRU
* instead of invoking writeback so they are aged and paged out
* as normal.
*/
-
- /* Begin writeback on each dirty page */
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- struct page *page;
-
- page = find_lock_page(mapping, i);
- if (!page)
- continue;
-
- if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
- int ret;
-
- SetPageReclaim(page);
- ret = mapping->a_ops->writepage(page, &wbc);
- if (!PageWriteback(page))
- ClearPageReclaim(page);
- if (!ret)
- goto put;
- }
- unlock_page(page);
-put:
- put_page(page);
+ while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
+ if (folio_mapped(folio))
+ folio_redirty_for_writepage(&wbc, folio);
+ else
+ error = shmem_writeout(folio, NULL, NULL);
}
}
@@ -417,12 +400,12 @@ static int
shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
{
- struct address_space *mapping = obj->base.filp->f_mapping;
- const struct address_space_operations *aops = mapping->a_ops;
char __user *user_data = u64_to_user_ptr(arg->data_ptr);
- u64 remain;
- loff_t pos;
- unsigned int pg;
+ struct file *file = obj->base.filp;
+ struct kiocb kiocb;
+ struct iov_iter iter;
+ ssize_t written;
+ u64 size = arg->size;
/* Caller already validated user args */
GEM_BUG_ON(!access_ok(user_data, arg->size));
@@ -445,63 +428,24 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
- /*
- * Before the pages are instantiated the object is treated as being
- * in the CPU domain. The pages will be clflushed as required before
- * use, and we can freely write into the pages directly. If userspace
- * races pwrite with any other operation; corruption will ensue -
- * that is userspace's prerogative!
- */
+ if (size > MAX_RW_COUNT)
+ return -EFBIG;
- remain = arg->size;
- pos = arg->offset;
- pg = offset_in_page(pos);
+ if (!file->f_op->write_iter)
+ return -EINVAL;
- do {
- unsigned int len, unwritten;
- struct folio *folio;
- void *data, *vaddr;
- int err;
- char __maybe_unused c;
-
- len = PAGE_SIZE - pg;
- if (len > remain)
- len = remain;
-
- /* Prefault the user page to reduce potential recursion */
- err = __get_user(c, user_data);
- if (err)
- return err;
-
- err = __get_user(c, user_data + len - 1);
- if (err)
- return err;
-
- err = aops->write_begin(obj->base.filp, mapping, pos, len,
- &folio, &data);
- if (err < 0)
- return err;
-
- vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
- pagefault_disable();
- unwritten = __copy_from_user_inatomic(vaddr, user_data, len);
- pagefault_enable();
- kunmap_local(vaddr);
-
- err = aops->write_end(obj->base.filp, mapping, pos, len,
- len - unwritten, folio, data);
- if (err < 0)
- return err;
-
- /* We don't handle -EFAULT, leave it to the caller to check */
- if (unwritten)
- return -ENODEV;
-
- remain -= len;
- user_data += len;
- pos += len;
- pg = 0;
- } while (remain);
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = arg->offset;
+ iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)user_data, size);
+
+ written = file->f_op->write_iter(&kiocb, &iter);
+ BUG_ON(written == -EIOCBQUEUED);
+
+ if (written < 0)
+ return written;
+
+ if (written != size)
+ return -EIO;
return 0;
}
@@ -654,9 +598,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
{
struct drm_i915_gem_object *obj;
struct file *file;
- const struct address_space_operations *aops;
- loff_t pos;
- int err;
+ loff_t pos = 0;
+ ssize_t err;
GEM_WARN_ON(IS_DGFX(i915));
obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
@@ -666,29 +609,15 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
- aops = file->f_mapping->a_ops;
- pos = 0;
- do {
- unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
- struct folio *folio;
- void *fsdata;
-
- err = aops->write_begin(file, file->f_mapping, pos, len,
- &folio, &fsdata);
- if (err < 0)
- goto fail;
+ err = kernel_write(file, data, size, &pos);
- memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len);
+ if (err < 0)
+ goto fail;
- err = aops->write_end(file, file->f_mapping, pos, len, len,
- folio, fsdata);
- if (err < 0)
- goto fail;
-
- size -= len;
- data += len;
- pos += len;
- } while (size);
+ if (err != size) {
+ err = -EIO;
+ goto fail;
+ }
return obj;
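The pwrite conversion above replaces the hand-rolled write_begin/write_end loop with a single synchronous ->write_iter call. A standalone sketch of that pattern for a user buffer (error handling trimmed; callers still bound the size by MAX_RW_COUNT as the converted code does):

/* Needs <linux/fs.h> and <linux/uio.h>. */
static ssize_t example_sync_user_write(struct file *file,
				       const char __user *buf,
				       size_t count, loff_t pos)
{
	struct kiocb kiocb;
	struct iov_iter iter;

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos;
	iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, count);

	/* synchronous: a sync kiocb never returns -EIOCBQUEUED */
	return file->f_op->write_iter(&kiocb, &iter);
}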
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index aec41f0f098f..b81e67504bbe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2015 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 9d958a6f377e..3380151edfc1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2012 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index af85d0c28168..8814cbcde5b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 5ac23ff3feff..5a296ba3758a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 09b68713ab32..307a18eede72 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2012-2014 Intel Corporation
*
- * Based on amdgpu_mn, which bears the following notice:
+ * Based on amdgpu_mn, which bears the following notice:
*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 1f55e62044a4..991666fd9f85 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
@@ -107,11 +106,6 @@ static void fence_set_priority(struct dma_fence *fence,
rcu_read_unlock();
}
-static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_chain_ops;
-}
-
void i915_gem_fence_wait_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr)
{
@@ -127,7 +121,7 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence,
for (i = 0; i < array->num_fences; i++)
fence_set_priority(array->fences[i], attr);
- } else if (__dma_fence_is_chain(fence)) {
+ } else if (dma_fence_is_chain(fence)) {
struct dma_fence *iter;
/* The chain is ordered; if we boost the last, we boost all */
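A sketch of the generic chain handling this hunk switches to: dma_fence_is_chain() is the dma-fence-chain.h test that replaced the driver-local helper, and dma_fence_chain_for_each() walks the links (the pr_info() body is illustrative):

/* Needs <linux/dma-fence-chain.h>. */
static void example_walk_chain(struct dma_fence *fence)
{
	struct dma_fence *iter;

	if (!dma_fence_is_chain(fence))
		return;

	/* iterates with proper reference handling; iter ends up NULL */
	dma_fence_chain_for_each(iter, fence)
		pr_info("chain link seqno %llx\n", iter->seqno);
}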
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 46b9a17d6abc..a09e2eb47175 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -1,21 +1,27 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017 Intel Corporation
*/
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_utils.h"
+static int add_param(struct fs_context *fc, const char *key, const char *val)
+{
+ return vfs_parse_fs_string(fc, key, val, strlen(val));
+}
+
void i915_gemfs_init(struct drm_i915_private *i915)
{
- char huge_opt[] = "huge=within_size"; /* r/w */
struct file_system_type *type;
+ struct fs_context *fc;
struct vfsmount *gemfs;
+ int ret;
/*
* By creating our own shmemfs mountpoint, we can pass in
@@ -39,8 +45,16 @@ void i915_gemfs_init(struct drm_i915_private *i915)
if (!type)
goto err;
- gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt);
- if (IS_ERR(gemfs))
+ fc = fs_context_for_mount(type, SB_KERNMOUNT);
+ if (IS_ERR(fc))
+ goto err;
+ ret = add_param(fc, "source", "tmpfs");
+ if (!ret)
+ ret = add_param(fc, "huge", "within_size");
+ if (!ret)
+ gemfs = fc_mount_longterm(fc);
+ put_fs_context(fc);
+ if (ret)
goto err;
i915->mm.gemfs = gemfs;
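A minimal sketch of the fs_context mount pattern the hunk above adopts, shown here with the widely available fc_mount(); the diff itself uses fc_mount_longterm(), which appears alongside this conversion as a longer-lived variant for kernel-internal mounts:

/* Needs <linux/fs.h>, <linux/fs_context.h> and <linux/err.h>. */
static struct vfsmount *example_mount_tmpfs_huge(void)
{
	struct file_system_type *type = get_fs_type("tmpfs");
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret;

	if (!type)
		return ERR_PTR(-ENODEV);

	fc = fs_context_for_mount(type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	/* options go in as key/value strings instead of one option blob */
	ret = vfs_parse_fs_string(fc, "source", "tmpfs", strlen("tmpfs"));
	if (!ret)
		ret = vfs_parse_fs_string(fc, "huge", "within_size",
					  strlen("within_size"));

	mnt = ret ? ERR_PTR(ret) : fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}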
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
index 5d835e44c4f6..16d4333c9a4e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index bac15196b4d2..86d9d2fcb6a6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,6 +5,7 @@
#include "i915_selftest.h"
+#include "display/intel_display_core.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 804f74084bd4..9c3f17e51885 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1837,6 +1837,8 @@ static int igt_mmap_revoke(void *arg)
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
+ int ret;
+ bool unuse_mm = false;
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
SUBTEST(igt_smoke_tiling),
@@ -1848,5 +1850,15 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_mmap_gpu),
};
- return i915_live_subtests(tests, i915);
+ if (!current->mm) {
+ kthread_use_mm(current->active_mm);
+ unuse_mm = true;
+ }
+
+ ret = i915_live_subtests(tests, i915);
+
+ if (unuse_mm)
+ kthread_unuse_mm(current->active_mm);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 325da0414d94..f6a98cf1e5a5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -79,6 +79,29 @@ struct lock_class_key;
#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
+#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
+#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
+
+#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \
+ unsigned int first__ = (first); \
+ unsigned int count__ = (count); \
+ ((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__; \
+})
+
+#define ENGINE_INSTANCES_MASK(gt, first, count) \
+ __ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)
+
+#define RCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
+#define BCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
+#define VDBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
+#define CCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
+
#define GEN6_RING_FAULT_REG_READ(engine__) \
intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))
@@ -355,4 +378,12 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value);
u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value);
u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value);
+#define rb_to_uabi_engine(rb) \
+ rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
+
+#define for_each_uabi_engine(engine__, i915__) \
+ for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
+ (engine__); \
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
+
#endif /* _INTEL_RINGBUFFER_H_ */
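A worked example of the instance-mask math moved into this header: GENMASK() selects the class's contiguous bit range out of the full engine mask, and the shift renormalizes it so bit 0 is the class's first instance. The bit position used here is illustrative:

/* With only VCS0 and VCS2 present, the class-local mask is 0b101. */
#define EX_VCS0 6	/* illustrative: the real value comes from the engine id enum */

static unsigned int example_vdbox_mask(void)
{
	unsigned long engine_mask = BIT(EX_VCS0) | BIT(EX_VCS0 + 2);

	return __ENGINE_INSTANCES_MASK(engine_mask, EX_VCS0, 4); /* == 0x5 */
}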
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index f6c59f20832f..46a5aa4ab9c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -289,6 +289,14 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen8_ggtt_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN8_PAGE_PRESENT;
+ *is_local = pte & GEN12_GGTT_PTE_LM;
+
+ return pte & GEN12_GGTT_PTE_ADDR_MASK;
+}
+
static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
{
struct intel_gt *gt = ggtt->vm.gt;
@@ -435,6 +443,11 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
writeq(pte, addr);
}
+static gen8_pte_t gen8_get_pte(void __iomem *addr)
+{
+ return readq(addr);
+}
+
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@@ -450,6 +463,16 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local);
+}
+
static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
dma_addr_t addr, u64 offset,
unsigned int pat_index, u32 flags)
@@ -605,6 +628,17 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset,
+ bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return vm->pte_decode(ioread32(pte), is_present, is_local);
+}
+
/*
* Binds an object into the global gtt with the specified cache level.
* The object will be accessible to the GPU via commands whose operands
@@ -769,6 +803,14 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ return ggtt->vm.read_entry(vm, offset, is_present, is_local);
+}
+
/*
* Reserve the top of the GuC address space for firmware images. Addresses
* beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
@@ -1245,6 +1287,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen8_ggtt_read_entry;
/*
* Serialize GTT updates with aperture access on BXT if VT-d is on,
@@ -1291,6 +1334,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+ ggtt->vm.pte_decode = gen8_ggtt_pte_decode;
+
return ggtt_probe_common(ggtt, size);
}
@@ -1390,6 +1435,14 @@ static u64 iris_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen6_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN6_PTE_VALID;
+ *is_local = false;
+
+ return ((pte & 0xff0) << 28) | (pte & ~0xfff);
+}
+
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -1428,6 +1481,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen6_ggtt_read_entry;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -1443,6 +1497,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_decode = gen6_pte_decode;
+
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
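The gen6 decode added above reverses the SNB-era GGTT PTE layout: address bits 39:32 are stored in PTE bits 11:4 while bits 31:12 stay in place. A worked example with an illustrative PTE value (VALID bit 0 set):

static dma_addr_t example_gen6_decode(u64 pte)
{
	/* bits 39:32 live in PTE bits 11:4; bits 31:12 are in place */
	return ((pte & 0xff0) << 28) | (pte & ~0xfff);
}

/* example_gen6_decode(0x23456011) == 0x123456000:
 *   (0x010 << 28) = 0x100000000, OR'd with 0x23456000
 */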
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 0c723e7c71a2..889e61843ff3 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -328,7 +328,7 @@ static bool fence_is_active(const struct i915_fence_reg *fence)
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
- struct intel_display *display = &ggtt->vm.i915->display;
+ struct intel_display *display = ggtt->vm.i915->display;
struct i915_fence_reg *active = NULL;
struct i915_fence_reg *fence, *fn;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
index 59eed0a0ce90..c5f5f0bdfb2c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -27,6 +27,13 @@ static void gmch_ggtt_insert_page(struct i915_address_space *vm,
intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}
+static dma_addr_t gmch_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ return intel_gmch_gtt_read_entry(offset >> PAGE_SHIFT,
+ is_present, is_local);
+}
+
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
@@ -103,6 +110,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
ggtt->vm.clear_range = gmch_ggtt_clear_range;
ggtt->vm.scratch_range = gmch_ggtt_clear_range;
+ ggtt->vm.read_entry = gmch_ggtt_read_entry;
ggtt->vm.cleanup = gmch_ggtt_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index 1e925c75fb08..c43febc862dc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -284,7 +284,7 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
if (gt->gsc.intf[intf_id].irq < 0)
return;
- ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
+ ret = generic_handle_irq_safe(gt->gsc.intf[intf_id].irq);
if (ret)
gt_err_ratelimited(gt, "error handling GSC irq: %d\n", ret);
}
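generic_handle_irq() must run with local interrupts disabled; generic_handle_irq_safe() wraps the call in local_irq_save()/local_irq_restore() and so may be invoked from any context. A minimal sketch of the pattern with a hypothetical demultiplexing helper (forward_child_irq() is illustrative, not driver code):

	#include <linux/irqdesc.h>
	#include <linux/printk.h>

	/* Forward a parent interrupt to a child virq from any context. */
	static void forward_child_irq(unsigned int virq)
	{
		int ret;

		/* Safe even if the caller runs with IRQs enabled. */
		ret = generic_handle_irq_safe(virq);
		if (ret)
			pr_warn_ratelimited("child irq %u not handled: %d\n", virq, ret);
	}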
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 3182f19b9837..c7f59d60fac6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -6,6 +6,8 @@
#include <linux/string_helpers.h>
#include <linux/suspend.h>
+#include "display/intel_display_power.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_params.h"
@@ -70,7 +72,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
GT_TRACE(gt, "\n");
@@ -104,7 +106,7 @@ static int __gt_park(struct intel_wakeref *wf)
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
GT_TRACE(gt, "\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index b635aa2820d9..87ef85483bae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -22,7 +22,7 @@
#include "intel_rps.h"
#include "intel_runtime_pm.h"
#include "intel_uncore.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
@@ -366,9 +366,9 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_printf(p, "SW control enabled: %s\n",
str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
- vlv_punit_get(i915);
- freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ freq_sts = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index d1a382dfaa1d..93298820bee2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -250,11 +250,17 @@ void intel_gt_watchdog_work(struct work_struct *work)
llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
if (!i915_request_completed(rq)) {
struct dma_fence *f = &rq->fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(f);
+ timeline = dma_fence_timeline_name(f);
pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
- f->ops->get_driver_name(f),
- f->ops->get_timeline_name(f),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
f->seqno);
+ rcu_read_unlock();
i915_request_cancel(rq, -EINTR);
}
i915_request_put(rq);
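dma_fence_driver_name() and dma_fence_timeline_name() hand back __rcu string pointers that are only guaranteed to stay valid inside an RCU read-side critical section (the owning module could otherwise vanish mid-print), hence the lock/dereference/unlock bracket around the pr_notice(). The pattern in isolation, as a sketch:

	#include <linux/dma-fence.h>
	#include <linux/rcupdate.h>

	/* Print a fence's identity without racing with driver unload. */
	static void print_fence_identity(struct dma_fence *f)
	{
		const char __rcu *driver;
		const char __rcu *timeline;

		rcu_read_lock();
		driver = dma_fence_driver_name(f);
		timeline = dma_fence_timeline_name(f);
		pr_info("%s:%s:%llx\n",
			rcu_dereference(driver), rcu_dereference(timeline), f->seqno);
		rcu_read_unlock();
	}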
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 30b128b1fde7..afbc5c769308 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -176,7 +176,6 @@ static void clear_vm_list(struct list_head *list)
i915_vma_destroy_locked(vma);
i915_gem_object_put(obj);
}
-
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 0a36ea751b63..9d3a3ad567a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -312,6 +312,7 @@ struct i915_address_space {
u64 (*pte_encode)(dma_addr_t addr,
unsigned int pat_index,
u32 flags); /* Create a valid PTE */
+ dma_addr_t (*pte_decode)(u64 pte, bool *is_present, bool *is_local);
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)
@@ -340,6 +341,8 @@ struct i915_address_space {
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags);
+ dma_addr_t (*read_entry)(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
void (*cleanup)(struct i915_address_space *vm);
void (*foreach)(struct i915_address_space *vm,
@@ -590,6 +593,9 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm,
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res);
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
+
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
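With pte_decode alongside read_entry in the vtable, GGTT backends that keep their page table CPU-mapped can share one generic read path: fetch the raw PTE and defer the bit interpretation to the per-generation decoder. A plausible shape for intel_ggtt_read_entry() under that assumption (a sketch, not the verbatim implementation):

	dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
					 u64 offset, bool *is_present, bool *is_local)
	{
		struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
		gen8_pte_t __iomem *pte =
			(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

		/* Raw PTE from the CPU mapping of the GGTT, decoded per-gen. */
		return vm->pte_decode(readq(pte), is_present, is_local);
	}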
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 51847a846002..c481b56fa67d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -751,7 +751,6 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
-
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
/*
* Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index cf41d325712e..5dd8121f4b15 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -314,7 +314,6 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
};
static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
-
/* UC */
MOCS_ENTRY(1, 0, L3_1_UC),
/* WB - L3 */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index dbdcfe130ad4..4a1675dea1c7 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1205,7 +1205,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason)
{
- struct intel_display *display = &gt->i915->display;
+ struct intel_display *display = gt->i915->display;
intel_engine_mask_t awake;
int ret;
@@ -1423,7 +1423,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
/* Use a watchdog to ensure that our reset completes */
intel_wedge_on_timeout(&w, gt, 60 * HZ) {
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
bool need_display_reset;
bool reset_display;
@@ -1448,7 +1448,8 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
else
drm_dev_wedged_event(&gt->i915->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
+ NULL);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 6e9977b2d180..2a6d79abf25b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -365,7 +365,13 @@ static void reset_prepare(struct intel_engine_cs *engine)
ENGINE_READ_FW(engine, RING_HEAD),
ENGINE_READ_FW(engine, RING_TAIL),
ENGINE_READ_FW(engine, RING_START));
- if (!stop_ring(engine)) {
+ /*
+ * Sometimes the engine head fails to stick at zero even after being
+ * written. Poll stop_ring() with wait_for_atomic() so the engine
+ * resumes from the correct RING_HEAD; a 20ms timeout worked best
+ * among the values tried in testing.
+ */
+ if (wait_for_atomic(stop_ring(engine), 20)) {
drm_err(&engine->i915->drm,
"failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
@@ -604,7 +610,6 @@ static int ring_context_alloc(struct intel_context *ce)
/* One ringbuffer to rule them all */
GEM_BUG_ON(!engine->legacy.ring);
ce->ring = engine->legacy.ring;
- ce->timeline = intel_timeline_get(engine->legacy.timeline);
GEM_BUG_ON(ce->state);
if (engine->context_size) {
@@ -617,6 +622,8 @@ static int ring_context_alloc(struct intel_context *ce)
ce->state = vma;
}
+ ce->timeline = intel_timeline_get(engine->legacy.timeline);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 64e9317f58fb..0b35fdd461d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -8,7 +8,7 @@
#include <drm/intel/i915_drm.h>
#include "display/intel_display.h"
-#include "display/intel_display_irq.h"
+#include "display/intel_display_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -23,7 +23,7 @@
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
#include "../../../platform/x86/intel_ips.h"
#define BUSY_MAX_EI 20u /* ms */
@@ -74,7 +74,7 @@ static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
static void rps_timer(struct timer_list *t)
{
- struct intel_rps *rps = from_timer(rps, t, timer);
+ struct intel_rps *rps = timer_container_of(rps, t, timer);
struct intel_gt *gt = rps_to_gt(rps);
struct intel_engine_cs *engine;
ktime_t dt, last, timestamp;
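from_timer() is now timer_container_of(); the contract is unchanged, the callback recovers its enclosing object from the timer_list pointer. A minimal sketch with a hypothetical struct:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct sampler {
		struct timer_list timer;
		unsigned long ticks;
	};

	static void sampler_tick(struct timer_list *t)
	{
		/* Recover the sampler that embeds 't' via its 'timer' member. */
		struct sampler *s = timer_container_of(s, t, timer);

		s->ticks++;
		mod_timer(&s->timer, jiffies + HZ);	/* re-arm one second out */
	}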
@@ -550,6 +550,7 @@ static unsigned int init_emon(struct intel_uncore *uncore)
static bool gen5_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u8 fstart, vstart;
u32 rgvmodectl;
@@ -607,9 +608,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
rps->ips.last_time2 = ktime_get_raw_ns();
- spin_lock(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PCU_EVENT);
- spin_unlock(&i915->irq_lock);
+ ilk_display_rps_enable(display);
spin_unlock_irq(&mchdev_lock);
@@ -621,14 +620,13 @@ static bool gen5_rps_enable(struct intel_rps *rps)
static void gen5_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
- spin_lock(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PCU_EVENT);
- spin_unlock(&i915->irq_lock);
+ ilk_display_rps_disable(display);
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
@@ -822,9 +820,9 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
- vlv_punit_get(i915);
- err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
- vlv_punit_put(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ err = vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
val, intel_gpu_freq(rps, val));
@@ -1001,6 +999,10 @@ void intel_rps_dec_waiters(struct intel_rps *rps)
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
+ /* Don't decrement num_waiters for requests whose increment was skipped */
+ if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
+ return;
+
intel_guc_slpc_dec_waiters(slpc);
} else {
atomic_dec(&rps->num_waiters);
@@ -1029,11 +1031,15 @@ void intel_rps_boost(struct i915_request *rq)
if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
return;
- if (slpc->min_freq_softlimit >= slpc->boost_freq)
- return;
-
/* Return if old value is non zero */
if (!atomic_fetch_inc(&slpc->num_waiters)) {
+ /*
+ * Skip queuing boost work if frequency is already boosted,
+ * but still increment num_waiters.
+ */
+ if (slpc->min_freq_softlimit >= slpc->boost_freq)
+ return;
+
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
queue_work(rps_to_gt(rps)->i915->unordered_wq,
@@ -1274,7 +1280,7 @@ static int chv_rps_max_freq(struct intel_rps *rps)
struct intel_gt *gt = rps_to_gt(rps);
u32 val;
- val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMAX_AT_VMAX_FUSE);
switch (gt->info.sseu.eu_total) {
case 8:
@@ -1301,7 +1307,7 @@ static int chv_rps_rpe_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_GPU_DUTYCYCLE_REG);
val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
@@ -1312,7 +1318,7 @@ static int chv_rps_guar_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMAX_AT_VMAX_FUSE);
return val & FB_GFX_FREQ_FUSE_MASK;
}
@@ -1322,7 +1328,7 @@ static u32 chv_rps_min_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMIN_AT_VMIN_FUSE);
val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
return val & FB_GFX_FREQ_FUSE_MASK;
@@ -1356,14 +1362,14 @@ static bool chv_rps_enable(struct intel_rps *rps)
GEN6_PM_RP_DOWN_TIMEOUT);
/* Setting Fixed Bias */
- vlv_punit_get(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
- vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+ vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, VLV_TURBO_SOC_OVERRIDE, val);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
/* RPS code assumes GPLL is used */
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
@@ -1381,7 +1387,7 @@ static int vlv_rps_guar_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rp1;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FREQ_FUSE);
rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
@@ -1394,7 +1400,7 @@ static int vlv_rps_max_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rp0;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FREQ_FUSE);
rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
/* Clamp to max */
@@ -1408,9 +1414,9 @@ static int vlv_rps_rpe_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rpe;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
return rpe;
@@ -1421,7 +1427,7 @@ static int vlv_rps_min_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_LFM) & 0xff;
/*
* According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
* for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
@@ -1457,15 +1463,15 @@ static bool vlv_rps_enable(struct intel_rps *rps)
/* WaGsvRC0ResidencyMethod:vlv */
rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
- vlv_punit_get(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
- vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+ vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, VLV_TURBO_SOC_OVERRIDE, val);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
/* RPS code assumes GPLL is used */
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
@@ -1678,7 +1684,7 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
rps->gpll_ref_freq =
- vlv_get_cck_clock(i915, "GPLL ref",
+ vlv_get_cck_clock(&i915->drm, "GPLL ref",
CCK_GPLL_CLOCK_CONTROL,
i915->czclk_freq);
@@ -1690,7 +1696,7 @@ static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- vlv_iosf_sb_get(i915,
+ vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -1714,7 +1720,7 @@ static void vlv_rps_init(struct intel_rps *rps)
drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
- vlv_iosf_sb_put(i915,
+ vlv_iosf_sb_put(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -1724,7 +1730,7 @@ static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- vlv_iosf_sb_get(i915,
+ vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -1748,7 +1754,7 @@ static void chv_rps_init(struct intel_rps *rps)
drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
- vlv_iosf_sb_put(i915,
+ vlv_iosf_sb_put(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -2113,9 +2119,9 @@ static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
} else if (GRAPHICS_VER(i915) >= 12) {
r = GEN12_RPSTAT1;
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- vlv_punit_get(i915);
- freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ freq = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
} else if (GRAPHICS_VER(i915) >= 6) {
r = GEN6_RPSTAT1;
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 5135b90a2a40..ece445109305 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -57,7 +57,7 @@ struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
- * i915->irq_lock
+ * gt->irq_lock
*/
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/gpu/drm/i915/gt/intel_wopcm.h b/drivers/gpu/drm/i915/gt/intel_wopcm.h
index 17d6aa86008a..d2038b6de5e7 100644
--- a/drivers/gpu/drm/i915/gt/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/gt/intel_wopcm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 116683ebe074..b37e400f74e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -156,7 +156,7 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
struct i915_wa *list;
- list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*list),
GFP_KERNEL);
if (!list) {
drm_err(&i915->drm, "No space for workaround init!\n");
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 64315b714743..79741f043f03 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -108,7 +108,7 @@ static void advance(struct i915_request *request)
static void hw_delay_complete(struct timer_list *t)
{
- struct mock_engine *engine = from_timer(engine, t, hw_delay);
+ struct mock_engine *engine = timer_container_of(engine, t, hw_delay);
struct i915_request *request;
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 22e750108c5f..23f04f6f8fba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
#include "gem/i915_gem_internal.h"
+#include "i915_drv.h"
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
@@ -859,6 +860,14 @@ static int live_lrc_timestamp(void *arg)
};
/*
+ * This test was designed to isolate a hardware bug.
+ * The bug was fixed in later generations, but on earlier
+ * generations the test now only pollutes our CI.
+ */
+ if (GRAPHICS_VER(gt->i915) == 12)
+ return 0;
+
+ /*
* We want to verify that the timestamp is saved and restored across
* context switches and is monotonic.
*
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 401bee030dbc..54bc447efce0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -537,7 +537,7 @@ struct spinner_timer {
static void spinner_kill(struct timer_list *timer)
{
- struct spinner_timer *st = from_timer(st, timer, timer);
+ struct spinner_timer *st = timer_container_of(st, timer, timer);
igt_spinner_end(&st->spin);
pr_info("%s\n", __func__);
@@ -661,7 +661,7 @@ static int live_emit_pte_full_ring(void *arg)
out_rq:
i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */
timer_delete_sync(&st.timer);
- destroy_timer_on_stack(&st.timer);
+ timer_destroy_on_stack(&st.timer);
out_unpin:
intel_context_unpin(ce);
out_put:
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 908483ab0bc8..41716ed454b7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -33,15 +33,22 @@ int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
struct intel_rc6 *rc6 = &gt->rc6;
- u64 rc0_power, rc6_power;
+ struct intel_rps *rps = &gt->rps;
intel_wakeref_t wakeref;
+ u64 rc0_sample_energy[2];
+ u64 rc6_sample_energy[2];
+ u64 sleep_time = 1000;
+ u32 rc0_freq = 0;
+ u32 rc6_freq = 0;
+ u64 rc0_power;
+ u64 rc6_power;
bool has_power;
+ u64 threshold;
ktime_t dt;
u64 res[2];
int err = 0;
- u32 rc0_freq = 0;
- u32 rc6_freq = 0;
- struct intel_rps *rps = &gt->rps;
+ u64 diff;
+
/*
* Our claim is that we can "encourage" the GPU to enter rc6 at will.
@@ -60,14 +67,15 @@ int live_rc6_manual(void *arg)
/* Force RC6 off for starters */
__intel_rc6_disable(rc6);
- msleep(1); /* wakeup is not immediate, takes about 100us on icl */
+ /* wakeup is not immediate, takes about 100us on icl */
+ usleep_range(1000, 2000);
res[0] = rc6_residency(rc6);
dt = ktime_get();
- rc0_power = librapl_energy_uJ();
- msleep(1000);
- rc0_power = librapl_energy_uJ() - rc0_power;
+ rc0_sample_energy[0] = librapl_energy_uJ();
+ msleep(sleep_time);
+ rc0_sample_energy[1] = librapl_energy_uJ() - rc0_sample_energy[0];
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
rc0_freq = intel_rps_read_actual_frequency_fw(rps);
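msleep(1) is jiffy based and can oversleep a ~100us requirement by an order of magnitude, whereas usleep_range() is hrtimer backed and lets the scheduler coalesce wakeups within the given window. The usual selection guidance (per Documentation/timers/timers-howto.rst), restated as a hypothetical helper:

	#include <linux/delay.h>

	/*
	 * Sleep selection by requested duration:
	 *   < ~10 us       -> udelay()                (busy-wait)
	 *   10 us - 20 ms  -> usleep_range(min, max)  (hrtimer, coalescing window)
	 *   > 20 ms        -> msleep()                (jiffies, may oversleep)
	 */
	static void settle_after_rc6_toggle(void)
	{
		/* ~100 us actually needed; allow a 1-2 ms coalescing window. */
		usleep_range(1000, 2000);
	}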
@@ -79,11 +87,12 @@ int live_rc6_manual(void *arg)
}
if (has_power) {
- rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_sample_energy[1],
ktime_to_ns(dt));
+
if (!rc0_power) {
if (rc0_freq)
- pr_debug("No power measured while in RC0! GPU Freq: %u in RC0\n",
+ pr_debug("No power measured while in RC0! GPU Freq: %uMHz in RC0\n",
rc0_freq);
else
pr_err("No power and freq measured while in RC0\n");
@@ -98,10 +107,10 @@ int live_rc6_manual(void *arg)
res[0] = rc6_residency(rc6);
intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
dt = ktime_get();
- rc6_power = librapl_energy_uJ();
- msleep(1000);
+ rc6_sample_energy[0] = librapl_energy_uJ();
+ msleep(sleep_time);
rc6_freq = intel_rps_read_actual_frequency_fw(rps);
- rc6_power = librapl_energy_uJ() - rc6_power;
+ rc6_sample_energy[1] = librapl_energy_uJ() - rc6_sample_energy[0];
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {
@@ -113,13 +122,24 @@ int live_rc6_manual(void *arg)
}
if (has_power) {
- rc6_power = div64_u64(NSEC_PER_SEC * rc6_power,
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_sample_energy[1],
ktime_to_ns(dt));
- pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ pr_info("GPU consumed %lluuW in RC0 and %lluuW in RC6\n",
rc0_power, rc6_power);
+
if (2 * rc6_power > rc0_power) {
- pr_err("GPU leaked energy while in RC6! GPU Freq: %u in RC6 and %u in RC0\n",
- rc6_freq, rc0_freq);
+ pr_err("GPU leaked energy while in RC6!\n"
+ "GPU Freq: %uMHz in RC6 and %uMHz in RC0\n"
+ "RC0 energy before & after sleep respectively: %lluuJ %lluuJ\n"
+ "RC6 energy before & after sleep respectively: %lluuJ %lluuJ\n",
+ rc6_freq, rc0_freq, rc0_sample_energy[0], rc0_sample_energy[1],
+ rc6_sample_energy[0], rc6_sample_energy[1]);
+
+ diff = res[1] - res[0];
+ threshold = (9 * NSEC_PER_MSEC * sleep_time) / 10;
+ if (diff < threshold)
+ pr_err("Did not enter RC6 properly, RC6 start residency=%lluns, RC6 end residency=%lluns\n",
+ res[0], res[1]);
err = -EINVAL;
goto out_unlock;
}
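The new residency check quantifies what "entered RC6 properly" means; the worked numbers for the default sleep_time:

	/*
	 * With sleep_time = 1000 (ms):
	 *
	 *   threshold = 9 * NSEC_PER_MSEC * sleep_time / 10
	 *             = 9 * 1000000 * 1000 / 10
	 *             = 900000000 ns
	 *
	 * res[1] - res[0] is the RC6 residency accumulated over the 1 s sleep,
	 * so the diagnostic fires whenever the GPU spent less than 90% of the
	 * window in RC6, i.e. it never settled into RC6 and the energy
	 * comparison above is suspect.
	 */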
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 3941f2d6fa47..69ed946a39e5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -143,7 +143,7 @@ pte_tlbinv(struct intel_context *ce,
if (ce->engine->class == OTHER_CLASS)
msleep(200);
else
- msleep(10);
+ usleep_range(10000, 20000);
if (va == vb) {
if (!i915_request_completed(rq)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 5dc0ccd07636..d550eb6edfb8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -230,7 +230,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build);
- return -EINVAL;
+ return -EINVAL;
}
if (min_ver.major) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 9df80c325fc1..f360f020d8f1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -313,8 +313,13 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
*
* The same WA bit is used for both and 22011391025 is applicable to
* all DG2.
+ *
+ * Platforms after DG2 prevent this issue in hardware by stalling
+ * submissions. With this flag set, GuC schedules so as to avoid
+ * such stalls.
*/
- if (IS_DG2(gt->i915))
+ if (IS_DG2(gt->i915) ||
+ (CCS_MASK(gt) && GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)))
flags |= GUC_WA_DUAL_QUEUE;
/* Wa_22011802037: graphics version 11/12 */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index e8a04e476c57..09a64f224c49 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -220,8 +220,7 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
*/
static int subbuf_start_callback(struct rchan_buf *buf,
void *subbuf,
- void *prev_subbuf,
- size_t prev_padding)
+ void *prev_subbuf)
{
/*
* Use no-overwrite mode by default, where relay will stop accepting
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f8cb7c630d5b..127316d2c8aa 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -633,7 +633,7 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc,
atomic_inc(&guc->outstanding_submission_g2h);
ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
- if (ret)
+ if (ret && g2h_len_dw)
atomic_dec(&guc->outstanding_submission_g2h);
return ret;
@@ -3443,18 +3443,29 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
* GuC is active, lets destroy this context, but at this point we can still be racing
* with suspend, so we undo everything if the H2G fails in deregister_context so
* that GuC reset will find this context during clean up.
+ *
+ * There is a race condition where the reset code could have altered
+ * this context's state and done a wakeref put before we try to
+ * deregister it here. So check if the context is still set to be
+ * destroyed before undoing earlier changes, to avoid two wakeref puts
+ * on the same context.
*/
ret = deregister_context(ce, ce->guc_id.id);
if (ret) {
+ bool pending_destroyed;
spin_lock_irqsave(&ce->guc_state.lock, flags);
- set_context_registered(ce);
- clr_context_destroyed(ce);
+ pending_destroyed = context_destroyed(ce);
+ if (pending_destroyed) {
+ set_context_registered(ce);
+ clr_context_destroyed(ce);
+ }
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/*
* As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
* the wakeref immediately but per function spec usage call this after unlock.
*/
- intel_wakeref_put_async(&gt->wakeref);
+ if (pending_destroyed)
+ intel_wakeref_put_async(&gt->wakeref);
}
return ret;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index ec33ad942115..e848a04a80dc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -1116,7 +1116,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
_MASKED_BIT_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100, NULL);
if (ret)
gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
intel_uc_fw_type_repr(uc_fw->type),
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index eedd1865bb98..62d14f82256f 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -46,6 +46,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
+ intel_wakeref_t wakeref;
int ret;
if (high_gm) {
@@ -63,12 +64,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&gt->ggtt->vm.mutex);
- mmio_hw_access_pre(gt);
+ wakeref = mmio_hw_access_pre(gt);
ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
@@ -226,7 +227,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(uncore->rpm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index f25ee2953baf..a91e23c22ea1 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -38,6 +38,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
@@ -50,6 +51,7 @@
#include "trace.h"
#include "display/i9xx_plane_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_sprite_regs.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
@@ -1286,7 +1288,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1333,7 +1335,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1421,7 +1423,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index baccbf1761b7..673534f061ef 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -91,16 +91,17 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
.diff = 0,
};
struct diff_mmio *node, *next;
+ intel_wakeref_t wakeref;
INIT_LIST_HEAD(&param.diff_mmio_list);
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 1e1af5e545a4..74197e337585 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -36,6 +36,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "display/bxt_dpio_phy_regs.h"
@@ -43,6 +44,7 @@
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display.h"
+#include "display/intel_display_core.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_sprite_regs.h"
@@ -69,7 +71,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
if (!(vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
@@ -82,7 +84,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
if (drm_WARN_ON(&dev_priv->drm,
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
@@ -183,7 +185,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -634,7 +636,7 @@ void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -664,7 +666,7 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
int pipe;
mutex_lock(&vgpu->vgpu_lock);
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 8090bc53c7e1..bc7f05f9a271 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -63,19 +63,6 @@ struct intel_vgpu;
#define AUX_BURST_SIZE 20
-#define SBI_RESPONSE_MASK 0x3
-#define SBI_RESPONSE_SHIFT 0x1
-#define SBI_STAT_MASK 0x1
-#define SBI_STAT_SHIFT 0x0
-#define SBI_OPCODE_SHIFT 8
-#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
-#define SBI_CMD_IORD 2
-#define SBI_CMD_IOWR 3
-#define SBI_CMD_CRRD 6
-#define SBI_CMD_CRWR 7
-#define SBI_ADDR_OFFSET_SHIFT 16
-#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
-
struct intel_vgpu_sbi_register {
unsigned int offset;
u32 value;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 89147d33168c..2031b97de2b7 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -35,6 +35,7 @@
#include <drm/display/drm_dp.h>
#include "display/intel_dp_aux_regs.h"
+#include "display/intel_gmbus.h"
#include "display/intel_gmbus_regs.h"
#include "gvt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index f9f7ef131371..a8079cfa8e1d 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -39,9 +39,11 @@
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/intel_cursor_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_sprite_regs.h"
#include "display/skl_universal_plane_regs.h"
@@ -154,7 +156,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(display, pipe)) & stride_mask;
u32 stride = stride_reg;
@@ -211,7 +213,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 val, fmt;
int pipe;
@@ -342,7 +344,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
int pipe;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2fa7ca19ba5d..ae9b0ded3651 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -220,9 +220,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
static void ggtt_invalidate(struct intel_gt *gt)
{
- mmio_hw_access_pre(gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gt);
intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
}
static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 01d890999f25..1d10c16e6465 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -570,14 +570,15 @@ enum {
GVT_FAILSAFE_GUEST_ERR,
};
-static inline void mmio_hw_access_pre(struct intel_gt *gt)
+static inline intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt)
{
- intel_runtime_pm_get(gt->uncore->rpm);
+ return intel_runtime_pm_get(gt->uncore->rpm);
}
-static inline void mmio_hw_access_post(struct intel_gt *gt)
+static inline void mmio_hw_access_post(struct intel_gt *gt,
+ intel_wakeref_t wakeref)
{
- intel_runtime_pm_put_unchecked(gt->uncore->rpm);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
/**
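Returning the cookie from intel_runtime_pm_get() and threading it into intel_runtime_pm_put() lets the wakeref tracker (CONFIG_DRM_I915_DEBUG_RUNTIME_PM) pair every get with its put, which the _unchecked variant defeated. Callers now follow this shape (sampled_read() is a hypothetical helper, not driver code):

	static u32 sampled_read(struct intel_gt *gt, i915_reg_t reg)
	{
		intel_wakeref_t wakeref;
		u32 val;

		wakeref = mmio_hw_access_pre(gt);	/* tracked rpm get */
		val = intel_uncore_read(gt->uncore, reg);
		mmio_hw_access_post(gt, wakeref);	/* paired, tracked put */

		return val;
	}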
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 4efee6797873..f446f73f0fe2 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -40,6 +40,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
@@ -47,6 +48,7 @@
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
@@ -55,7 +57,9 @@
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sbi_regs.h"
#include "display/intel_sprite_regs.h"
+#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
@@ -264,6 +268,7 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off);
+ intel_wakeref_t wakeref;
int ret;
ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
@@ -271,10 +276,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
return 0;
}
@@ -513,7 +518,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
switch (wrpll_ctl & WRPLL_REF_MASK) {
case WRPLL_REF_PCH_SSC:
- refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
+ refclk = 135000;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -544,7 +549,7 @@ out:
static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
u32 dp_br = 0;
- int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
+ int refclk = 100000;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
struct dpll clock = {};
@@ -656,7 +661,7 @@ static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
enum port port;
u32 dp_br, link_m, link_n, htotal, vtotal;
@@ -1020,7 +1025,7 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 pipe = DSPSURF_TO_PIPE(display, offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
@@ -1062,7 +1067,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
@@ -1410,12 +1415,12 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
- SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
- unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
- SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
- vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
- sbi_offset);
+ if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRRD) {
+ unsigned int sbi_offset;
+
+ sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));
+
+ vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu, sbi_offset);
}
read_vreg(vgpu, offset, p_data, bytes);
return 0;
@@ -1429,21 +1434,20 @@ static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
- data |= SBI_READY;
+ data &= ~SBI_STATUS_MASK;
+ data |= SBI_STATUS_READY;
- data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
+ data &= ~SBI_RESPONSE_MASK;
data |= SBI_RESPONSE_SUCCESS;
vgpu_vreg(vgpu, offset) = data;
- if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
- SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
- unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
- SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+ if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRWR) {
+ unsigned int sbi_offset;
+
+ sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));
- write_virtual_sbi_register(vgpu, sbi_offset,
- vgpu_vreg_t(vgpu, SBI_DATA));
+ write_virtual_sbi_register(vgpu, sbi_offset, vgpu_vreg_t(vgpu, SBI_DATA));
}
return 0;
}
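REG_FIELD_GET(mask, value) derives the shift from the mask itself, which is what allows the SBI_*_SHIFT/SBI_*_MASK pairs deleted from gvt/display.h earlier in this diff to go away. A self-contained model of the extraction, using the 31:16 address field those old defines described (the kernel version builds on FIELD_GET() from <linux/bitfield.h>):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SBI_ADDR_MASK	0xffff0000u	/* address field in bits 31:16 */

	/* Shift amount = index of the mask's lowest set bit. */
	#define FIELD_GET(mask, val)	(((val) & (mask)) >> __builtin_ctz(mask))

	int main(void)
	{
		uint32_t sbi_addr_reg = 0x12340000u;

		assert(FIELD_GET(SBI_ADDR_MASK, sbi_addr_reg) == 0x1234);
		printf("sbi_offset = 0x%x\n", FIELD_GET(SBI_ADDR_MASK, sbi_addr_reg));
		return 0;
	}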
@@ -1975,10 +1979,12 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
vgpu == gvt->scheduler.engine_owner[engine->id] ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
vgpu_vreg(vgpu, offset) =
intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -2196,7 +2202,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
@@ -3209,10 +3215,12 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
int i, id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
for (i = 0; i < vgpu_fence_sz(vgpu); i++)
intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
@@ -3233,8 +3241,10 @@ void intel_gvt_restore_mmio(struct intel_gvt *gvt)
int id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 336d079c4207..a956da68e6bd 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index e16e0d4c9534..da1135fa7cda 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -36,6 +36,7 @@
#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "display/bxt_dpio_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 9f97f743aa71..6c2d68e88266 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -447,6 +447,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
if (!vgpu_data->active)
return;
@@ -465,7 +466,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->current_vgpu = NULL;
}
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
for_each_engine(engine, vgpu->gvt->gt, id) {
if (scheduler->engine_owner[engine->id] == vgpu) {
@@ -474,6 +475,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0d9e263913ff..967c0501e91e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,8 +66,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
struct drm_i915_private *i915 = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
-
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index ce3cc93ea211..c6263c6d3384 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -51,6 +51,7 @@
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_crtc.h"
+#include "display/intel_display_core.h"
#include "display/intel_display_driver.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
@@ -58,11 +59,12 @@
#include "display/intel_encoder.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
+#include "display/intel_opregion.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pps.h"
+#include "display/intel_sbi.h"
#include "display/intel_sprite_uapi.h"
-#include "display/intel_vga.h"
#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
@@ -108,8 +110,7 @@
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_region_ttm.h"
-#include "intel_sbi.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
#include "vlv_suspend.h"
static const struct drm_driver i915_drm_driver;
@@ -134,10 +135,6 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
if (dev_priv->wq == NULL)
goto out_err;
- dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->display.hotplug.dp_wq == NULL)
- goto out_free_wq;
-
/*
* The unordered i915 workqueue should be used for all work
* scheduling that do not require running in order, which used
@@ -146,12 +143,10 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
*/
dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
if (dev_priv->unordered_wq == NULL)
- goto out_free_dp_wq;
+ goto out_free_wq;
return 0;
-out_free_dp_wq:
- destroy_workqueue(dev_priv->display.hotplug.dp_wq);
out_free_wq:
destroy_workqueue(dev_priv->wq);
out_err:
@@ -163,7 +158,6 @@ out_err:
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->unordered_wq);
- destroy_workqueue(dev_priv->display.hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
}
@@ -223,7 +217,7 @@ static void sanitize_gpu(struct drm_i915_private *i915)
*/
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int ret = 0;
if (i915_inject_probe_failure(dev_priv))
@@ -235,10 +229,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_uncore_mmio_debug_init_early(dev_priv);
- spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- intel_sbi_init(dev_priv);
+ intel_sbi_init(display);
vlv_iosf_sb_init(dev_priv);
mutex_init(&dev_priv->sb_lock);
@@ -263,9 +256,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
i915_gem_init_early(dev_priv);
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(dev_priv);
-
intel_irq_init(dev_priv);
intel_display_driver_early_probe(display);
intel_clock_gating_hooks_init(dev_priv);
@@ -290,7 +280,7 @@ err_workqueues:
*/
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(display);
@@ -302,9 +292,11 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
mutex_destroy(&dev_priv->sb_lock);
vlv_iosf_sb_fini(dev_priv);
- intel_sbi_fini(dev_priv);
+ intel_sbi_fini(display);
i915_params_free(&dev_priv->params);
+
+ intel_display_device_remove(display);
}
/**
@@ -318,7 +310,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
*/
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -465,7 +457,7 @@ static int i915_pcode_init(struct drm_i915_private *i915)
*/
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
@@ -576,9 +568,11 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* Fill the dram structure to get the system dram info. This will be
* used for memory latency calculation.
*/
- intel_dram_detect(dev_priv);
+ ret = intel_dram_detect(dev_priv);
+ if (ret)
+ goto err_opregion;
- intel_bw_init_hw(dev_priv);
+ intel_bw_init_hw(display);
return 0;
@@ -604,7 +598,7 @@ err_perf:
*/
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
i915_perf_fini(dev_priv);
@@ -622,11 +616,12 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
* Perform any steps necessary to make the driver available via kernel
* internal or userspace interfaces.
*/
-static void i915_driver_register(struct drm_i915_private *dev_priv)
+static int i915_driver_register(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
unsigned int i;
+ int ret;
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
@@ -634,10 +629,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
- if (drm_dev_register(&dev_priv->drm, 0)) {
- drm_err(&dev_priv->drm,
- "Failed to register driver for userspace access!\n");
- return;
+ ret = drm_dev_register(&dev_priv->drm, 0);
+ if (ret) {
+ i915_probe_error(dev_priv,
+ "Failed to register driver for userspace access!\n");
+ i915_pmu_unregister(dev_priv);
+ i915_gem_driver_unregister(dev_priv);
+ return ret;
}
i915_debugfs_register(dev_priv);
@@ -660,6 +659,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
if (i915_switcheroo_register(dev_priv))
drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
+
+ return 0;
}
/**
@@ -668,7 +669,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
unsigned int i;
@@ -740,6 +741,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
struct drm_i915_private *i915;
+ struct intel_display *display;
i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
struct drm_i915_private, drm);
@@ -754,7 +756,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, match_info);
- intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev);
+ if (IS_ERR(display))
+ return ERR_CAST(display);
+
+ i915->display = display;
return i915;
}
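
intel_display_device_probe() now returns the display structure or an encoded error, so i915_driver_create() adopts the kernel's ERR_PTR convention, with ERR_CAST() re-typing the encoded error when the outer function returns a different pointer type. A self-contained sketch of that convention, with the ERR_PTR helpers re-created for userspace and the device types invented:

	#include <stdio.h>
	#include <errno.h>

	/* Userspace re-creation of the kernel's pointer-or-error encoding. */
	#define MAX_ERRNO 4095
	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline long PTR_ERR(const void *p) { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}
	#define ERR_CAST(p) ((void *)(p))	/* re-type an encoded error */

	struct example_display { int id; };	/* hypothetical */
	struct example_i915    { struct example_display *display; };

	static struct example_display *example_display_probe(int fail)
	{
		static struct example_display d = { .id = 1 };

		if (fail)
			return ERR_PTR(-ENODEV);	/* error encoded in the pointer */
		return &d;
	}

	static struct example_i915 *example_driver_create(int fail)
	{
		static struct example_i915 i915;
		struct example_display *display;

		display = example_display_probe(fail);
		if (IS_ERR(display))
			return ERR_CAST(display);	/* propagate as the outer type */

		i915.display = display;
		return &i915;
	}

	int main(void)
	{
		struct example_i915 *i915 = example_driver_create(1);

		if (IS_ERR(i915))
			printf("create failed: %ld\n", PTR_ERR(i915));
		return 0;
	}
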
@@ -788,7 +794,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
}
- display = &i915->display;
+ display = i915->display;
ret = i915_driver_early_probe(i915);
if (ret < 0)
@@ -834,7 +840,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_gem;
- i915_driver_register(i915);
+ ret = i915_driver_register(i915);
+ if (ret)
+ goto out_cleanup_gem;
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -845,6 +853,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_cleanup_gem:
+ intel_pxp_fini(i915);
i915_gem_suspend(i915);
i915_gem_driver_remove(i915);
i915_gem_driver_release(i915);
@@ -877,7 +886,7 @@ out_pci_disable:
void i915_driver_remove(struct drm_i915_private *i915)
{
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
@@ -910,7 +919,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t wakeref;
@@ -933,8 +941,6 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
-
- intel_display_device_remove(display);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -964,7 +970,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
disable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_disable(&i915->runtime_pm);
@@ -981,15 +987,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_dp_mst_suspend(display);
intel_irq_suspend(i915);
- intel_hpd_cancel_work(i915);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(i915))
intel_display_driver_suspend_access(display);
- intel_encoder_suspend_all(&i915->display);
- intel_encoder_shutdown_all(&i915->display);
+ intel_encoder_suspend_all(display);
+ intel_encoder_shutdown_all(display);
- intel_dmc_suspend(&i915->display);
+ intel_dmc_suspend(display);
i915_gem_suspend(i915);
@@ -1044,7 +1050,7 @@ static int i915_drm_prepare(struct drm_device *dev)
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
@@ -1064,12 +1070,12 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(dev_priv))
intel_display_driver_suspend_access(display);
- intel_encoder_suspend_all(&dev_priv->display);
+ intel_encoder_suspend_all(display);
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(display);
@@ -1094,7 +1100,7 @@ static int i915_drm_suspend(struct drm_device *dev)
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct intel_gt *gt;
@@ -1166,7 +1172,7 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -1195,13 +1201,11 @@ static int i915_drm_resume(struct drm_device *dev)
i9xx_display_sr_restore(display);
- intel_vga_redisable(display);
-
intel_gmbus_reset(display);
intel_pps_unlock_regs_wa(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/*
* Interrupts have to be enabled before any batches are run. If not the
@@ -1227,7 +1231,7 @@ static int i915_drm_resume(struct drm_device *dev)
if (HAS_DISPLAY(dev_priv))
intel_display_driver_resume_access(display);
- intel_hpd_init(dev_priv);
+ intel_hpd_init(display);
intel_display_driver_resume(display);
@@ -1235,7 +1239,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -1253,7 +1257,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gt *gt;
int ret, i;
@@ -1485,7 +1489,7 @@ static int i915_pm_restore(struct device *kdev)
static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
@@ -1575,7 +1579,7 @@ static int intel_runtime_suspend(struct device *kdev)
assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
drm_dbg(&dev_priv->drm, "Device suspended\n");
return 0;
@@ -1584,7 +1588,7 @@ static int intel_runtime_suspend(struct device *kdev)
static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
@@ -1633,11 +1637,11 @@ static int intel_runtime_resume(struct device *kdev)
* everyone else do it here.
*/
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
}
- skl_watermark_ipc_update(dev_priv);
+ skl_watermark_ipc_update(display);
enable_rpm_wakeref_asserts(rpm);
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 4b67ad9a61cd..1e95ecb2a163 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -15,7 +15,6 @@ struct drm_printer;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_TIMESTAMP 1695980603
extern const struct dev_pm_ops i915_pm_ops;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54538b6f85df..4e4e89746aa6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,13 +32,11 @@
#include <uapi/drm/i915_drm.h>
+#include <linux/pci.h>
#include <linux/pm_qos.h>
#include <drm/ttm/ttm_device.h>
-#include "display/intel_display_limits.h"
-#include "display/intel_display_core.h"
-
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
@@ -49,8 +47,6 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
-#include "soc/intel_pch.h"
-
#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
@@ -64,11 +60,11 @@
#include "intel_step.h"
#include "intel_uncore.h"
+struct dram_info;
struct drm_i915_clock_gating_funcs;
-struct vlv_s0ix_state;
+struct intel_display;
struct intel_pxp;
-
-#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
+struct vlv_s0ix_state;
/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
struct i915_dsm {
@@ -179,7 +175,7 @@ struct i915_selftest_stash {
struct drm_i915_private {
struct drm_device drm;
- struct intel_display display;
+ struct intel_display *display;
/* FIXME: Device release actions should all be moved to drmm_ */
bool do_release;
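
The member change above is the crux of the series: struct intel_display is now held by pointer, so i915_drv.h gets by with the forward declaration added further down instead of including the display core headers. A small sketch of the opaque-pointer pattern and what it buys, using invented names:

	/*
	 * In the header, a forward declaration is enough for a pointer
	 * member; embedding the struct by value would force the full
	 * definition (and its includes) into every user of the header.
	 */
	struct example_display;			/* opaque at this point */

	struct example_device_priv {
		struct example_display *display;	/* probed separately */
	};

	/* Only code that dereferences the pointer needs the definition. */
	struct example_display { int ver; };

	static int example_display_ver(const struct example_device_priv *priv)
	{
		return priv->display ? priv->display->ver : 0;
	}

	int main(void)
	{
		struct example_display d = { .ver = 14 };
		struct example_device_priv priv = { .display = &d };

		return example_display_ver(&priv) == 14 ? 0 : 1;
	}
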
@@ -224,16 +220,12 @@ struct drm_i915_private {
};
unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
- /* protects the irq masks */
- spinlock_t irq_lock;
bool irqs_enabled;
- /* LPT/WPT IOSF sideband protection */
- struct mutex sbi_lock;
-
/* VLV/CHV IOSF sideband */
struct {
struct mutex lock; /* protect sideband access */
+ unsigned long locked_unit_mask;
struct pm_qos_request qos;
} vlv_iosf_sb;
@@ -272,9 +264,6 @@ struct drm_i915_private {
/* pm private clock gating functions */
const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
- /* PCH chipset type */
- enum intel_pch pch_type;
-
unsigned long gem_quirks;
struct i915_gem_mm mm;
@@ -292,24 +281,7 @@ struct drm_i915_private {
u32 suspend_count;
struct vlv_s0ix_state *vlv_s0ix_state;
- struct dram_info {
- bool wm_lv_0_adjust_needed;
- u8 num_channels;
- bool symmetric_memory;
- enum intel_dram_type {
- INTEL_DRAM_UNKNOWN,
- INTEL_DRAM_DDR3,
- INTEL_DRAM_DDR4,
- INTEL_DRAM_LPDDR3,
- INTEL_DRAM_LPDDR4,
- INTEL_DRAM_DDR5,
- INTEL_DRAM_LPDDR5,
- INTEL_DRAM_GDDR,
- INTEL_DRAM_GDDR_ECC,
- } type;
- u8 num_qgv_points;
- u8 num_psf_gv_points;
- } dram_info;
+ const struct dram_info *dram_info;
struct intel_runtime_pm runtime_pm;
@@ -380,14 +352,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
return i915->gt[0];
}
-#define rb_to_uabi_engine(rb) \
- rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
-
-#define for_each_uabi_engine(engine__, i915__) \
- for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
- (engine__); \
- (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-
#define INTEL_INFO(i915) ((i915)->__info)
#define RUNTIME_INFO(i915) (&(i915)->__runtime)
#define DRIVER_CAPS(i915) (&(i915)->caps)
@@ -596,29 +560,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_GEN9_LP(i915) (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915))
-#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
-#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
-
-#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \
- unsigned int first__ = (first); \
- unsigned int count__ = (count); \
- ((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__; \
-})
-
-#define ENGINE_INSTANCES_MASK(gt, first, count) \
- __ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)
-
-#define RCS_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
-#define BCS_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
-#define VDBOX_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
-#define VEBOX_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
-#define CCS_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
-
#define HAS_MEDIA_RATIO_MODE(i915) (INTEL_INFO(i915)->has_media_ratio_mode)
/*
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 82e9d289398c..20b3cb29cfff 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -134,4 +134,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
+#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
+
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index be8149e46281..6fcda6d7b5b7 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -16,7 +16,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct pci_dev *pdev = to_pci_dev(dev->dev);
const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
drm_i915_getparam_t *param = data;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a6613eed3398..0e4b832dff84 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -726,13 +726,6 @@ static void err_print_gt_info(struct drm_i915_error_state_buf *m,
intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}
-static void err_print_gt_display(struct drm_i915_error_state_buf *m,
- struct intel_gt_coredump *gt)
-{
- err_printf(m, "IER: 0x%08x\n", gt->ier);
- err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
-}
-
static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
struct intel_gt_coredump *gt)
{
@@ -878,7 +871,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
print_guc_capture = true;
- err_print_gt_display(m, error->gt);
err_print_gt_global_nonguc(m, error->gt);
err_print_gt_fences(m, error->gt);
@@ -1767,27 +1759,6 @@ gt_record_uc(struct intel_gt_coredump *gt,
return error_uc;
}
-/* Capture display registers. */
-static void gt_record_display_regs(struct intel_gt_coredump *gt)
-{
- struct intel_uncore *uncore = gt->_gt->uncore;
- struct drm_i915_private *i915 = uncore->i915;
-
- if (DISPLAY_VER(i915) >= 6 && DISPLAY_VER(i915) < 20)
- gt->derrmr = intel_uncore_read(uncore, DERRMR);
-
- if (GRAPHICS_VER(i915) >= 8)
- gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- else if (IS_VALLEYVIEW(i915))
- gt->ier = intel_uncore_read(uncore, VLV_IER);
- else if (HAS_PCH_SPLIT(i915))
- gt->ier = intel_uncore_read(uncore, DEIER);
- else if (GRAPHICS_VER(i915) == 2)
- gt->ier = intel_uncore_read16(uncore, GEN2_IER);
- else
- gt->ier = intel_uncore_read(uncore, GEN2_IER);
-}
-
/* Capture all other registers that GuC doesn't capture. */
static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
{
@@ -1821,9 +1792,12 @@ static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
gt->gtier[i] =
intel_uncore_read(uncore, GEN8_GT_IER(i));
gt->ngtier = 4;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (GRAPHICS_VER(i915) >= 5) {
gt->gtier[0] = intel_uncore_read(uncore, GTIER);
gt->ngtier = 1;
+ } else {
+ gt->gtier[0] = intel_uncore_read(uncore, GEN2_IER);
+ gt->ngtier = 1;
}
gt->eir = intel_uncore_read(uncore, EIR);
@@ -2043,7 +2017,6 @@ intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
gc->_gt = gt;
gc->awake = intel_gt_pm_is_awake(gt);
- gt_record_display_regs(gc);
gt_record_global_nonguc_regs(gc);
/*
@@ -2094,7 +2067,7 @@ static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct i915_gpu_coredump *error;
/* Check if GPU capture has been disabled */
@@ -2160,7 +2133,6 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump
void i915_error_state_store(struct i915_gpu_coredump *error)
{
struct drm_i915_private *i915;
- static bool warned;
if (IS_ERR_OR_NULL(error))
return;
@@ -2174,16 +2146,8 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
i915_gpu_coredump_get(error);
- if (!xchg(&warned, true) &&
- ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
- pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
- pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n");
- pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
- pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- i915->drm.primary->index);
- }
+ drm_info(&i915->drm, "GPU error state saved to /sys/class/drm/card%d/error\n",
+ i915->drm.primary->index);
}
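
The once-per-boot pr_info() banner gives way to a per-capture drm_info(), which ties each message to the specific DRM device rather than printing bare text into the global log. A toy userspace illustration of why device-bound logging helps on multi-GPU systems (names invented):

	#include <stdio.h>

	struct example_drm_device { int index; };

	/* Device-bound logging: every line identifies which card it is
	 * about, which is what drm_info() buys over a bare pr_info(). */
	static void example_drm_info(const struct example_drm_device *drm,
				     const char *msg)
	{
		printf("[drm:card%d] %s\n", drm->index, msg);
	}

	int main(void)
	{
		struct example_drm_device drm = { .index = 0 };

		example_drm_info(&drm, "GPU error state saved to /sys/class/drm/card0/error");
		return 0;
	}
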
/**
@@ -2542,8 +2506,8 @@ static const struct bin_attribute error_state_attr = {
.attr.name = "error",
.attr.mode = S_IRUSR | S_IWUSR,
.size = 0,
- .read_new = error_state_read,
- .write_new = error_state_write,
+ .read = error_state_read,
+ .write = error_state_write,
};
void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 749e1c55613e..182324979278 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -146,7 +146,6 @@ struct intel_gt_coredump {
/* Generic register state */
u32 eir;
u32 pgtbl_er;
- u32 ier;
u32 gtier[6], ngtier;
u32 forcewake;
u32 error; /* gen6+ */
@@ -164,8 +163,6 @@ struct intel_gt_coredump {
u32 clock_frequency;
u32 clock_period_ns;
- /* Display related */
- u32 derrmr;
u32 sfc_done[I915_MAX_SFC]; /* gen12 */
u32 nfence;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 37ca4a35daf2..191ed8bb1d9c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,6 +33,7 @@
#include <drm/drm_drv.h>
+#include "display/intel_display_core.h"
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
@@ -230,7 +231,7 @@ out:
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -277,14 +278,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT))
@@ -306,12 +307,12 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -324,7 +325,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -367,14 +368,14 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
@@ -392,12 +393,12 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -418,6 +419,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -458,9 +460,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
if (de_iir) {
raw_reg_write(regs, DEIIR, de_iir);
if (DISPLAY_VER(i915) >= 7)
- ivb_display_irq_handler(i915, de_iir);
+ ivb_display_irq_handler(display, de_iir);
else
- ilk_display_irq_handler(i915, de_iir);
+ ilk_display_irq_handler(display, de_iir);
ret = IRQ_HANDLED;
}
@@ -506,6 +508,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = dev_priv->display;
void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
u32 master_ctl;
@@ -524,7 +527,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- gen8_de_irq_handler(dev_priv, master_ctl);
+ gen8_de_irq_handler(display, master_ctl);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
@@ -556,6 +559,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
@@ -575,13 +579,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -613,6 +617,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs)
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
+ struct intel_display *display = i915->display;
struct intel_gt *gt = to_gt(i915);
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 master_tile_ctl, master_ctl;
@@ -641,36 +646,22 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
gen11_gt_irq_handler(gt, master_ctl);
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
dg1_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
return IRQ_HANDLED;
}
-static void ibx_irq_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- if (HAS_PCH_NOP(dev_priv))
- return;
-
- gen2_irq_reset(uncore, SDE_IRQ_REGS);
-
- if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
- intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
-}
-
-/* drm_dma.h hooks
-*/
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen2_irq_reset(uncore, DE_IRQ_REGS);
@@ -686,45 +677,43 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
- ibx_irq_reset(dev_priv);
+ ibx_display_irq_reset(display);
}
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
+
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
gen5_gt_irq_reset(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_reset(display);
}
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen8_master_intr_disable(intel_uncore_regs(uncore));
gen8_gt_irq_reset(to_gt(dev_priv));
- gen8_display_irq_reset(dev_priv);
+ gen8_display_irq_reset(display);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
-
- if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_reset(dev_priv);
-
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -732,6 +721,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_gt *gt;
unsigned int i;
@@ -741,7 +731,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i)
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -751,6 +741,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
@@ -760,25 +751,25 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_reset(display);
}
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
- ilk_de_irq_postinstall(dev_priv);
+ ilk_de_irq_postinstall(display);
}
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_postinstall(display);
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
@@ -786,20 +777,23 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
gen11_gt_irq_postinstall(gt);
- gen11_de_irq_postinstall(dev_priv);
+ gen11_de_irq_postinstall(display);
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
@@ -809,6 +803,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
struct intel_gt *gt;
@@ -819,7 +814,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
- dg1_de_irq_postinstall(dev_priv);
+ dg1_de_irq_postinstall(display);
dg1_master_intr_enable(intel_uncore_regs(uncore));
intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
@@ -827,11 +822,11 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_postinstall(display);
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
@@ -900,9 +895,10 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -911,6 +907,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -932,26 +929,20 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_ASLE_INTERRUPT;
}
- if (I915_HAS_HOTPLUG(dev_priv)) {
+ if (HAS_HOTPLUG(dev_priv)) {
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
}
gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- i915_enable_asle_pipestat(dev_priv);
+ i915_display_irq_postinstall(display);
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -972,13 +963,13 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (I915_HAS_HOTPLUG(dev_priv) &&
+ if (HAS_HOTPLUG(dev_priv) &&
iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -992,9 +983,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i915_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -1006,9 +997,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -1036,6 +1028,7 @@ static u32 i965_error_mask(struct drm_i915_private *i915)
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -1061,20 +1054,13 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- i915_enable_asle_pipestat(dev_priv);
+ i965_display_irq_postinstall(display);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1096,11 +1082,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -1119,9 +1105,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i965_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, IRQ_HANDLED);
@@ -1167,71 +1153,62 @@ void intel_irq_fini(struct drm_i915_private *i915)
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv))
- return cherryview_irq_handler;
- else if (IS_VALLEYVIEW(dev_priv))
- return valleyview_irq_handler;
- else if (GRAPHICS_VER(dev_priv) == 4)
- return i965_irq_handler;
- else
- return i915_irq_handler;
- } else {
- if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
- return dg1_irq_handler;
- else if (GRAPHICS_VER(dev_priv) >= 11)
- return gen11_irq_handler;
- else if (GRAPHICS_VER(dev_priv) >= 8)
- return gen8_irq_handler;
- else
- return ilk_irq_handler;
- }
+ if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+ return dg1_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) >= 11)
+ return gen11_irq_handler;
+ else if (IS_CHERRYVIEW(dev_priv))
+ return cherryview_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) >= 8)
+ return gen8_irq_handler;
+ else if (IS_VALLEYVIEW(dev_priv))
+ return valleyview_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) >= 5)
+ return ilk_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) == 4)
+ return i965_irq_handler;
+ else
+ return i915_irq_handler;
}
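
With the HAS_GMCH() nesting gone, handler selection above becomes a single ordered chain, and the order is load-bearing: the platform-specific tests must precede the version ranges that would otherwise swallow them (Cherryview is a graphics-version-8 platform and Valleyview a version-7 one, as the placement of those checks implies). A compilable miniature of that dispatch shape, with an invented platform descriptor:

	#include <stdio.h>

	/* Hypothetical platform descriptor mirroring the dispatch above. */
	struct example_pdev {
		int graphics_ver;
		int is_cherryview;	/* a graphics-version-8 variant */
		int is_valleyview;	/* a graphics-version-7 variant */
	};

	static const char *example_pick_handler(const struct example_pdev *p)
	{
		/* Specific platforms are tested before the version ranges
		 * that would otherwise swallow them. */
		if (p->graphics_ver >= 11)
			return "gen11";
		else if (p->is_cherryview)
			return "cherryview";
		else if (p->graphics_ver >= 8)
			return "gen8";
		else if (p->is_valleyview)
			return "valleyview";
		else if (p->graphics_ver >= 5)
			return "ilk";
		else
			return "legacy";
	}

	int main(void)
	{
		struct example_pdev chv = { .graphics_ver = 8, .is_cherryview = 1 };

		printf("%s\n", example_pick_handler(&chv));	/* "cherryview" */
		return 0;
	}
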
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_irq_reset(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 4)
- i965_irq_reset(dev_priv);
- else
- i915_irq_reset(dev_priv);
- } else {
- if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
- dg1_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 11)
- gen11_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 8)
- gen8_irq_reset(dev_priv);
- else
- ilk_irq_reset(dev_priv);
- }
+ if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+ dg1_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 11)
+ gen11_irq_reset(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 8)
+ gen8_irq_reset(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 5)
+ ilk_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) == 4)
+ i965_irq_reset(dev_priv);
+ else
+ i915_irq_reset(dev_priv);
}
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_irq_postinstall(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 4)
- i965_irq_postinstall(dev_priv);
- else
- i915_irq_postinstall(dev_priv);
- } else {
- if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
- dg1_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 11)
- gen11_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 8)
- gen8_irq_postinstall(dev_priv);
- else
- ilk_irq_postinstall(dev_priv);
- }
+ if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+ dg1_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 11)
+ gen11_irq_postinstall(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 8)
+ gen8_irq_postinstall(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 5)
+ ilk_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) == 4)
+ i965_irq_postinstall(dev_priv);
+ else
+ i915_irq_postinstall(dev_priv);
}
/**
@@ -1280,6 +1257,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
@@ -1289,7 +1267,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
free_irq(irq, dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
dev_priv->irqs_enabled = false;
}
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 76e2801619f0..c33bd3d83069 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -100,7 +100,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
r.mm = vma->vm_mm;
r.pfn = pfn;
r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
@@ -140,7 +140,7 @@ int remap_io_sg(struct vm_area_struct *vma,
};
int err;
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
while (offset >= r.sgt.max >> PAGE_SHIFT) {
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 21006c7f615c..b2e311f4791a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -663,7 +663,6 @@ static const struct intel_device_info dg1_info = {
DGFX_FEATURES,
.__runtime.graphics.ip.rel = 10,
PLATFORM(INTEL_DG1),
- .require_force_probe = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
BIT(VCS0) | BIT(VCS2),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index de0b413600a1..1658f1246c6f 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1666,6 +1666,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
struct i915_perf *perf = stream->perf;
struct intel_gt *gt = stream->engine->gt;
struct i915_perf_group *g = stream->engine->oa_group;
+ int m;
if (WARN_ON(stream != g->exclusive_stream))
return;
@@ -1690,10 +1691,9 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_configs(stream);
free_noa_wait(stream);
- if (perf->spurious_report_rs.missed) {
- gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n",
- perf->spurious_report_rs.missed);
- }
+ m = ratelimit_state_get_miss(&perf->spurious_report_rs);
+ if (m)
+ gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n", m);
}
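
The hunk above stops poking spurious_report_rs.missed directly and goes through ratelimit_state_get_miss() instead; judging by this usage, the helper returns the suppressed-event count and clears it, keeping the struct internals private to the ratelimit code. A userspace approximation of that accessor -- the exact kernel semantics are assumed from the call site, not verified here:

	#include <stdio.h>

	/* Minimal stand-in for struct ratelimit_state's miss accounting. */
	struct example_ratelimit {
		int missed;
	};

	/* Approximation of ratelimit_state_get_miss(): hand back the number
	 * of suppressed events and reset the counter for the next window. */
	static int example_get_miss(struct example_ratelimit *rs)
	{
		int m = rs->missed;

		rs->missed = 0;
		return m;
	}

	int main(void)
	{
		struct example_ratelimit rs = { .missed = 3 };
		int m = example_get_miss(&rs);

		if (m)
			printf("%d spurious reports suppressed\n", m);
		return 0;
	}
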
static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index e5a188ce3185..5bc696bfbb0f 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -108,11 +108,11 @@ static unsigned int config_bit(const u64 config)
return other_bit(config);
}
-static u32 config_mask(const u64 config)
+static __always_inline u32 config_mask(const u64 config)
{
unsigned int bit = config_bit(config);
- if (__builtin_constant_p(config))
+ if (__builtin_constant_p(bit))
BUILD_BUG_ON(bit >
BITS_PER_TYPE(typeof_member(struct i915_pmu,
enable)) - 1);
@@ -121,7 +121,7 @@ static u32 config_mask(const u64 config)
BITS_PER_TYPE(typeof_member(struct i915_pmu,
enable)) - 1);
- return BIT(config_bit(config));
+ return BIT(bit);
}
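
The config_mask() hunk is subtler than it looks: the compile-time bound check can only fire when the compiler proves the bit index constant, so the test moves from __builtin_constant_p(config) to __builtin_constant_p(bit), __always_inline keeps the value provable at each call site, and the final BIT(bit) reuses the index instead of recomputing config_bit(). A compilable sketch of the same constant-folding guard, with BUILD_BUG_ON loosely re-created for userspace GCC (build with -O1 or higher so provably dead calls are discarded, which the trick relies on):

	#include <stdio.h>

	/* Loose re-creation of the kernel's BUILD_BUG_ON(), via GCC's
	 * error attribute: the call only survives (and errors out) when
	 * the condition folds to true at compile time. */
	#define BUILD_BUG_ON(cond)						\
		do {								\
			extern void example_build_bug(void)			\
				__attribute__((error("BUILD_BUG_ON failed")));	\
			if (cond)						\
				example_build_bug();				\
		} while (0)

	#define MAX_BIT 31

	static __attribute__((always_inline)) inline unsigned int
	example_mask(unsigned int bit)
	{
		/* The bound check fires only when the argument is provably
		 * constant; forced inlining keeps it provable per call site. */
		if (__builtin_constant_p(bit))
			BUILD_BUG_ON(bit > MAX_BIT);

		return 1u << bit;	/* reuse the computed bit, as the hunk does */
	}

	int main(void)
	{
		printf("0x%x\n", example_mask(3));	/* folds to 0x8 */
		return 0;
	}
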
static bool is_engine_event(struct perf_event *event)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c5064eebe063..03b895897f60 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -144,10 +144,6 @@
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
-#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
-#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
-#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
-
/*
* Reset registers
*/
@@ -187,46 +183,6 @@
/* DPIO registers */
#define DPIO_DEVFN 0
-#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
-#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
-#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
-#define DPIO_SFR_BYPASS (1 << 1)
-#define DPIO_CMNRST (1 << 0)
-
-#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
-#define MIPIO_RST_CTRL (1 << 2)
-
-#define _BXT_PHY_CTL_DDI_A 0x64C00
-#define _BXT_PHY_CTL_DDI_B 0x64C10
-#define _BXT_PHY_CTL_DDI_C 0x64C20
-#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
-#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
-#define BXT_PHY_LANE_ENABLED (1 << 8)
-#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
- _BXT_PHY_CTL_DDI_B)
-
-#define _PHY_CTL_FAMILY_DDI 0x64C90
-#define _PHY_CTL_FAMILY_EDP 0x64C80
-#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
-#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) \
- _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
- _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
-
-/* UAIMI scratch pad register 1 */
-#define UAIMI_SPR1 _MMIO(0x4F074)
-/* SKL VccIO mask */
-#define SKL_VCCIO_MASK 0x1
-/* SKL balance leg register */
-#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
-/* I_boost values */
-#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
-#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
-/* Balance leg disable bits */
-#define BALANCE_LEG_DISABLE_SHIFT 23
-#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
-
/*
* Fence registers
* [0-7] @ 0x2000 gen2,gen3
@@ -372,16 +328,6 @@
#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070)
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
-#define ILK_GTT_FAULT _MMIO(0x44040) /* ilk/snb */
-#define GTT_FAULT_INVALID_GTT_PTE (1 << 7)
-#define GTT_FAULT_INVALID_PTE_DATA (1 << 6)
-#define GTT_FAULT_CURSOR_B_FAULT (1 << 5)
-#define GTT_FAULT_CURSOR_A_FAULT (1 << 4)
-#define GTT_FAULT_SPRITE_B_FAULT (1 << 3)
-#define GTT_FAULT_SPRITE_A_FAULT (1 << 2)
-#define GTT_FAULT_PRIMARY_B_FAULT (1 << 1)
-#define GTT_FAULT_PRIMARY_A_FAULT (1 << 0)
-
#define GEN7_ERR_INT _MMIO(0x44040)
#define ERR_INT_POISON (1 << 31)
#define ERR_INT_INVALID_GTT_PTE (1 << 29)
@@ -413,25 +359,6 @@
#define CLAIM_ER_OVERFLOW REG_BIT(16)
#define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0)
-#define DERRMR _MMIO(0x44050)
-/* Note that HBLANK events are reserved on bdw+ */
-#define DERRMR_PIPEA_SCANLINE (1 << 0)
-#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
-#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
-#define DERRMR_PIPEA_VBLANK (1 << 3)
-#define DERRMR_PIPEA_HBLANK (1 << 5)
-#define DERRMR_PIPEB_SCANLINE (1 << 8)
-#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
-#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
-#define DERRMR_PIPEB_VBLANK (1 << 11)
-#define DERRMR_PIPEB_HBLANK (1 << 13)
-/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
-#define DERRMR_PIPEC_SCANLINE (1 << 14)
-#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
-#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
-#define DERRMR_PIPEC_VBLANK (1 << 21)
-#define DERRMR_PIPEC_HBLANK (1 << 22)
-
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
@@ -458,11 +385,6 @@
#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
-#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
- VLV_IER, \
- VLV_IIR)
-
-#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
#define ESR _MMIO(0x20b8)
@@ -475,16 +397,6 @@
#define GEN2_ERROR_REGS I915_ERROR_REGS(EMR, EIR)
-#define VLV_EIR _MMIO(VLV_DISPLAY_BASE + 0x20b0)
-#define VLV_EMR _MMIO(VLV_DISPLAY_BASE + 0x20b4)
-#define VLV_ESR _MMIO(VLV_DISPLAY_BASE + 0x20b8)
-#define VLV_ERROR_GUNIT_TLB_DATA (1 << 6)
-#define VLV_ERROR_GUNIT_TLB_PTE (1 << 5)
-#define VLV_ERROR_PAGE_TABLE (1 << 4)
-#define VLV_ERROR_CLAIM (1 << 0)
-
-#define VLV_ERROR_REGS I915_ERROR_REGS(VLV_EMR, VLV_EIR)
-
#define INSTPM _MMIO(0x20c0)
#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
@@ -509,23 +421,6 @@
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
-#define _MBUS_ABOX0_CTL 0x45038
-#define _MBUS_ABOX1_CTL 0x45048
-#define _MBUS_ABOX2_CTL 0x4504C
-#define MBUS_ABOX_CTL(x) \
- _MMIO(_PICK_EVEN_2RANGES(x, 2, \
- _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
- _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
-
-#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
-#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
-#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
-#define MBUS_ABOX_B_CREDIT(x) ((x) << 16)
-#define MBUS_ABOX_BT_CREDIT_POOL2_MASK (0x1F << 8)
-#define MBUS_ABOX_BT_CREDIT_POOL2(x) ((x) << 8)
-#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
-#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
-
/*
* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default.
@@ -700,172 +595,6 @@
#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2)
#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3)
-#define IPS_CTL _MMIO(0x43408)
-#define IPS_ENABLE REG_BIT(31)
-#define IPS_FALSE_COLOR REG_BIT(4)
-
-/*
- * Clock control & power management
- */
-#define _DPLL_A 0x6014
-#define _DPLL_B 0x6018
-#define _CHV_DPLL_C 0x6030
-#define DPLL(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
- (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
-
-#define VGA0 _MMIO(0x6000)
-#define VGA1 _MMIO(0x6004)
-#define VGA_PD _MMIO(0x6010)
-#define VGA0_PD_P2_DIV_4 (1 << 7)
-#define VGA0_PD_P1_DIV_2 (1 << 5)
-#define VGA0_PD_P1_SHIFT 0
-#define VGA0_PD_P1_MASK (0x1f << 0)
-#define VGA1_PD_P2_DIV_4 (1 << 15)
-#define VGA1_PD_P1_DIV_2 (1 << 13)
-#define VGA1_PD_P1_SHIFT 8
-#define VGA1_PD_P1_MASK (0x1f << 8)
-#define DPLL_VCO_ENABLE (1 << 31)
-#define DPLL_SDVO_HIGH_SPEED (1 << 30)
-#define DPLL_DVO_2X_MODE (1 << 30)
-#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
-#define DPLL_SYNCLOCK_ENABLE (1 << 29)
-#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
-#define DPLL_VGA_MODE_DIS (1 << 28)
-#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
-#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
-#define DPLL_MODE_MASK (3 << 26)
-#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
-#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
-#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
-#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
-#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define DPLL_LOCK_VLV (1 << 15)
-#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
-#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
-#define DPLL_SSC_REF_CLK_CHV (1 << 13)
-#define DPLL_PORTC_READY_MASK (0xf << 4)
-#define DPLL_PORTB_READY_MASK (0xf)
-
-#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
-
-/* Additional CHV pll/phy registers */
-#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
-#define DPLL_PORTD_READY_MASK (0xf)
-#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
-#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
-#define PHY_LDO_DELAY_0NS 0x0
-#define PHY_LDO_DELAY_200NS 0x1
-#define PHY_LDO_DELAY_600NS 0x2
-#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
-#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
-#define PHY_CH_SU_PSR 0x1
-#define PHY_CH_DEEP_PSR 0x7
-#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
-#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
-#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
-#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
-#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
-#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
-
-/*
- * The i830 generation, in LVDS mode, defines P1 as the bit number set within
- * this field (only one bit may be set).
- */
-#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
-#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
-/* i830, required in DVO non-gang */
-#define PLL_P2_DIVIDE_BY_4 (1 << 23)
-#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
-#define PLL_REF_INPUT_DREFCLK (0 << 13)
-#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
-#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
-#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
-#define PLL_REF_INPUT_MASK (3 << 13)
-#define PLL_LOAD_PULSE_PHASE_SHIFT 9
-/* Ironlake */
-# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
-# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
-# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
-# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
-# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
-
-/*
- * Parallel to Serial Load Pulse phase selection.
- * Selects the phase for the 10X DPLL clock for the PCIe
- * digital display port. The range is 4 to 13; 10 or more
- * is just a flip delay. The default is 6
- */
-#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
-#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
-/*
- * SDVO multiplier for 945G/GM. Not used on 965.
- */
-#define SDVO_MULTIPLIER_MASK 0x000000ff
-#define SDVO_MULTIPLIER_SHIFT_HIRES 4
-#define SDVO_MULTIPLIER_SHIFT_VGA 0
-
-#define _DPLL_A_MD 0x601c
-#define _DPLL_B_MD 0x6020
-#define _CHV_DPLL_C_MD 0x603c
-#define DPLL_MD(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
- (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
-
-/*
- * UDI pixel divider, controlling how many pixels are stuffed into a packet.
- *
- * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
- */
-#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
-#define DPLL_MD_UDI_DIVIDER_SHIFT 24
-/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
-#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
-#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
-/*
- * SDVO/UDI pixel multiplier.
- *
- * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
- * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
- * modes, the bus rate would be below the limits, so SDVO allows for stuffing
- * dummy bytes in the datastream at an increased clock rate, with both sides of
- * the link knowing how many bytes are fill.
- *
- * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
- * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
- * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
- * through an SDVO command.
- *
- * This register field has values of multiplication factor minus 1, with
- * a maximum multiplier of 5 for SDVO.
- */
-#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
-#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
-/*
- * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
- * This best be set to the default value (3) or the CRT won't work. No,
- * I don't entirely understand what this does...
- */
-#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
-#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-
-#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
-
-#define _FPA0 0x6040
-#define _FPA1 0x6044
-#define _FPB0 0x6048
-#define _FPB1 0x604c
-#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
-#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
-#define FP_N_DIV_MASK 0x003f0000
-#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
-#define FP_N_DIV_SHIFT 16
-#define FP_M1_DIV_MASK 0x00003f00
-#define FP_M1_DIV_SHIFT 8
-#define FP_M2_DIV_MASK 0x0000003f
-#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
-#define FP_M2_DIV_SHIFT 0
#define DPLL_TEST _MMIO(0x606c)
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
@@ -877,11 +606,13 @@
#define DPLLA_TEST_N_BYPASS (1 << 3)
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
#define D_STATE _MMIO(0x6104)
#define DSTATE_GFX_RESET_I830 (1 << 6)
#define DSTATE_PLL_D3_OFF (1 << 3)
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
+
#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
@@ -997,27 +728,6 @@
#define RAMCLK_GATE_D _MMIO(0x6210) /* CRL only */
#define DEUC _MMIO(0x6214) /* CRL only */
-#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
-#define FW_CSPWRDWNEN (1 << 15)
-
-#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
-
-#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
-#define CDCLK_FREQ_SHIFT 4
-#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
-#define CZCLK_FREQ_MASK 0xf
-
-#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
-#define PFI_CREDIT_63 (9 << 28) /* chv only */
-#define PFI_CREDIT_31 (8 << 28) /* chv only */
-#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
-#define PFI_CREDIT_RESEND (1 << 27)
-#define VGA_FAST_MODE_DISABLE (1 << 14)
-
-#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
-
-#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
-
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
@@ -1048,26 +758,11 @@
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
/*
- * Overlay regs
- */
-
-#define OVADD _MMIO(0x30000)
-#define DOVSTA _MMIO(0x30008)
-#define OC_BUF (0x3 << 20)
-#define OGAMC5 _MMIO(0x30010)
-#define OGAMC4 _MMIO(0x30014)
-#define OGAMC3 _MMIO(0x30018)
-#define OGAMC2 _MMIO(0x3001c)
-#define OGAMC1 _MMIO(0x30020)
-#define OGAMC0 _MMIO(0x30024)
-
-/*
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
#define DARBF_GATING_DIS REG_BIT(27)
-#define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15)
-#define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14)
+#define MTL_PIPEDMC_GATING_DIS(pipe) REG_BIT(15 - (pipe))
#define PWM2_GATING_DIS REG_BIT(14)
#define PWM1_GATING_DIS REG_BIT(13)
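The new MTL_PIPEDMC_GATING_DIS(pipe) macro above folds the two removed per-pipe definitions into one; a minimal compile-time check (illustrative only, assuming the usual enum pipe values PIPE_A = 0 and PIPE_B = 1) would be:

	static inline void mtl_pipedmc_gating_dis_selftest(void)
	{
		/* pipe A -> bit 15, pipe B -> bit 14, as in the removed defines */
		BUILD_BUG_ON(MTL_PIPEDMC_GATING_DIS(PIPE_A) != REG_BIT(15));
		BUILD_BUG_ON(MTL_PIPEDMC_GATING_DIS(PIPE_B) != REG_BIT(14));
	}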
@@ -1075,615 +770,6 @@
#define TGL_VRH_GATING_DIS REG_BIT(31)
#define DPT_GATING_DIS REG_BIT(22)
-#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
-#define BXT_GMBUS_GATING_DIS (1 << 14)
-
-#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
-#define DPCE_GATING_DIS REG_BIT(17)
-
-#define _CLKGATE_DIS_PSL_A 0x46520
-#define _CLKGATE_DIS_PSL_B 0x46524
-#define _CLKGATE_DIS_PSL_C 0x46528
-#define DUPS1_GATING_DIS (1 << 15)
-#define DUPS2_GATING_DIS (1 << 19)
-#define DUPS3_GATING_DIS (1 << 23)
-#define CURSOR_GATING_DIS REG_BIT(28)
-#define DPF_GATING_DIS (1 << 10)
-#define DPF_RAM_GATING_DIS (1 << 9)
-#define DPFR_GATING_DIS (1 << 8)
-
-#define CLKGATE_DIS_PSL(pipe) \
- _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
-
-#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
-#define _CLKGATE_DIS_PSL_EXT_B 0x46550
-#define PIPEDMC_GATING_DIS REG_BIT(12)
-
-#define CLKGATE_DIS_PSL_EXT(pipe) \
- _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
-
-/*
- * Display engine regs
- */
-
-/* Pipe/transcoder A timing regs */
-#define _TRANS_HTOTAL_A 0x60000
-#define _TRANS_HTOTAL_B 0x61000
-#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
-#define HTOTAL_MASK REG_GENMASK(31, 16)
-#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
-#define HACTIVE_MASK REG_GENMASK(15, 0)
-#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
-
-#define _TRANS_HBLANK_A 0x60004
-#define _TRANS_HBLANK_B 0x61004
-#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
-#define HBLANK_END_MASK REG_GENMASK(31, 16)
-#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
-#define HBLANK_START_MASK REG_GENMASK(15, 0)
-#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
-
-#define _TRANS_HSYNC_A 0x60008
-#define _TRANS_HSYNC_B 0x61008
-#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
-#define HSYNC_END_MASK REG_GENMASK(31, 16)
-#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
-#define HSYNC_START_MASK REG_GENMASK(15, 0)
-#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
-
-#define _TRANS_VTOTAL_A 0x6000c
-#define _TRANS_VTOTAL_B 0x6100c
-#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
-#define VTOTAL_MASK REG_GENMASK(31, 16)
-#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
-#define VACTIVE_MASK REG_GENMASK(15, 0)
-#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
-
-#define _TRANS_VBLANK_A 0x60010
-#define _TRANS_VBLANK_B 0x61010
-#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
-#define VBLANK_END_MASK REG_GENMASK(31, 16)
-#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
-#define VBLANK_START_MASK REG_GENMASK(15, 0)
-#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
-
-#define _TRANS_VSYNC_A 0x60014
-#define _TRANS_VSYNC_B 0x61014
-#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
-#define VSYNC_END_MASK REG_GENMASK(31, 16)
-#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
-#define VSYNC_START_MASK REG_GENMASK(15, 0)
-#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
-
-#define _PIPEASRC 0x6001c
-#define _PIPEBSRC 0x6101c
-#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
-#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
-#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
-#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
-#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
-
-#define _BCLRPAT_A 0x60020
-#define _BCLRPAT_B 0x61020
-#define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
-
-#define _TRANS_VSYNCSHIFT_A 0x60028
-#define _TRANS_VSYNCSHIFT_B 0x61028
-#define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
-
-#define _TRANS_MULT_A 0x6002c
-#define _TRANS_MULT_B 0x6102c
-#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
-
-/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
-#define PORTB_HOTPLUG_INT_EN (1 << 29)
-#define PORTC_HOTPLUG_INT_EN (1 << 28)
-#define PORTD_HOTPLUG_INT_EN (1 << 27)
-#define SDVOB_HOTPLUG_INT_EN (1 << 26)
-#define SDVOC_HOTPLUG_INT_EN (1 << 25)
-#define TV_HOTPLUG_INT_EN (1 << 18)
-#define CRT_HOTPLUG_INT_EN (1 << 9)
-#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
- PORTC_HOTPLUG_INT_EN | \
- PORTD_HOTPLUG_INT_EN | \
- SDVOC_HOTPLUG_INT_EN | \
- SDVOB_HOTPLUG_INT_EN | \
- CRT_HOTPLUG_INT_EN)
-#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
-#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
-/* must use period 64 on GM45 according to docs */
-#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
-#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
-#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
-#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
-#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
-#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
-#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-
-#define PORT_HOTPLUG_STAT(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
-/* HDMI/DP bits are g4x+ */
-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
-#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
-#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
-#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
-#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
-#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
-#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
-#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
-#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
-#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
-#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17)
-/* CRT/TV common between gen3+ */
-#define CRT_HOTPLUG_INT_STATUS (1 << 11)
-#define TV_HOTPLUG_INT_STATUS (1 << 10)
-#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
-#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
-#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
-#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
-#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
-#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
-#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
-
-/* SDVO is different across gen3/4 */
-#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
-#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
-/*
- * Bspec seems to be seriously misleading about the SDVO hpd bits on i965g/gm,
- * since reality corroborates that they're the same as on gen3. But keep these
- * bits here (and the comment!) to help any other lost wanderers back onto the
- * right tracks.
- */
-#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
-#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
-#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
-#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
-#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
- SDVOB_HOTPLUG_INT_STATUS_G4X | \
- SDVOC_HOTPLUG_INT_STATUS_G4X | \
- PORTB_HOTPLUG_INT_STATUS | \
- PORTC_HOTPLUG_INT_STATUS | \
- PORTD_HOTPLUG_INT_STATUS)
-
-#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
- SDVOB_HOTPLUG_INT_STATUS_I915 | \
- SDVOC_HOTPLUG_INT_STATUS_I915 | \
- PORTB_HOTPLUG_INT_STATUS | \
- PORTC_HOTPLUG_INT_STATUS | \
- PORTD_HOTPLUG_INT_STATUS)
-
-/* SDVO and HDMI port control.
- * The same register may be used for SDVO or HDMI */
-#define _GEN3_SDVOB 0x61140
-#define _GEN3_SDVOC 0x61160
-#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
-#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
-#define GEN4_HDMIB GEN3_SDVOB
-#define GEN4_HDMIC GEN3_SDVOC
-#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
-#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
-#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
-#define PCH_SDVOB _MMIO(0xe1140)
-#define PCH_HDMIB PCH_SDVOB
-#define PCH_HDMIC _MMIO(0xe1150)
-#define PCH_HDMID _MMIO(0xe1160)
-
-#define PORT_DFT_I9XX _MMIO(0x61150)
-#define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
-#define DC_BALANCE_RESET_VLV (1 << 31)
-#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
-#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
-#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
-#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
-
-/* Gen 3 SDVO bits: */
-#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_SEL_SHIFT 30
-#define SDVO_PIPE_SEL_MASK (1 << 30)
-#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
-#define SDVO_STALL_SELECT (1 << 29)
-#define SDVO_INTERRUPT_ENABLE (1 << 26)
-/*
- * 915G/GM SDVO pixel multiplier.
- * Programmed value is multiplier - 1, up to 5x.
- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
- */
-#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
-#define SDVO_PORT_MULTIPLY_SHIFT 23
-#define SDVO_PHASE_SELECT_MASK (15 << 19)
-#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
-#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
-#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
-#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
-#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
-#define SDVO_DETECTED (1 << 2)
-/* Bits to be preserved when writing */
-#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
- SDVO_INTERRUPT_ENABLE)
-#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
-
-/* Gen 4 SDVO/HDMI bits: */
-#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
-#define SDVO_COLOR_FORMAT_MASK (7 << 26)
-#define SDVO_ENCODING_SDVO (0 << 10)
-#define SDVO_ENCODING_HDMI (2 << 10)
-#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
-#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
-#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
-#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */
-/* VSYNC/HSYNC bits new with 965, default is to be set */
-#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
-#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
-
-/* Gen 5 (IBX) SDVO/HDMI bits: */
-#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
-#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
-
-/* Gen 6 (CPT) SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_SHIFT_CPT 29
-#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
-#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
-
-/* CHV SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_SHIFT_CHV 24
-#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
-#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
-
-/* Video Data Island Packet control */
-#define VIDEO_DIP_DATA _MMIO(0x61178)
-/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
- * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
- * of the infoframe structure specified by CEA-861. */
-#define VIDEO_DIP_DATA_SIZE 32
-#define VIDEO_DIP_ASYNC_DATA_SIZE 36
-#define VIDEO_DIP_GMP_DATA_SIZE 36
-#define VIDEO_DIP_VSC_DATA_SIZE 36
-#define VIDEO_DIP_PPS_DATA_SIZE 132
-#define VIDEO_DIP_CTL _MMIO(0x61170)
-/* Pre HSW: */
-#define VIDEO_DIP_ENABLE (1 << 31)
-#define VIDEO_DIP_PORT(port) ((port) << 29)
-#define VIDEO_DIP_PORT_MASK (3 << 29)
-#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
-#define VIDEO_DIP_ENABLE_AVI (1 << 21)
-#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
-#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
-#define VIDEO_DIP_ENABLE_SPD (8 << 21)
-#define VIDEO_DIP_SELECT_AVI (0 << 19)
-#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
-#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
-#define VIDEO_DIP_SELECT_SPD (3 << 19)
-#define VIDEO_DIP_SELECT_MASK (3 << 19)
-#define VIDEO_DIP_FREQ_ONCE (0 << 16)
-#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
-#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
-#define VIDEO_DIP_FREQ_MASK (3 << 16)
-/* HSW and later: */
-#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28)
-#define PSR_VSC_BIT_7_SET (1 << 27)
-#define VSC_SELECT_MASK (0x3 << 25)
-#define VSC_SELECT_SHIFT 25
-#define VSC_DIP_HW_HEA_DATA (0 << 25)
-#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
-#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
-#define VSC_DIP_SW_HEA_DATA (3 << 25)
-#define VDIP_ENABLE_PPS (1 << 24)
-#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
-#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
-#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
-#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
-#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
-#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
-/* ADL and later: */
-#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
-
-#define PCH_GTC_CTL _MMIO(0xe7000)
-#define PCH_GTC_ENABLE (1 << 31)
-
-/* Display Port */
-#define DP_A _MMIO(0x64000) /* eDP */
-#define DP_B _MMIO(0x64100)
-#define DP_C _MMIO(0x64200)
-#define DP_D _MMIO(0x64300)
-#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
-#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
-#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
-#define DP_PORT_EN (1 << 31)
-#define DP_PIPE_SEL_SHIFT 30
-#define DP_PIPE_SEL_MASK (1 << 30)
-#define DP_PIPE_SEL(pipe) ((pipe) << 30)
-#define DP_PIPE_SEL_SHIFT_IVB 29
-#define DP_PIPE_SEL_MASK_IVB (3 << 29)
-#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29)
-#define DP_PIPE_SEL_SHIFT_CHV 16
-#define DP_PIPE_SEL_MASK_CHV (3 << 16)
-#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16)
-
-/* Link training mode - select a suitable mode for each stage */
-#define DP_LINK_TRAIN_PAT_1 (0 << 28)
-#define DP_LINK_TRAIN_PAT_2 (1 << 28)
-#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
-#define DP_LINK_TRAIN_OFF (3 << 28)
-#define DP_LINK_TRAIN_MASK (3 << 28)
-#define DP_LINK_TRAIN_SHIFT 28
-
-/* CPT Link training mode */
-#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
-#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
-#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
-#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
-#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
-#define DP_LINK_TRAIN_SHIFT_CPT 8
-
-/* Signal voltages. These are mostly controlled by the other end */
-#define DP_VOLTAGE_0_4 (0 << 25)
-#define DP_VOLTAGE_0_6 (1 << 25)
-#define DP_VOLTAGE_0_8 (2 << 25)
-#define DP_VOLTAGE_1_2 (3 << 25)
-#define DP_VOLTAGE_MASK (7 << 25)
-#define DP_VOLTAGE_SHIFT 25
-
-/* Signal pre-emphasis levels; like the voltages, the other end tells us
- * what it wants
- */
-#define DP_PRE_EMPHASIS_0 (0 << 22)
-#define DP_PRE_EMPHASIS_3_5 (1 << 22)
-#define DP_PRE_EMPHASIS_6 (2 << 22)
-#define DP_PRE_EMPHASIS_9_5 (3 << 22)
-#define DP_PRE_EMPHASIS_MASK (7 << 22)
-#define DP_PRE_EMPHASIS_SHIFT 22
-
-/* How many wires to use. I guess 3 was too hard */
-#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
-#define DP_PORT_WIDTH_MASK (7 << 19)
-#define DP_PORT_WIDTH_SHIFT 19
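The port width field encodes the lane count as count - 1, so for example (illustrative expansions):

	/* DP_PORT_WIDTH(1) == 0 << 19
	 * DP_PORT_WIDTH(2) == 1 << 19
	 * DP_PORT_WIDTH(4) == 3 << 19
	 */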
-
-/* Mystic DPCD version 1.1 special mode */
-#define DP_ENHANCED_FRAMING (1 << 18)
-
-/* eDP */
-#define DP_PLL_FREQ_270MHZ (0 << 16)
-#define DP_PLL_FREQ_162MHZ (1 << 16)
-#define DP_PLL_FREQ_MASK (3 << 16)
-
-/* locked once port is enabled */
-#define DP_PORT_REVERSAL (1 << 15)
-
-/* eDP */
-#define DP_PLL_ENABLE (1 << 14)
-
-/* sends the clock on lane 15 of the PEG for debug */
-#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
-
-#define DP_SCRAMBLING_DISABLE (1 << 12)
-#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
-
-/* limit RGB values to avoid confusing TVs */
-#define DP_COLOR_RANGE_16_235 (1 << 8)
-
-/* Turn on the audio link */
-#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
-
-/* vs and hs sync polarity */
-#define DP_SYNC_VS_HIGH (1 << 4)
-#define DP_SYNC_HS_HIGH (1 << 3)
-
-/* A fantasy */
-#define DP_DETECTED (1 << 2)
-
-/*
- * Computing GMCH M and N values for the Display Port link
- *
- * GMCH M/N = dot clock * bytes per pixel / (ls_clk * # of lanes)
- *
- * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
- *
- * The GMCH value is used internally
- *
- * bytes_per_pixel is the number of bytes coming out of the plane,
- * which is after the LUTs, so we want the bytes for our color format.
- * For our current usage, this is always 3, one byte each for R, G and B.
- */
-#define _PIPEA_DATA_M_G4X 0x70050
-#define _PIPEB_DATA_M_G4X 0x71050
-#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
-/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define TU_SIZE_MASK REG_GENMASK(30, 25)
-#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */
-#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0)
-#define DATA_LINK_N_MAX (0x800000)
-
-#define _PIPEA_DATA_N_G4X 0x70054
-#define _PIPEB_DATA_N_G4X 0x71054
-#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
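A minimal sketch of the data ratio described above (names are illustrative; the real driver additionally reduces the pair so both values fit the 24-bit fields):

	static void gmch_data_m_n(u32 dot_clock, u32 bytes_per_pixel,
				  u32 ls_clk, u32 nlanes, u64 *m, u64 *n)
	{
		*m = (u64)dot_clock * bytes_per_pixel;	/* data sent per pixel clock */
		*n = (u64)ls_clk * nlanes;		/* link capacity */
	}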
-
-/*
- * Computing Link M and N values for the Display Port link
- *
- * Link M / N = pixel_clock / ls_clk
- *
- * (the DP spec calls pixel_clock the 'strm_clk')
- *
- * The Link value is transmitted in the Main Stream
- * Attributes and VB-ID.
- */
-#define _PIPEA_LINK_M_G4X 0x70060
-#define _PIPEB_LINK_M_G4X 0x71060
-#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
-
-#define _PIPEA_LINK_N_G4X 0x70064
-#define _PIPEB_LINK_N_G4X 0x71064
-#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
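The link ratio is simpler: M = pixel_clock, N = ls_clk. Both M/N pairs are then conventionally reduced until they fit the 24-bit register fields; a hedged sketch of such a reduction (the helper name is illustrative, the i915 driver has its own variant):

	static void reduce_m_n(u64 *m, u64 *n)
	{
		/* halve both until they fit DATA_LINK_M_N_MASK */
		while (*m > DATA_LINK_M_N_MASK || *n > DATA_LINK_M_N_MASK) {
			*m >>= 1;
			*n >>= 1;
		}
	}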
-
-/* Pipe A */
-#define _PIPEADSL 0x70000
-#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
-#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
-#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
-
-#define _TRANSACONF 0x70008
-#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
-#define TRANSCONF_ENABLE REG_BIT(31)
-#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
-#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
-#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
-#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
-#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
-#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
-#define TRANSCONF_FORCE_BORDER REG_BIT(25)
-#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
-#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
-#define TRANSCONF_GAMMA_MODE_8BIT	REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 0)
-#define TRANSCONF_GAMMA_MODE_10BIT	REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 1)
-#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
-#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
-#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
-#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
-#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
-#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
-#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
-#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
-#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
-/*
- * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
- * DBL=power saving pixel doubling, PF-ID* requires panel fitter
- */
-#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
-#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
-#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
-#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
-#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
-#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
-#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
-#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
-#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
-#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
-#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
-#define TRANSCONF_WGC_ENABLE REG_BIT(15) /* vlv/chv only */
-#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
-#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
-#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
-#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
-#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
-#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
-#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
-#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
-#define TRANSCONF_DITHER_EN REG_BIT(4)
-#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
-#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
-#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
-#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
-#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
-#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
-
-#define _PIPEASTAT 0x70024
-#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
-#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
-#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
-#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
-#define PIPE_CRC_DONE_ENABLE (1UL << 28)
-#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
-#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
-#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
-#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
-#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
-#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
-#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
-#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
-#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
-#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
-#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
-#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
-#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
-#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
-#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
-#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
-#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
-#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
-#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
-#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
-#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
-#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
-#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
-#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
-#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
-#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
-#define PIPE_DPST_EVENT_STATUS (1UL << 7)
-#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
-#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
-#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
-#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
-#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
-#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
-#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
-#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
-#define PIPE_HBLANK_INT_STATUS (1UL << 0)
-#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
-#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
-#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
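Per the two masks above, enable bits live in the upper half of PIPESTAT and the corresponding status bits in the lower half, and status bits are cleared by writing 1. A hedged sketch of arming one event (read32/write32 stand in for the platform MMIO accessors and are assumptions):

	static void pipestat_enable_vblank(u32 (*read32)(u32),
					   void (*write32)(u32, u32),
					   u32 pipestat_reg)
	{
		u32 val = read32(pipestat_reg);

		/* set the enable bit and clear any stale status bit */
		val |= PIPE_VBLANK_INTERRUPT_ENABLE | PIPE_VBLANK_INTERRUPT_STATUS;
		write32(pipestat_reg, val);
	}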
-
-#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
-#define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
-#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
-
-#define _PIPE_MISC_A 0x70030
-#define _PIPE_MISC_B 0x71030
-#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
-#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
-#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
-#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
-#define PIPE_MISC_PSR_MASK_PRIMARY_FLIP REG_BIT(23) /* bdw */
-#define PIPE_MISC_PSR_MASK_SPRITE_ENABLE REG_BIT(22) /* bdw */
-#define PIPE_MISC_PSR_MASK_PIPE_REG_WRITE REG_BIT(21) /* skl+ */
-#define PIPE_MISC_PSR_MASK_CURSOR_MOVE REG_BIT(21) /* bdw */
-#define PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT REG_BIT(20)
-#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
-#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
-/*
- * For Display < 13, bits 5-7 of PIPE_MISC represent DITHER BPC with
- * valid values of: 6, 8, 10 BPC.
- * On ADLP+, bits 5-7 represent PORT OUTPUT BPC with valid values of:
- * 6, 8, 10, 12 BPC.
- */
-#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
-#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
-#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
-#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
-#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
-#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
-#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
-#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
-#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
-#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
-
-#define _PIPE_MISC2_A 0x7002C
-#define _PIPE_MISC2_B 0x7102C
-#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
-#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
-#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
-#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
-#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
-#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
-
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29)
#define PIPEB_HLINE_INT_EN REG_BIT(28)
@@ -1705,141 +791,6 @@
#define SPRITEE_FLIPDONE_INT_EN REG_BIT(9)
#define PLANEC_FLIPDONE_INT_EN REG_BIT(8)
-#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
-#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
-#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
-#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
-#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
-#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
-#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
-#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
-#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
-#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
-#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
-#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
-#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
-#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
-#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
-#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
-#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
-#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
-#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
-#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
-#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
-#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
-#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
-#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
-#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
-#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
-#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
-#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
-
-#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
-#define CBR_PND_DEADLINE_DISABLE (1 << 31)
-#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
-
-#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
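For reference, the per-pipe bit expansion above works out to (assuming PIPE_B = 1 and PIPE_C = 2):

	/* CBR_DPLLBMD_PIPE(PIPE_B) -> bit 18  (7 + 1 * 11)
	 * CBR_DPLLBMD_PIPE(PIPE_C) -> bit 29  (7 + 2 * 11)
	 */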
-
-/*
- * The two pipe frame counter registers are not synchronized, so
- * reading a stable value is somewhat tricky. The following code
- * should work:
- *
- * do {
- * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
- * PIPE_FRAME_HIGH_SHIFT);
- * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
- * PIPE_FRAME_LOW_SHIFT);
- * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
- * PIPE_FRAME_HIGH_SHIFT);
- * } while (high1 != high2);
- * frame = (high1 << 8) | low1;
- */
-#define _PIPEAFRAMEHIGH 0x70040
-#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
-#define PIPE_FRAME_HIGH_MASK 0x0000ffff
-#define PIPE_FRAME_HIGH_SHIFT 0
-
-#define _PIPEAFRAMEPIXEL 0x70044
-#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
-#define PIPE_FRAME_LOW_MASK 0xff000000
-#define PIPE_FRAME_LOW_SHIFT 24
-#define PIPE_PIXEL_MASK 0x00ffffff
-#define PIPE_PIXEL_SHIFT 0
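A compilable restatement of the comment's read loop; read32() stands in for the platform MMIO read, and both it and the helper name are assumptions:

	static u32 pipe_frame_count(u32 (*read32)(u32 reg_offset))
	{
		u32 high1, high2, low;

		do {
			high1 = (read32(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
				PIPE_FRAME_HIGH_SHIFT;
			low = (read32(_PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
				PIPE_FRAME_LOW_SHIFT;
			high2 = (read32(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
				PIPE_FRAME_HIGH_SHIFT;
		} while (high1 != high2);

		/* 16-bit high word, 8-bit low word */
		return (high1 << 8) | low;
	}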
-
-/* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_G4X 0x70040
-#define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
-
-#define _PIPEA_FLIPCOUNT_G4X 0x70044
-#define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
-
-/* CHV pipe B blender */
-#define _CHV_BLEND_A 0x60a00
-#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
-#define CHV_BLEND_MASK REG_GENMASK(31, 30)
-#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0)
-#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1)
-#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2)
-
-#define _CHV_CANVAS_A 0x60a04
-#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
-#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20)
-#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10)
-#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0)
-
-/* Display/Sprite base address macros */
-#define DISP_BASEADDR_MASK (0xfffff000)
-#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
-#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
-
-/*
- * VBIOS flags
- * gen2:
- * [00:06] alm,mgm
- * [10:16] all
- * [30:32] alm,mgm
- * gen3+:
- * [00:0f] all
- * [10:1f] all
- * [30:32] all
- */
-#define SWF0(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
-#define SWF1(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
-#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
-#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
-
-/* VBIOS regs */
-#define VGACNTRL _MMIO(0x71400)
-# define VGA_DISP_DISABLE (1 << 31)
-# define VGA_2X_MODE (1 << 30)
-# define VGA_PIPE_B_SELECT (1 << 29)
-
-#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
-
-/* Ironlake */
-
-#define CPU_VGACNTRL _MMIO(0x41000)
-
-#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
-#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
-#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
-#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
-#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
-#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
-
-/* refresh rate hardware control */
-#define RR_HW_CTL _MMIO(0x45300)
-#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
-#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
-
#define PCH_3DCGDIS0 _MMIO(0x46020)
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -1847,211 +798,6 @@
#define PCH_3DCGDIS1 _MMIO(0x46024)
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
-#define _PIPEA_DATA_M1 0x60030
-#define _PIPEB_DATA_M1 0x61030
-#define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
-
-#define _PIPEA_DATA_N1 0x60034
-#define _PIPEB_DATA_N1 0x61034
-#define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
-
-#define _PIPEA_DATA_M2 0x60038
-#define _PIPEB_DATA_M2 0x61038
-#define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
-
-#define _PIPEA_DATA_N2 0x6003c
-#define _PIPEB_DATA_N2 0x6103c
-#define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
-
-#define _PIPEA_LINK_M1 0x60040
-#define _PIPEB_LINK_M1 0x61040
-#define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
-
-#define _PIPEA_LINK_N1 0x60044
-#define _PIPEB_LINK_N1 0x61044
-#define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
-
-#define _PIPEA_LINK_M2 0x60048
-#define _PIPEB_LINK_M2 0x61048
-#define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
-
-#define _PIPEA_LINK_N2 0x6004c
-#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
-
-/*
- * Skylake scalers
- */
-#define _ID(id, a, b) _PICK_EVEN(id, a, b)
-#define _PS_1A_CTRL 0x68180
-#define _PS_2A_CTRL 0x68280
-#define _PS_1B_CTRL 0x68980
-#define _PS_2B_CTRL 0x68A80
-#define _PS_1C_CTRL 0x69180
-#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
- _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
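A worked expansion to make the two-level (pipe, scaler id) indexing concrete, assuming PIPE_B = 1:

	/* SKL_PS_CTRL(PIPE_B, 1)
	 *   -> _MMIO_PIPE(PIPE_B, _ID(1, 0x68180, 0x68280),
	 *                         _ID(1, 0x68980, 0x68A80))
	 *   -> _MMIO(0x68A80), i.e. _PS_2B_CTRL, the second scaler on pipe B
	 */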
-#define PS_SCALER_EN REG_BIT(31)
-#define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */
-#define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0)
-#define PS_SCALER_TYPE_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 1)
-#define SKL_PS_SCALER_MODE_MASK REG_GENMASK(29, 28) /* skl/bxt */
-#define SKL_PS_SCALER_MODE_DYN REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 0)
-#define SKL_PS_SCALER_MODE_HQ REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 1)
-#define SKL_PS_SCALER_MODE_NV12 REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 2)
-#define PS_SCALER_MODE_MASK REG_BIT(29) /* glk-tgl */
-#define PS_SCALER_MODE_NORMAL REG_FIELD_PREP(PS_SCALER_MODE_MASK, 0)
-#define PS_SCALER_MODE_PLANAR REG_FIELD_PREP(PS_SCALER_MODE_MASK, 1)
-#define PS_ADAPTIVE_FILTERING_EN REG_BIT(28) /* icl+ */
-#define PS_BINDING_MASK REG_GENMASK(27, 25)
-#define PS_BINDING_PIPE REG_FIELD_PREP(PS_BINDING_MASK, 0)
-#define PS_BINDING_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_MASK, (plane_id) + 1)
-#define PS_FILTER_MASK REG_GENMASK(24, 23)
-#define PS_FILTER_MEDIUM REG_FIELD_PREP(PS_FILTER_MASK, 0)
-#define PS_FILTER_PROGRAMMED REG_FIELD_PREP(PS_FILTER_MASK, 1)
-#define PS_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_FILTER_MASK, 2)
-#define PS_FILTER_BILINEAR REG_FIELD_PREP(PS_FILTER_MASK, 3)
-#define PS_ADAPTIVE_FILTER_MASK REG_BIT(22) /* icl+ */
-#define PS_ADAPTIVE_FILTER_MEDIUM REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 0)
-#define PS_ADAPTIVE_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 1)
-#define PS_PIPE_SCALER_LOC_MASK REG_BIT(21) /* icl+ */
-#define PS_PIPE_SCALER_LOC_AFTER_OUTPUT_CSC	REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 0) /* non-linear */
-#define PS_PIPE_SCALER_LOC_AFTER_CSC	REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 1) /* linear */
-#define PS_VERT3TAP REG_BIT(21) /* skl/bxt */
-#define PS_VERT_INT_INVERT_FIELD REG_BIT(20)
-#define PS_PROG_SCALE_FACTOR REG_BIT(19) /* tgl+ */
-#define PS_PWRUP_PROGRESS REG_BIT(17)
-#define PS_V_FILTER_BYPASS REG_BIT(8)
-#define PS_VADAPT_EN REG_BIT(7) /* skl/bxt */
-#define PS_VADAPT_MODE_MASK REG_GENMASK(6, 5) /* skl/bxt */
-#define PS_VADAPT_MODE_LEAST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 0)
-#define PS_VADAPT_MODE_MOD_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 1)
-#define PS_VADAPT_MODE_MOST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 3)
-#define PS_BINDING_Y_MASK REG_GENMASK(7, 5) /* icl-tgl */
-#define PS_BINDING_Y_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_Y_MASK, (plane_id) + 1)
-#define PS_Y_VERT_FILTER_SELECT_MASK REG_BIT(4) /* glk+ */
-#define PS_Y_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_VERT_FILTER_SELECT_MASK, (set))
-#define PS_Y_HORZ_FILTER_SELECT_MASK REG_BIT(3) /* glk+ */
-#define PS_Y_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_HORZ_FILTER_SELECT_MASK, (set))
-#define PS_UV_VERT_FILTER_SELECT_MASK REG_BIT(2) /* glk+ */
-#define PS_UV_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_VERT_FILTER_SELECT_MASK, (set))
-#define PS_UV_HORZ_FILTER_SELECT_MASK REG_BIT(1) /* glk+ */
-#define PS_UV_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_HORZ_FILTER_SELECT_MASK, (set))
-
-#define _PS_PWR_GATE_1A 0x68160
-#define _PS_PWR_GATE_2A 0x68260
-#define _PS_PWR_GATE_1B 0x68960
-#define _PS_PWR_GATE_2B 0x68A60
-#define _PS_PWR_GATE_1C 0x69160
-#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
- _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
-#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31)
-#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3)
-#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0)
-#define PS_PWR_GATE_SETTLING_TIME_64 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 1)
-#define PS_PWR_GATE_SETTLING_TIME_96 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 2)
-#define PS_PWR_GATE_SETTLING_TIME_128 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 3)
-#define PS_PWR_GATE_SLPEN_MASK REG_GENMASK(1, 0)
-#define PS_PWR_GATE_SLPEN_8 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 0)
-#define PS_PWR_GATE_SLPEN_16 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 1)
-#define PS_PWR_GATE_SLPEN_24 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 2)
-#define PS_PWR_GATE_SLPEN_32 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 3)
-
-#define _PS_WIN_POS_1A 0x68170
-#define _PS_WIN_POS_2A 0x68270
-#define _PS_WIN_POS_1B 0x68970
-#define _PS_WIN_POS_2B 0x68A70
-#define _PS_WIN_POS_1C 0x69170
-#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
- _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
-#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16)
-#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x))
-#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0)
-#define PS_WIN_YPOS(y) REG_FIELD_PREP(PS_WIN_YPOS_MASK, (y))
-
-#define _PS_WIN_SZ_1A 0x68174
-#define _PS_WIN_SZ_2A 0x68274
-#define _PS_WIN_SZ_1B 0x68974
-#define _PS_WIN_SZ_2B 0x68A74
-#define _PS_WIN_SZ_1C 0x69174
-#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
- _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
-#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16)
-#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w))
-#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0)
-#define PS_WIN_YSIZE(h) REG_FIELD_PREP(PS_WIN_YSIZE_MASK, (h))
-
-#define _PS_VSCALE_1A 0x68184
-#define _PS_VSCALE_2A 0x68284
-#define _PS_VSCALE_1B 0x68984
-#define _PS_VSCALE_2B 0x68A84
-#define _PS_VSCALE_1C 0x69184
-#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
- _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
-
-#define _PS_HSCALE_1A 0x68190
-#define _PS_HSCALE_2A 0x68290
-#define _PS_HSCALE_1B 0x68990
-#define _PS_HSCALE_2B 0x68A90
-#define _PS_HSCALE_1C 0x69190
-#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
- _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
-
-#define _PS_VPHASE_1A 0x68188
-#define _PS_VPHASE_2A 0x68288
-#define _PS_VPHASE_1B 0x68988
-#define _PS_VPHASE_2B 0x68A88
-#define _PS_VPHASE_1C 0x69188
-#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
- _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
-#define PS_Y_PHASE_MASK REG_GENMASK(31, 16)
-#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x))
-#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0)
-#define PS_UV_RGB_PHASE(x) REG_FIELD_PREP(PS_UV_RGB_PHASE_MASK, (x))
-#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
-#define PS_PHASE_TRIP (1 << 0)
-
-#define _PS_HPHASE_1A 0x68194
-#define _PS_HPHASE_2A 0x68294
-#define _PS_HPHASE_1B 0x68994
-#define _PS_HPHASE_2B 0x68A94
-#define _PS_HPHASE_1C 0x69194
-#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
- _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
-
-#define _PS_ECC_STAT_1A 0x681D0
-#define _PS_ECC_STAT_2A 0x682D0
-#define _PS_ECC_STAT_1B 0x689D0
-#define _PS_ECC_STAT_2B 0x68AD0
-#define _PS_ECC_STAT_1C 0x691D0
-#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
- _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
-
-#define _PS_COEF_SET0_INDEX_1A 0x68198
-#define _PS_COEF_SET0_INDEX_2A 0x68298
-#define _PS_COEF_SET0_INDEX_1B 0x68998
-#define _PS_COEF_SET0_INDEX_2B 0x68A98
-#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
- _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
- _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
-#define PS_COEF_INDEX_AUTO_INC REG_BIT(10)
-
-#define _PS_COEF_SET0_DATA_1A 0x6819C
-#define _PS_COEF_SET0_DATA_2A 0x6829C
-#define _PS_COEF_SET0_DATA_1B 0x6899C
-#define _PS_COEF_SET0_DATA_2B 0x68A9C
-#define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \
- _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
- _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
-
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0)
@@ -2090,25 +836,6 @@
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
-/* More Ivybridge lolz */
-#define DE_ERR_INT_IVB (1 << 30)
-#define DE_GSE_IVB (1 << 29)
-#define DE_PCH_EVENT_IVB (1 << 28)
-#define DE_DP_A_HOTPLUG_IVB (1 << 27)
-#define DE_AUX_CHANNEL_A_IVB (1 << 26)
-#define DE_EDP_PSR_INT_HSW (1 << 19)
-#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
-#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
-#define DE_PIPEC_VBLANK_IVB (1 << 10)
-#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
-#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
-#define DE_PIPEB_VBLANK_IVB (1 << 5)
-#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
-#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
-#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
-#define DE_PIPEA_VBLANK_IVB (1 << 0)
-#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
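The parameterized forms reproduce the fixed per-plane/pipe bits above (assuming plane/pipe A = 0, B = 1, C = 2), e.g.:

	/* DE_PLANE_FLIP_DONE_IVB(0) == DE_PLANEA_FLIP_DONE_IVB  (bit 3)
	 * DE_PLANE_FLIP_DONE_IVB(2) == DE_PLANEC_FLIP_DONE_IVB  (bit 13)
	 * DE_PIPE_VBLANK_IVB(1)     == DE_PIPEB_VBLANK_IVB      (bit 5)
	 */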
-
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1 << 31)
@@ -2148,8 +875,6 @@
#define GEN8_GT_BCS_IRQ (1 << 1)
#define GEN8_GT_RCS_IRQ (1 << 0)
-#define XELPD_DISPLAY_ERR_FATAL_MASK _MMIO(0x4421c)
-
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
@@ -2166,106 +891,6 @@
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16
-#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
-#define GEN8_PIPE_FIFO_UNDERRUN REG_BIT(31)
-#define GEN8_PIPE_CDCLK_CRC_ERROR REG_BIT(29)
-#define GEN8_PIPE_CDCLK_CRC_DONE REG_BIT(28)
-#define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */
-#define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl+ */
-#define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl+ */
-#define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */
-#define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */
-#define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */
-#define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */
-#define MTL_PLANE_ATS_FAULT REG_BIT(18) /* mtl+ */
-#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
-#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
-#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
-#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */
-#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */
-#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */
-#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id))
-#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
-#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
-#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
-#define GEN9_PIPE_PLANE3_FAULT REG_BIT(9) /* skl+ */
-#define GEN8_PIPE_SPRITE_FAULT REG_BIT(9) /* bdw */
-#define GEN9_PIPE_PLANE2_FAULT REG_BIT(8) /* skl+ */
-#define GEN8_PIPE_PRIMARY_FAULT REG_BIT(8) /* bdw */
-#define GEN9_PIPE_PLANE1_FAULT REG_BIT(7) /* skl+ */
-#define GEN9_PIPE_PLANE4_FLIP_DONE REG_BIT(6) /* skl+ */
-#define GEN9_PIPE_PLANE3_FLIP_DONE REG_BIT(5) /* skl+ */
-#define GEN8_PIPE_SPRITE_FLIP_DONE REG_BIT(5) /* bdw */
-#define GEN9_PIPE_PLANE2_FLIP_DONE REG_BIT(4) /* skl+ */
-#define GEN8_PIPE_PRIMARY_FLIP_DONE REG_BIT(4) /* bdw */
-#define GEN9_PIPE_PLANE1_FLIP_DONE REG_BIT(3) /* skl+ */
-#define GEN9_PIPE_PLANE_FLIP_DONE(plane_id) \
- REG_BIT(((plane_id) >= PLANE_5 ? 16 - PLANE_5 : 3 - PLANE_1) + (plane_id)) /* skl+ */
-#define GEN8_PIPE_SCAN_LINE_EVENT REG_BIT(2)
-#define GEN8_PIPE_VSYNC REG_BIT(1)
-#define GEN8_PIPE_VBLANK REG_BIT(0)
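The skl+ flip-done helper above splits the plane range across two bit groups; illustrative expansions, assuming the usual enum plane_id numbering (PLANE_1 = 0, ..., PLANE_5 = 4, ...):

	/* GEN9_PIPE_PLANE_FLIP_DONE(PLANE_1) -> REG_BIT(3)
	 * GEN9_PIPE_PLANE_FLIP_DONE(PLANE_4) -> REG_BIT(6)
	 * GEN9_PIPE_PLANE_FLIP_DONE(PLANE_5) -> REG_BIT(16)
	 * GEN9_PIPE_PLANE_FLIP_DONE(PLANE_7) -> REG_BIT(18)
	 */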
-
-#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \
- GEN8_DE_PIPE_IER(pipe), \
- GEN8_DE_PIPE_IIR(pipe))
-
-#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
-#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)
-
-#define GEN8_DE_PORT_ISR _MMIO(0x44440)
-#define GEN8_DE_PORT_IMR _MMIO(0x44444)
-#define GEN8_DE_PORT_IIR _MMIO(0x44448)
-#define GEN8_DE_PORT_IER _MMIO(0x4444c)
-#define DSI1_NON_TE (1 << 31)
-#define DSI0_NON_TE (1 << 30)
-#define ICL_AUX_CHANNEL_E (1 << 29)
-#define ICL_AUX_CHANNEL_F (1 << 28)
-#define GEN9_AUX_CHANNEL_D (1 << 27)
-#define GEN9_AUX_CHANNEL_C (1 << 26)
-#define GEN9_AUX_CHANNEL_B (1 << 25)
-#define DSI1_TE (1 << 24)
-#define DSI0_TE (1 << 23)
-#define GEN8_DE_PORT_HOTPLUG(hpd_pin) REG_BIT(3 + _HPD_PIN_DDI(hpd_pin))
-#define BXT_DE_PORT_HOTPLUG_MASK (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_C))
-#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
-#define BXT_DE_PORT_GMBUS (1 << 1)
-#define GEN8_AUX_CHANNEL_A (1 << 0)
-#define TGL_DE_PORT_AUX_USBC6 REG_BIT(13)
-#define XELPD_DE_PORT_AUX_DDIE REG_BIT(13)
-#define TGL_DE_PORT_AUX_USBC5 REG_BIT(12)
-#define XELPD_DE_PORT_AUX_DDID REG_BIT(12)
-#define TGL_DE_PORT_AUX_USBC4 REG_BIT(11)
-#define TGL_DE_PORT_AUX_USBC3 REG_BIT(10)
-#define TGL_DE_PORT_AUX_USBC2 REG_BIT(9)
-#define TGL_DE_PORT_AUX_USBC1 REG_BIT(8)
-#define TGL_DE_PORT_AUX_DDIC REG_BIT(2)
-#define TGL_DE_PORT_AUX_DDIB REG_BIT(1)
-#define TGL_DE_PORT_AUX_DDIA REG_BIT(0)
-
-#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \
- GEN8_DE_PORT_IER, \
- GEN8_DE_PORT_IIR)
-
-#define GEN8_DE_MISC_ISR _MMIO(0x44460)
-#define GEN8_DE_MISC_IMR _MMIO(0x44464)
-#define GEN8_DE_MISC_IIR _MMIO(0x44468)
-#define GEN8_DE_MISC_IER _MMIO(0x4446c)
-#define XELPDP_RM_TIMEOUT REG_BIT(29)
-#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
-#define GEN8_DE_MISC_GSE REG_BIT(27)
-#define GEN8_DE_EDP_PSR REG_BIT(19)
-#define XELPDP_PMDEMAND_RSP REG_BIT(3)
-#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1)
-
-#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \
- GEN8_DE_MISC_IER, \
- GEN8_DE_MISC_IIR)
-
#define GEN8_PCU_ISR _MMIO(0x444e0)
#define GEN8_PCU_IMR _MMIO(0x444e4)
#define GEN8_PCU_IIR _MMIO(0x444e8)
@@ -2298,110 +923,12 @@
#define DG1_MSTR_IRQ REG_BIT(31)
#define DG1_MSTR_TILE(t) REG_BIT(t)
-#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
-#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
-#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
-#define GEN11_DE_PCH_IRQ (1 << 23)
-#define GEN11_DE_MISC_IRQ (1 << 22)
-#define GEN11_DE_HPD_IRQ (1 << 21)
-#define GEN11_DE_PORT_IRQ (1 << 20)
-#define GEN11_DE_PIPE_C (1 << 18)
-#define GEN11_DE_PIPE_B (1 << 17)
-#define GEN11_DE_PIPE_A (1 << 16)
-
-#define GEN11_DE_HPD_ISR _MMIO(0x44470)
-#define GEN11_DE_HPD_IMR _MMIO(0x44474)
-#define GEN11_DE_HPD_IIR _MMIO(0x44478)
-#define GEN11_DE_HPD_IER _MMIO(0x4447c)
-#define GEN11_TC_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
-#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC1))
-#define GEN11_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
-#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
-
-#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \
- GEN11_DE_HPD_IER, \
- GEN11_DE_HPD_IIR)
-
-#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
-#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
-#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4))
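Each type-C pin owns a 4-bit nibble of GEN11_TC_HOTPLUG_CTL; for example (assuming HPD_PORT_TC2 is pin index 1 in the enum):

	/* GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2)      == 8 << 4
	 * GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2) == 2 << 4
	 */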
-
-#define PICAINTERRUPT_ISR _MMIO(0x16FE50)
-#define PICAINTERRUPT_IMR _MMIO(0x16FE54)
-#define PICAINTERRUPT_IIR _MMIO(0x16FE58)
-#define PICAINTERRUPT_IER _MMIO(0x16FE5C)
-#define XELPDP_DP_ALT_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
-#define XELPDP_DP_ALT_HOTPLUG_MASK REG_GENMASK(19, 16)
-#define XELPDP_AUX_TC(hpd_pin) REG_BIT(8 + _HPD_PIN_TC(hpd_pin))
-#define XELPDP_AUX_TC_MASK REG_GENMASK(11, 8)
-#define XE2LPD_AUX_DDI(hpd_pin) REG_BIT(6 + _HPD_PIN_DDI(hpd_pin))
-#define XE2LPD_AUX_DDI_MASK REG_GENMASK(7, 6)
-#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
-#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0)
-
-#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \
- PICAINTERRUPT_IER, \
- PICAINTERRUPT_IIR)
-
-#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200))
-#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6)
-#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5)
-#define XELPDP_TBT_HPD_SHORT_DETECT REG_BIT(4)
-#define XELPDP_DP_ALT_HOTPLUG_ENABLE REG_BIT(2)
-#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1)
-#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0)
-
-#define XELPDP_INITIATE_PMDEMAND_REQUEST(dword) _MMIO(0x45230 + 4 * (dword))
-#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16)
-#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12)
-#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8)
-#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4)
-#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6)
-#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4)
-#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0)
-
-#define XELPDP_PMDEMAND_REQ_ENABLE REG_BIT(31)
-#define XELPDP_PMDEMAND_CDCLK_FREQ_MASK REG_GENMASK(30, 20)
-#define XELPDP_PMDEMAND_DDICLK_FREQ_MASK REG_GENMASK(18, 8)
-#define XELPDP_PMDEMAND_SCALERS_MASK REG_GENMASK(6, 4)
-#define XELPDP_PMDEMAND_PLLS_MASK REG_GENMASK(2, 0)
-
-#define GEN12_DCPR_STATUS_1 _MMIO(0x46440)
-#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26)
-
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT REG_BIT(25)
#define ILK_DPARB_GATE REG_BIT(22)
#define ILK_VSDPFD_FULL REG_BIT(21)
-#define FUSE_STRAP _MMIO(0x42014)
-#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31)
-#define ILK_INTERNAL_DISPLAY_DISABLE REG_BIT(30)
-#define ILK_DISPLAY_DEBUG_DISABLE REG_BIT(29)
-#define IVB_PIPE_C_DISABLE REG_BIT(28)
-#define ILK_HDCP_DISABLE REG_BIT(25)
-#define ILK_eDP_A_DISABLE REG_BIT(24)
-#define HSW_CDCLK_LIMIT REG_BIT(24)
-#define ILK_DESKTOP REG_BIT(23)
-#define HSW_CPU_SSC_ENABLE REG_BIT(21)
-
-#define FUSE_STRAP3 _MMIO(0x42020)
-#define HSW_REF_CLK_SELECT REG_BIT(1)
-
#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE REG_BIT(28)
#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE REG_BIT(9)
@@ -2426,25 +953,6 @@
#define CHICKEN_PAR2_1 _MMIO(0x42090)
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT REG_BIT(14)
-#define CHICKEN_MISC_2 _MMIO(0x42084)
-#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */
-#define BMG_DARB_HALF_BLK_END_BURST REG_BIT(27)
-#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
-#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
-#define GLK_CL2_PWR_DOWN REG_BIT(12)
-#define GLK_CL1_PWR_DOWN REG_BIT(11)
-#define GLK_CL0_PWR_DOWN REG_BIT(10)
-
-#define CHICKEN_MISC_3 _MMIO(0x42088)
-#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
-#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
-#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
-
-#define CHICKEN_MISC_4 _MMIO(0x4208c)
-#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
-#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
-#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
-
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
@@ -2468,72 +976,11 @@
#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
#define BDW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(0) /* bdw */
-#define _CHICKEN_TRANS_A 0x420c0
-#define _CHICKEN_TRANS_B 0x420c4
-#define _CHICKEN_TRANS_C 0x420c8
-#define _CHICKEN_TRANS_EDP 0x420cc
-#define _CHICKEN_TRANS_D 0x420d8
-#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
- [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
- [TRANSCODER_A] = _CHICKEN_TRANS_A, \
- [TRANSCODER_B] = _CHICKEN_TRANS_B, \
- [TRANSCODER_C] = _CHICKEN_TRANS_C, \
- [TRANSCODER_D] = _CHICKEN_TRANS_D))
-#define _MTL_CHICKEN_TRANS_A 0x604e0
-#define _MTL_CHICKEN_TRANS_B 0x614e0
-#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
- _MTL_CHICKEN_TRANS_A, \
- _MTL_CHICKEN_TRANS_B)
-#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
-#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
-#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
-#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
-#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
-#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
-#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
-#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
-#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
-#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
-#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
-#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
-#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
-#define DP_FEC_BS_JITTER_WA REG_BIT(15)
-#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
-#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
-#define HDCP_LINE_REKEY_DISABLE REG_BIT(0)
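An illustrative resolution of the version split above, using the offsets from the two tables:

	/* CHICKEN_TRANS(display, TRANSCODER_B)
	 *   -> _MMIO(0x614e0) on display ver >= 14 (MTL layout)
	 *   -> _MMIO(0x420c4) on older hardware
	 */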
-
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13)
#define DISP_FBC_WM_DIS REG_BIT(15)
-#define DISP_ARB_CTL2 _MMIO(0x45004)
-#define DISP_DATA_PARTITION_5_6 REG_BIT(6)
-#define DISP_IPC_ENABLE REG_BIT(3)
-
-#define GEN7_MSG_CTL _MMIO(0x45010)
-#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
-#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
-
-#define _BW_BUDDY0_CTL 0x45130
-#define _BW_BUDDY1_CTL 0x45140
-#define BW_BUDDY_CTL(x) _MMIO(_PICK_EVEN(x, \
- _BW_BUDDY0_CTL, \
- _BW_BUDDY1_CTL))
-#define BW_BUDDY_DISABLE REG_BIT(31)
-#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
-#define BW_BUDDY_TLB_REQ_TIMER(x) REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, x)
-
-#define _BW_BUDDY0_PAGE_MASK 0x45134
-#define _BW_BUDDY1_PAGE_MASK 0x45144
-#define BW_BUDDY_PAGE_MASK(x) _MMIO(_PICK_EVEN(x, \
- _BW_BUDDY0_PAGE_MASK, \
- _BW_BUDDY1_PAGE_MASK))
-
-#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
-#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
-
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
@@ -2550,520 +997,24 @@
#define MASK_WAKEMEM REG_BIT(13)
#define DDI_CLOCK_REG_ACCESS REG_BIT(7)
-#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
-#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
-#define DCPR_MASK_LPMODE REG_BIT(26)
-#define DCPR_SEND_RESP_IMM REG_BIT(25)
-#define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24)
-
-#define XELPD_CHICKEN_DCPR_3 _MMIO(0x46438)
-#define DMD_RSP_TIMEOUT_DISABLE REG_BIT(19)
-
-#define SKL_DFSM _MMIO(0x51000)
-#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
-#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
-#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
-#define ICL_DFSM_DMC_DISABLE (1 << 23)
-#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
-#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
-#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
-#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
-#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
-#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
-
-#define XE2LPD_DE_CAP _MMIO(0x41100)
-#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
-#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28)
-#define XE2LPD_DE_CAP_DSC_REMOVED 1
-#define XE2LPD_DE_CAP_SCALER_MASK REG_GENMASK(27, 26)
-#define XE2LPD_DE_CAP_SCALER_SINGLE 1
-
-#define SKL_DSSM _MMIO(0x51004)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
-
#define GMD_ID_DISPLAY _MMIO(0x510a0)
#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
#define GMD_ID_STEP REG_GENMASK(5, 0)
-/* GEN11 chicken */
-#define _PIPEA_CHICKEN 0x70038
-#define _PIPEB_CHICKEN 0x71038
-#define _PIPEC_CHICKEN 0x72038
-#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
- _PIPEB_CHICKEN)
-#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30)
-#define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30)
-#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15)
-#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
-#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
-
/* PCH */
-#define PCH_DISPLAY_BASE 0xc0000u
-
-/* south display engine interrupt: IBX */
-#define SDE_AUDIO_POWER_D (1 << 27)
-#define SDE_AUDIO_POWER_C (1 << 26)
-#define SDE_AUDIO_POWER_B (1 << 25)
-#define SDE_AUDIO_POWER_SHIFT (25)
-#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
-#define SDE_GMBUS (1 << 24)
-#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
-#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
-#define SDE_AUDIO_HDCP_MASK (3 << 22)
-#define SDE_AUDIO_TRANSB (1 << 21)
-#define SDE_AUDIO_TRANSA (1 << 20)
-#define SDE_AUDIO_TRANS_MASK (3 << 20)
-#define SDE_POISON (1 << 19)
-/* 18 reserved */
-#define SDE_FDI_RXB (1 << 17)
-#define SDE_FDI_RXA (1 << 16)
-#define SDE_FDI_MASK (3 << 16)
-#define SDE_AUXD (1 << 15)
-#define SDE_AUXC (1 << 14)
-#define SDE_AUXB (1 << 13)
-#define SDE_AUX_MASK (7 << 13)
-/* 12 reserved */
-#define SDE_CRT_HOTPLUG (1 << 11)
-#define SDE_PORTD_HOTPLUG (1 << 10)
-#define SDE_PORTC_HOTPLUG (1 << 9)
-#define SDE_PORTB_HOTPLUG (1 << 8)
-#define SDE_SDVOB_HOTPLUG (1 << 6)
-#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
- SDE_SDVOB_HOTPLUG | \
- SDE_PORTB_HOTPLUG | \
- SDE_PORTC_HOTPLUG | \
- SDE_PORTD_HOTPLUG)
-#define SDE_TRANSB_CRC_DONE (1 << 5)
-#define SDE_TRANSB_CRC_ERR (1 << 4)
-#define SDE_TRANSB_FIFO_UNDER (1 << 3)
-#define SDE_TRANSA_CRC_DONE (1 << 2)
-#define SDE_TRANSA_CRC_ERR (1 << 1)
-#define SDE_TRANSA_FIFO_UNDER (1 << 0)
-#define SDE_TRANS_MASK (0x3f)
-
-/* south display engine interrupt: CPT - CNP */
-#define SDE_AUDIO_POWER_D_CPT (1 << 31)
-#define SDE_AUDIO_POWER_C_CPT (1 << 30)
-#define SDE_AUDIO_POWER_B_CPT (1 << 29)
-#define SDE_AUDIO_POWER_SHIFT_CPT 29
-#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
-#define SDE_AUXD_CPT (1 << 27)
-#define SDE_AUXC_CPT (1 << 26)
-#define SDE_AUXB_CPT (1 << 25)
-#define SDE_AUX_MASK_CPT (7 << 25)
-#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
-#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
-#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
-#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
-#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
-#define SDE_CRT_HOTPLUG_CPT (1 << 19)
-#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
-#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
- SDE_SDVOB_HOTPLUG_CPT | \
- SDE_PORTD_HOTPLUG_CPT | \
- SDE_PORTC_HOTPLUG_CPT | \
- SDE_PORTB_HOTPLUG_CPT)
-#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
- SDE_PORTD_HOTPLUG_CPT | \
- SDE_PORTC_HOTPLUG_CPT | \
- SDE_PORTB_HOTPLUG_CPT | \
- SDE_PORTA_HOTPLUG_SPT)
-#define SDE_GMBUS_CPT (1 << 17)
-#define SDE_ERROR_CPT (1 << 16)
-#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
-#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
-#define SDE_FDI_RXC_CPT (1 << 8)
-#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
-#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
-#define SDE_FDI_RXB_CPT (1 << 4)
-#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
-#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
-#define SDE_FDI_RXA_CPT (1 << 0)
-#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
- SDE_AUDIO_CP_REQ_B_CPT | \
- SDE_AUDIO_CP_REQ_A_CPT)
-#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
- SDE_AUDIO_CP_CHG_B_CPT | \
- SDE_AUDIO_CP_CHG_A_CPT)
-#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
- SDE_FDI_RXB_CPT | \
- SDE_FDI_RXA_CPT)
-
-/* south display engine interrupt: ICP/TGP/MTP */
-#define SDE_PICAINTERRUPT REG_BIT(31)
-#define SDE_GMBUS_ICP (1 << 23)
-#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
-#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */
-#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
-#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
- SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
- SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \
- SDE_DDI_HOTPLUG_ICP(HPD_PORT_A))
-#define SDE_TC_HOTPLUG_MASK_ICP (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
-
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
#define SDEIIR _MMIO(0xc4008)
#define SDEIER _MMIO(0xc400c)
-#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
- SDEIER, \
- SDEIIR)
-
-#define SERR_INT _MMIO(0xc4040)
-#define SERR_INT_POISON (1 << 31)
-#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
-
-/* digital port hotplug */
-#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
-#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
-#define BXT_DDIA_HPD_INVERT (1 << 27)
-#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
-#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
-#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
-#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
-#define PORTD_HOTPLUG_ENABLE (1 << 20)
-#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
-#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
-#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
-#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
-#define PORTC_HOTPLUG_ENABLE (1 << 12)
-#define BXT_DDIC_HPD_INVERT (1 << 11)
-#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
-#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
-#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
-#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
-#define PORTB_HOTPLUG_ENABLE (1 << 4)
-#define BXT_DDIB_HPD_INVERT (1 << 3)
-#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
-#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
-#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
-#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
-#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
- BXT_DDIB_HPD_INVERT | \
- BXT_DDIC_HPD_INVERT)
-
-#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
-#define PORTE_HOTPLUG_ENABLE (1 << 4)
-#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
-#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
-#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
-
-/* These registers reuse the PCH_PORT_HOTPLUG register space. The
- * functionality covered by PCH_PORT_HOTPLUG is split into
- * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
- */
-
-#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
-#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin) (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
-
-#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
-#define ICP_TC_HPD_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define ICP_TC_HPD_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define ICP_TC_HPD_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
-
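/*
 * Editor's illustration, not part of the patch: SHOTPLUG_CTL_DDI and
 * SHOTPLUG_CTL_TC give each hot-plug pin a 4-bit field, with bit 3 as
 * the enable and bits 1:0 as the no/short/long detect status. The
 * standalone sketch below only mirrors that nibble arithmetic; the
 * helper names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t hpd_enable(unsigned int pin)
{
	return 0x8u << (pin * 4);	/* bit 3 of the pin's nibble */
}

static uint32_t hpd_long_detect(unsigned int pin)
{
	return 0x2u << (pin * 4);	/* status 2 = long pulse */
}

int main(void)
{
	/* pin index 2 occupies bits 11:8 */
	printf("enable 0x%08x, long 0x%08x\n",
	       hpd_enable(2), hpd_long_detect(2));
	return 0;
}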
-#define SHPD_FILTER_CNT _MMIO(0xc4038)
-#define SHPD_FILTER_CNT_500_ADJ 0x001D9
-#define SHPD_FILTER_CNT_250 0x000F8
-
-#define _PCH_DPLL_A 0xc6014
-#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
-
-#define _PCH_FPA0 0xc6040
-#define _PCH_FPB0 0xc6048
-#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define FP_CB_TUNE (0x3 << 22)
-
-#define _PCH_FPA1 0xc6044
-#define _PCH_FPB1 0xc604c
-#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
-
-#define PCH_DPLL_TEST _MMIO(0xc606c)
-
-#define PCH_DREF_CONTROL _MMIO(0xC6200)
-#define DREF_CONTROL_MASK 0x7fc3
-#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
-#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
-#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
-#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
-#define DREF_SSC_SOURCE_DISABLE (0 << 11)
-#define DREF_SSC_SOURCE_ENABLE (2 << 11)
-#define DREF_SSC_SOURCE_MASK (3 << 11)
-#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
-#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
-#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
-#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
-#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
-#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
-#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
-#define DREF_SSC4_DOWNSPREAD (0 << 6)
-#define DREF_SSC4_CENTERSPREAD (1 << 6)
-#define DREF_SSC1_DISABLE (0 << 1)
-#define DREF_SSC1_ENABLE (1 << 1)
-#define DREF_SSC4_DISABLE (0)
-#define DREF_SSC4_ENABLE (1)
-
-#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
-#define FDL_TP1_TIMER_SHIFT 12
-#define FDL_TP1_TIMER_MASK (3 << 12)
-#define FDL_TP2_TIMER_SHIFT 10
-#define FDL_TP2_TIMER_MASK (3 << 10)
-#define RAWCLK_FREQ_MASK 0x3ff
-#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
-#define CNP_RAWCLK_DIV(div) ((div) << 16)
-#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
-#define CNP_RAWCLK_DEN(den) ((den) << 26)
-#define ICP_RAWCLK_NUM(num) ((num) << 11)
-
-#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
-
-#define PCH_SSC4_PARMS _MMIO(0xc6210)
-#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
-
-#define PCH_DPLL_SEL _MMIO(0xc7000)
-#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
-#define TRANS_DPLLA_SEL(pipe) 0
-#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
-
-/* transcoder */
-
-#define _PCH_TRANS_HTOTAL_A 0xe0000
-#define _PCH_TRANS_HTOTAL_B 0xe1000
-#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
-#define TRANS_HTOTAL_SHIFT 16
-#define TRANS_HACTIVE_SHIFT 0
-
-#define _PCH_TRANS_HBLANK_A 0xe0004
-#define _PCH_TRANS_HBLANK_B 0xe1004
-#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
-#define TRANS_HBLANK_END_SHIFT 16
-#define TRANS_HBLANK_START_SHIFT 0
-
-#define _PCH_TRANS_HSYNC_A 0xe0008
-#define _PCH_TRANS_HSYNC_B 0xe1008
-#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
-#define TRANS_HSYNC_END_SHIFT 16
-#define TRANS_HSYNC_START_SHIFT 0
-
-#define _PCH_TRANS_VTOTAL_A 0xe000c
-#define _PCH_TRANS_VTOTAL_B 0xe100c
-#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
-#define TRANS_VTOTAL_SHIFT 16
-#define TRANS_VACTIVE_SHIFT 0
-
-#define _PCH_TRANS_VBLANK_A 0xe0010
-#define _PCH_TRANS_VBLANK_B 0xe1010
-#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
-#define TRANS_VBLANK_END_SHIFT 16
-#define TRANS_VBLANK_START_SHIFT 0
-
-#define _PCH_TRANS_VSYNC_A 0xe0014
-#define _PCH_TRANS_VSYNC_B 0xe1014
-#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
-#define TRANS_VSYNC_END_SHIFT 16
-#define TRANS_VSYNC_START_SHIFT 0
-
-#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
-#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
-#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
-
-#define _PCH_TRANSA_DATA_M1 0xe0030
-#define _PCH_TRANSB_DATA_M1 0xe1030
-#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
-
-#define _PCH_TRANSA_DATA_N1 0xe0034
-#define _PCH_TRANSB_DATA_N1 0xe1034
-#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
-
-#define _PCH_TRANSA_DATA_M2 0xe0038
-#define _PCH_TRANSB_DATA_M2 0xe1038
-#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
-
-#define _PCH_TRANSA_DATA_N2 0xe003c
-#define _PCH_TRANSB_DATA_N2 0xe103c
-#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
-
-#define _PCH_TRANSA_LINK_M1 0xe0040
-#define _PCH_TRANSB_LINK_M1 0xe1040
-#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
-
-#define _PCH_TRANSA_LINK_N1 0xe0044
-#define _PCH_TRANSB_LINK_N1 0xe1044
-#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
-
-#define _PCH_TRANSA_LINK_M2 0xe0048
-#define _PCH_TRANSB_LINK_M2 0xe1048
-#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
-
-#define _PCH_TRANSA_LINK_N2 0xe004c
-#define _PCH_TRANSB_LINK_N2 0xe104c
-#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
-
-/* Per-transcoder DIP controls (PCH) */
-#define _VIDEO_DIP_CTL_A 0xe0200
-#define _VIDEO_DIP_CTL_B 0xe1200
-#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
-
-#define _VIDEO_DIP_DATA_A 0xe0208
-#define _VIDEO_DIP_DATA_B 0xe1208
-#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
-
-#define _VIDEO_DIP_GCP_A 0xe0210
-#define _VIDEO_DIP_GCP_B 0xe1210
-#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define GCP_COLOR_INDICATION (1 << 2)
-#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
-#define GCP_AV_MUTE (1 << 0)
-
-/* Per-transcoder DIP controls (VLV) */
-#define _VLV_VIDEO_DIP_CTL_A 0x60200
-#define _VLV_VIDEO_DIP_CTL_B 0x61170
-#define _CHV_VIDEO_DIP_CTL_C 0x611f0
-#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
- _VLV_VIDEO_DIP_CTL_A, \
- _VLV_VIDEO_DIP_CTL_B, \
- _CHV_VIDEO_DIP_CTL_C)
-
-#define _VLV_VIDEO_DIP_DATA_A 0x60208
-#define _VLV_VIDEO_DIP_DATA_B 0x61174
-#define _CHV_VIDEO_DIP_DATA_C 0x611f4
-#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
- _VLV_VIDEO_DIP_DATA_A, \
- _VLV_VIDEO_DIP_DATA_B, \
- _CHV_VIDEO_DIP_DATA_C)
-
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
-#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
-#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
- _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
- _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
- _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
-
-/* Haswell DIP controls */
-#define _HSW_VIDEO_DIP_CTL_A 0x60200
-#define _HSW_VIDEO_DIP_CTL_B 0x61200
-#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
-
-#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
-#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
-#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
-#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
-#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
-#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
-#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
-#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
-#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
-#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
-#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
-
-/* ADLP and later: */
-#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
-#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
-#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\
- _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
-
-#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
-#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
-#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
-#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
-#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
-#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
-#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
-#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
-#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
-#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
-#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
-#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
-
-#define _HSW_VIDEO_DIP_GCP_A 0x60210
-#define _HSW_VIDEO_DIP_GCP_B 0x61210
-#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
-
/* Icelake PPS_DATA and _ECC DIP Registers.
 * These are available for transcoders B, C and eDP.
 * The _A suffix is kept so as to reuse the _MMIO_TRANS2
 * definition, which offsets to the right location.
 */
-#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
-#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
-#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
-
-#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
-#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
-#define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
-
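/*
 * Editor's illustration, not part of the patch: the _A-relative trick
 * mentioned above works because every transcoder's register block sits
 * at a fixed base, so a register can be defined once by its
 * transcoder-A offset and relocated. The base values and helper below
 * are assumptions of this sketch, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define TRANS_A_BASE 0x60000u

static uint32_t trans_reg(uint32_t trans_base, uint32_t reg_a)
{
	/* relocate a transcoder-A relative offset onto another transcoder */
	return trans_base + (reg_a - TRANS_A_BASE);
}

int main(void)
{
	/* PPS_DATA dword 0 on transcoder B (base 0x61000) -> 0x61350 */
	printf("0x%05x\n", trans_reg(0x61000u, 0x60350u));
	return 0;
}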
-#define _HSW_STEREO_3D_CTL_A 0x70020
-#define _HSW_STEREO_3D_CTL_B 0x71020
-#define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
-#define S3D_ENABLE (1 << 31)
-
-#define _PCH_TRANSACONF 0xf0008
-#define _PCH_TRANSBCONF 0xf1008
-#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
-#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
-#define TRANS_ENABLE REG_BIT(31)
-#define TRANS_STATE_ENABLE REG_BIT(30)
-#define TRANS_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* ibx */
-#define TRANS_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_FRAME_START_DELAY_MASK, (x)) /* ibx: 0-3 */
-#define TRANS_INTERLACE_MASK REG_GENMASK(23, 21)
-#define TRANS_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANS_INTERLACE_MASK, 0)
-#define TRANS_INTERLACE_LEGACY_VSYNC_IBX REG_FIELD_PREP(TRANS_INTERLACE_MASK, 2) /* ibx */
-#define TRANS_INTERLACE_INTERLACED REG_FIELD_PREP(TRANS_INTERLACE_MASK, 3)
-#define TRANS_BPC_MASK REG_GENMASK(7, 5) /* ibx */
-#define TRANS_BPC_8 REG_FIELD_PREP(TRANS_BPC_MASK, 0)
-#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1)
-#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
-#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
-
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
@@ -3116,88 +1067,6 @@
#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
-#define PCH_DP_B _MMIO(0xe4100)
-#define PCH_DP_C _MMIO(0xe4200)
-#define PCH_DP_D _MMIO(0xe4300)
-
-/* CPT */
-#define _TRANS_DP_CTL_A 0xe0300
-#define _TRANS_DP_CTL_B 0xe1300
-#define _TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
-#define TRANS_DP_OUTPUT_ENABLE REG_BIT(31)
-#define TRANS_DP_PORT_SEL_MASK REG_GENMASK(30, 29)
-#define TRANS_DP_PORT_SEL_NONE REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, 3)
-#define TRANS_DP_PORT_SEL(port) REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, (port) - PORT_B)
-#define TRANS_DP_AUDIO_ONLY REG_BIT(26)
-#define TRANS_DP_ENH_FRAMING REG_BIT(18)
-#define TRANS_DP_BPC_MASK REG_GENMASK(10, 9)
-#define TRANS_DP_BPC_8 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 0)
-#define TRANS_DP_BPC_10 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 1)
-#define TRANS_DP_BPC_6 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 2)
-#define TRANS_DP_BPC_12 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 3)
-#define TRANS_DP_VSYNC_ACTIVE_HIGH REG_BIT(4)
-#define TRANS_DP_HSYNC_ACTIVE_HIGH REG_BIT(3)
-
-#define _TRANS_DP2_CTL_A 0x600a0
-#define _TRANS_DP2_CTL_B 0x610a0
-#define _TRANS_DP2_CTL_C 0x620a0
-#define _TRANS_DP2_CTL_D 0x630a0
-#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B)
-#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31)
-#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30)
-#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23)
-
-#define _TRANS_DP2_VFREQHIGH_A 0x600a4
-#define _TRANS_DP2_VFREQHIGH_B 0x610a4
-#define _TRANS_DP2_VFREQHIGH_C 0x620a4
-#define _TRANS_DP2_VFREQHIGH_D 0x630a4
-#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B)
-#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8)
-#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz))
-
-#define _TRANS_DP2_VFREQLOW_A 0x600a8
-#define _TRANS_DP2_VFREQLOW_B 0x610a8
-#define _TRANS_DP2_VFREQLOW_C 0x620a8
-#define _TRANS_DP2_VFREQLOW_D 0x630a8
-#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
-
-#define _DP_MIN_HBLANK_CTL_A 0x600ac
-#define _DP_MIN_HBLANK_CTL_B 0x610ac
-#define DP_MIN_HBLANK_CTL(trans) _MMIO_TRANS(trans, _DP_MIN_HBLANK_CTL_A, _DP_MIN_HBLANK_CTL_B)
-
-/* SNB eDP training params */
-/* SNB A-stepping */
-#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
-#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
-#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
-/* SNB B-stepping */
-#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
-#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
-#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
-#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
-
-/* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
-
-/* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
-
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
-
#define VLV_PMWGICZ _MMIO(0x1300a4)
#define HSW_EDRAM_CAP _MMIO(0x120010)
@@ -3206,10 +1075,6 @@
#define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
-#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
-#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
-#define PIXEL_OVERLAP_CNT_SHIFT 30
-
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1 << 31)
#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
@@ -3338,838 +1203,12 @@
*/
#define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4)
-/*
- * HSW - ICL power wells
- *
- * Platforms have up to 3 power well control register sets, each set
- * controlling up to 16 power wells via a request/status HW flag tuple:
- * - main (HSW_PWR_WELL_CTL[1-4])
- * - AUX (ICL_PWR_WELL_CTL_AUX[1-4])
- * - DDI (ICL_PWR_WELL_CTL_DDI[1-4])
- * Each control register set consists of up to 4 registers used by different
- * sources that can request a power well to be enabled:
- * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1)
- * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2)
- * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set)
- * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4)
- */
-#define HSW_PWR_WELL_CTL1 _MMIO(0x45400)
-#define HSW_PWR_WELL_CTL2 _MMIO(0x45404)
-#define HSW_PWR_WELL_CTL3 _MMIO(0x45408)
-#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C)
-#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2))
-#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2))
-
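/*
 * Editor's illustration, not part of the patch: each power well
 * occupies a request/status bit pair in the control registers, so well
 * index N maps to request bit 2N+1 and status bit 2N. The functions
 * below are a standalone mirror of the HSW_PWR_WELL_CTL_REQ/STATE
 * arithmetic, nothing more.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pwr_well_req(unsigned int pw_idx)
{
	return 0x2u << (pw_idx * 2);	/* request flag */
}

static uint32_t pwr_well_state(unsigned int pw_idx)
{
	return 0x1u << (pw_idx * 2);	/* status flag */
}

int main(void)
{
	/* e.g. SKL PW_2 uses index 15 -> request bit 31, status bit 30 */
	printf("req 0x%08x state 0x%08x\n",
	       pwr_well_req(15), pwr_well_state(15));
	return 0;
}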
-/* HSW/BDW power well */
-#define HSW_PW_CTL_IDX_GLOBAL 15
-
-/* SKL/BXT/GLK power wells */
-#define SKL_PW_CTL_IDX_PW_2 15
-#define SKL_PW_CTL_IDX_PW_1 14
-#define GLK_PW_CTL_IDX_AUX_C 10
-#define GLK_PW_CTL_IDX_AUX_B 9
-#define GLK_PW_CTL_IDX_AUX_A 8
-#define SKL_PW_CTL_IDX_DDI_D 4
-#define SKL_PW_CTL_IDX_DDI_C 3
-#define SKL_PW_CTL_IDX_DDI_B 2
-#define SKL_PW_CTL_IDX_DDI_A_E 1
-#define GLK_PW_CTL_IDX_DDI_A 1
-#define SKL_PW_CTL_IDX_MISC_IO 0
-
-/* ICL/TGL - power wells */
-#define TGL_PW_CTL_IDX_PW_5 4
-#define ICL_PW_CTL_IDX_PW_4 3
-#define ICL_PW_CTL_IDX_PW_3 2
-#define ICL_PW_CTL_IDX_PW_2 1
-#define ICL_PW_CTL_IDX_PW_1 0
-
-/* XE_LPD - power wells */
-#define XELPD_PW_CTL_IDX_PW_D 8
-#define XELPD_PW_CTL_IDX_PW_C 7
-#define XELPD_PW_CTL_IDX_PW_B 6
-#define XELPD_PW_CTL_IDX_PW_A 5
-
-#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
-#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
-#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
-#define TGL_PW_CTL_IDX_AUX_TBT6 14
-#define TGL_PW_CTL_IDX_AUX_TBT5 13
-#define TGL_PW_CTL_IDX_AUX_TBT4 12
-#define ICL_PW_CTL_IDX_AUX_TBT4 11
-#define TGL_PW_CTL_IDX_AUX_TBT3 11
-#define ICL_PW_CTL_IDX_AUX_TBT3 10
-#define TGL_PW_CTL_IDX_AUX_TBT2 10
-#define ICL_PW_CTL_IDX_AUX_TBT2 9
-#define TGL_PW_CTL_IDX_AUX_TBT1 9
-#define ICL_PW_CTL_IDX_AUX_TBT1 8
-#define TGL_PW_CTL_IDX_AUX_TC6 8
-#define XELPD_PW_CTL_IDX_AUX_E 8
-#define TGL_PW_CTL_IDX_AUX_TC5 7
-#define XELPD_PW_CTL_IDX_AUX_D 7
-#define TGL_PW_CTL_IDX_AUX_TC4 6
-#define ICL_PW_CTL_IDX_AUX_F 5
-#define TGL_PW_CTL_IDX_AUX_TC3 5
-#define ICL_PW_CTL_IDX_AUX_E 4
-#define TGL_PW_CTL_IDX_AUX_TC2 4
-#define ICL_PW_CTL_IDX_AUX_D 3
-#define TGL_PW_CTL_IDX_AUX_TC1 3
-#define ICL_PW_CTL_IDX_AUX_C 2
-#define ICL_PW_CTL_IDX_AUX_B 1
-#define ICL_PW_CTL_IDX_AUX_A 0
-
-#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
-#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
-#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
-#define XELPD_PW_CTL_IDX_DDI_E 8
-#define TGL_PW_CTL_IDX_DDI_TC6 8
-#define XELPD_PW_CTL_IDX_DDI_D 7
-#define TGL_PW_CTL_IDX_DDI_TC5 7
-#define TGL_PW_CTL_IDX_DDI_TC4 6
-#define ICL_PW_CTL_IDX_DDI_F 5
-#define TGL_PW_CTL_IDX_DDI_TC3 5
-#define ICL_PW_CTL_IDX_DDI_E 4
-#define TGL_PW_CTL_IDX_DDI_TC2 4
-#define ICL_PW_CTL_IDX_DDI_D 3
-#define TGL_PW_CTL_IDX_DDI_TC1 3
-#define ICL_PW_CTL_IDX_DDI_C 2
-#define ICL_PW_CTL_IDX_DDI_B 1
-#define ICL_PW_CTL_IDX_DDI_A 0
-
-/* HSW - power well misc debug registers */
-#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
-#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
-#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
-#define HSW_PWR_WELL_FORCE_ON (1 << 19)
-#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
-
-/* SKL Fuse Status */
-enum skl_power_gate {
- SKL_PG0,
- SKL_PG1,
- SKL_PG2,
- ICL_PG3,
- ICL_PG4,
-};
-
-#define SKL_FUSE_STATUS _MMIO(0x42000)
-#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
-/*
- * PG0 is HW controlled, so doesn't have a corresponding power well control knob
- * SKL_PW_CTL_IDX_PW_1..SKL_PW_CTL_IDX_PW_2 -> PG1..PG2
- */
-#define SKL_PW_CTL_IDX_TO_PG(pw_idx) \
- ((pw_idx) - SKL_PW_CTL_IDX_PW_1 + SKL_PG1)
-/*
- * PG0 is HW controlled, so doesn't have a corresponding power well control knob
- * ICL_PW_CTL_IDX_PW_1..ICL_PW_CTL_IDX_PW_4 -> PG1..PG4
- */
-#define ICL_PW_CTL_IDX_TO_PG(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
-#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
-
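/*
 * Editor's illustration, not part of the patch: a worked example of the
 * index-to-power-gate mapping above. Control index 15 (SKL PW_2) maps
 * to PG2, whose fuse distribution status lives at bit 27 - 2 = 25.
 */
#include <stdio.h>

#define SKL_PW_CTL_IDX_PW_1 14
#define SKL_PG1 1

static int pw_idx_to_pg(int pw_idx)
{
	return pw_idx - SKL_PW_CTL_IDX_PW_1 + SKL_PG1;
}

int main(void)
{
	int pg = pw_idx_to_pg(15);	/* SKL PW_2 -> PG2 */

	printf("PG%d, fuse status bit %d\n", pg, 27 - pg);
	return 0;
}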
-/* Per-pipe DDI Function Control */
-#define _TRANS_DDI_FUNC_CTL_A 0x60400
-#define _TRANS_DDI_FUNC_CTL_B 0x61400
-#define _TRANS_DDI_FUNC_CTL_C 0x62400
-#define _TRANS_DDI_FUNC_CTL_D 0x63400
-#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
-#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
-#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
-#define TRANS_DDI_FUNC_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
-
-#define TRANS_DDI_FUNC_ENABLE (1 << 31)
-/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_SHIFT 28
-#define TGL_TRANS_DDI_PORT_SHIFT 27
-#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
-#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
-#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
-#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
-#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
-#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
-#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
-#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
-#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
-#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24)
-#define TRANS_DDI_BPC_MASK (7 << 20)
-#define TRANS_DDI_BPC_8 (0 << 20)
-#define TRANS_DDI_BPC_10 (1 << 20)
-#define TRANS_DDI_BPC_6 (2 << 20)
-#define TRANS_DDI_BPC_12 (3 << 20)
-#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18)
-#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
-#define TRANS_DDI_PVSYNC (1 << 17)
-#define TRANS_DDI_PHSYNC (1 << 16)
-#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
-#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
-#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
-#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
-#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
-#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
-#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
-#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
-#define TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(12)
-#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
-#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
- REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
-#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
-#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
-#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
-#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
-#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
-#define TRANS_DDI_BFI_ENABLE (1 << 4)
-#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
-#define TRANS_DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
-#define TRANS_DDI_PORT_WIDTH(width) REG_FIELD_PREP(TRANS_DDI_PORT_WIDTH_MASK, (width) - 1)
-#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
-#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
- | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
- | TRANS_DDI_HDMI_SCRAMBLING)
-
-#define _TRANS_DDI_FUNC_CTL2_A 0x60404
-#define _TRANS_DDI_FUNC_CTL2_B 0x61404
-#define _TRANS_DDI_FUNC_CTL2_C 0x62404
-#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
-#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
-#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
-#define TRANS_DDI_FUNC_CTL2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
-#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
-#define CMTG_SECONDARY_MODE REG_BIT(3)
-#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
-#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
-
-#define TRANS_CMTG_CHICKEN _MMIO(0x6fa90)
-#define DISABLE_DPT_CLK_GATING REG_BIT(1)
-
-/* DisplayPort Transport Control */
-#define _DP_TP_CTL_A 0x64040
-#define _DP_TP_CTL_B 0x64140
-#define _TGL_DP_TP_CTL_A 0x60540
-#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
-#define DP_TP_CTL_ENABLE REG_BIT(31)
-#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
-#define DP_TP_CTL_MODE_MASK REG_BIT(27)
-#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
-#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
-#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
-#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
-#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
-#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
-#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
-#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
-#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
-#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
-
-/* DisplayPort Transport Status */
-#define _DP_TP_STATUS_A 0x64044
-#define _DP_TP_STATUS_B 0x64144
-#define _TGL_DP_TP_STATUS_A 0x60544
-#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
-#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
-#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
-#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
-#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
-#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
-#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
-
-/* DDI Buffer Control */
-#define _DDI_BUF_CTL_A 0x64000
-#define _DDI_BUF_CTL_B 0x64100
-/* Known as DDI_CTL_DE in MTL+ */
-#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE REG_BIT(31)
-#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
-#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
-#define DDI_BUF_EMP_MASK REG_GENMASK(27, 24)
-#define DDI_BUF_TRANS_SELECT(n) REG_FIELD_PREP(DDI_BUF_EMP_MASK, (n))
-#define DDI_BUF_PHY_LINK_RATE_MASK REG_GENMASK(23, 20)
-#define DDI_BUF_PHY_LINK_RATE(r) REG_FIELD_PREP(DDI_BUF_PHY_LINK_RATE_MASK, (r))
-#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18)
-#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0)
-#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1)
-#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 2)
-#define DDI_BUF_PORT_REVERSAL REG_BIT(16)
-#define DDI_BUF_LANE_STAGGER_DELAY_MASK REG_GENMASK(15, 8)
-#define DDI_BUF_LANE_STAGGER_DELAY(symbols) REG_FIELD_PREP(DDI_BUF_LANE_STAGGER_DELAY_MASK, \
- (symbols))
-#define DDI_BUF_IS_IDLE REG_BIT(7)
-#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
-#define DDI_A_4_LANES REG_BIT(4)
-#define DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
-#define DDI_PORT_WIDTH(width) REG_FIELD_PREP(DDI_PORT_WIDTH_MASK, \
- ((width) == 3 ? 4 : (width) - 1))
-#define DDI_PORT_WIDTH_SHIFT 1
-#define DDI_INIT_DISPLAY_DETECTED REG_BIT(0)
-
-/* DDI Buffer Translations */
-#define _DDI_BUF_TRANS_A 0x64E00
-#define _DDI_BUF_TRANS_B 0x64E60
-#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
-#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
-#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
-
-/* DDI DP Compliance Control */
-#define _DDI_DP_COMP_CTL_A 0x605F0
-#define _DDI_DP_COMP_CTL_B 0x615F0
-#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
-#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
-#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
-#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
-#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
-#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
-#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
-#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
-#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
-
-/* DDI DP Compliance Pattern */
-#define _DDI_DP_COMP_PAT_A 0x605F4
-#define _DDI_DP_COMP_PAT_B 0x615F4
-#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
-
-/* The Sideband Interface (SBI) is programmed indirectly: SBI_ADDR
- * holds the register offset and SBI_DATA holds the payload.
- */
-#define SBI_ADDR _MMIO(0xC6000)
-#define SBI_DATA _MMIO(0xC6004)
-#define SBI_CTL_STAT _MMIO(0xC6008)
-#define SBI_CTL_DEST_ICLK (0x0 << 16)
-#define SBI_CTL_DEST_MPHY (0x1 << 16)
-#define SBI_CTL_OP_IORD (0x2 << 8)
-#define SBI_CTL_OP_IOWR (0x3 << 8)
-#define SBI_CTL_OP_CRRD (0x6 << 8)
-#define SBI_CTL_OP_CRWR (0x7 << 8)
-#define SBI_RESPONSE_FAIL (0x1 << 1)
-#define SBI_RESPONSE_SUCCESS (0x0 << 1)
-#define SBI_BUSY (0x1 << 0)
-#define SBI_READY (0x0 << 0)
-
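/*
 * Editor's illustration, not part of the patch: one plausible SBI read
 * sequence built only from the definitions above. The MMIO accessors
 * are toy stand-ins backed by an array, the "offset in the upper half
 * of SBI_ADDR" detail is an assumption of this sketch, and real code
 * would bound the busy-wait.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SBI_ADDR_REG	0xC6000u
#define SBI_DATA_REG	0xC6004u
#define SBI_CTL_STAT_REG 0xC6008u
#define SBI_OP_CRRD	(0x6u << 8)
#define SBI_BUSY_BIT	(0x1u << 0)
#define SBI_RESP_FAIL	(0x1u << 1)

static uint32_t fake_regs[3];	/* toy backing store for the three registers */

static uint32_t *slot(uint32_t reg)
{
	return &fake_regs[(reg - SBI_ADDR_REG) / 4];
}

static uint32_t mmio_read32(uint32_t reg)
{
	if (reg == SBI_CTL_STAT_REG)
		*slot(reg) &= ~SBI_BUSY_BIT;	/* pretend the hw finished */
	return *slot(reg);
}

static void mmio_write32(uint32_t reg, uint32_t val)
{
	*slot(reg) = val;
}

static bool sbi_read(uint16_t offset, uint32_t *val)
{
	uint32_t stat;

	mmio_write32(SBI_ADDR_REG, (uint32_t)offset << 16);
	mmio_write32(SBI_CTL_STAT_REG, SBI_OP_CRRD | SBI_BUSY_BIT);

	do {				/* real code bounds this wait */
		stat = mmio_read32(SBI_CTL_STAT_REG);
	} while (stat & SBI_BUSY_BIT);

	if (stat & SBI_RESP_FAIL)
		return false;

	*val = mmio_read32(SBI_DATA_REG);
	return true;
}

int main(void)
{
	uint32_t v = 0;

	printf("ok=%d data=0x%08x\n", sbi_read(0x020c, &v), v);
	return 0;
}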
-/* SBI offsets */
-#define SBI_SSCDIVINTPHASE 0x0200
-#define SBI_SSCDIVINTPHASE6 0x0600
-#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
-#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1)
-#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
-#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
-#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8)
-#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8)
-#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15)
-#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0)
-#define SBI_SSCDITHPHASE 0x0204
-#define SBI_SSCCTL 0x020c
-#define SBI_SSCCTL6 0x060C
-#define SBI_SSCCTL_PATHALT (1 << 3)
-#define SBI_SSCCTL_DISABLE (1 << 0)
-#define SBI_SSCAUXDIV6 0x0610
-#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
-#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4)
-#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4)
-#define SBI_DBUFF0 0x2a00
-#define SBI_GEN0 0x1f00
-#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0)
-
-/* LPT PIXCLK_GATE */
-#define PIXCLK_GATE _MMIO(0xC6020)
-#define PIXCLK_GATE_UNGATE (1 << 0)
-#define PIXCLK_GATE_GATE (0 << 0)
-
-/* SPLL */
-#define SPLL_CTL _MMIO(0x46020)
-#define SPLL_PLL_ENABLE (1 << 31)
-#define SPLL_REF_BCLK (0 << 28)
-#define SPLL_REF_MUXED_SSC (1 << 28) /* CPU SSC if fuse-enabled, PCH SSC otherwise */
-#define SPLL_REF_NON_SSC_HSW (2 << 28)
-#define SPLL_REF_PCH_SSC_BDW (2 << 28)
-#define SPLL_REF_LCPLL (3 << 28)
-#define SPLL_REF_MASK (3 << 28)
-#define SPLL_FREQ_810MHz (0 << 26)
-#define SPLL_FREQ_1350MHz (1 << 26)
-#define SPLL_FREQ_2700MHz (2 << 26)
-#define SPLL_FREQ_MASK (3 << 26)
-
-/* WRPLL */
-#define _WRPLL_CTL1 0x46040
-#define _WRPLL_CTL2 0x46060
-#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
-#define WRPLL_PLL_ENABLE (1 << 31)
-#define WRPLL_REF_BCLK (0 << 28)
-#define WRPLL_REF_PCH_SSC (1 << 28)
-#define WRPLL_REF_MUXED_SSC_BDW (2 << 28) /* CPU SSC if fuse-enabled, PCH SSC otherwise */
-#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC (non-ULT) */
-#define WRPLL_REF_LCPLL (3 << 28)
-#define WRPLL_REF_MASK (3 << 28)
-/* WRPLL divider programming */
-#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
-#define WRPLL_DIVIDER_REF_MASK (0xff)
-#define WRPLL_DIVIDER_POST(x) ((x) << 8)
-#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
-#define WRPLL_DIVIDER_POST_SHIFT 8
-#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
-#define WRPLL_DIVIDER_FB_SHIFT 16
-#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
-
-/* Port clock selection */
-#define _PORT_CLK_SEL_A 0x46100
-#define _PORT_CLK_SEL_B 0x46104
-#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29)
-#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0)
-#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1)
-#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2)
-#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)
-#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll))
-#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4)
-#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5)
-#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7)
-
-/* On ICL+ this is the same register as PORT_CLK_SEL, but all the bit definitions change. */
-#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
-#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28)
-#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0)
-#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8)
-#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC)
-#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD)
-#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE)
-#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF)
-
-/* Transcoder clock selection */
-#define _TRANS_CLK_SEL_A 0x46140
-#define _TRANS_CLK_SEL_B 0x46144
-#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
-/* For each transcoder, we need to select the corresponding port clock */
-#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
-#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
-#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
-#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
-
-
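/*
 * Editor's illustration, not part of the patch: the select field above
 * encodes "disabled" as 0, so a port is stored biased by one. Port B
 * having enum value 1 is an assumption of this sketch.
 */
#include <stdio.h>

static unsigned int trans_clk_sel_port(unsigned int port, int tgl_plus)
{
	/* field value 0 means "disabled", so the port is biased by one */
	return (port + 1) << (tgl_plus ? 28 : 29);
}

int main(void)
{
	printf("pre-TGL port B: 0x%08x\n", trans_clk_sel_port(1, 0));
	printf("TGL+   port B:  0x%08x\n", trans_clk_sel_port(1, 1));
	return 0;
}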
-#define CDCLK_FREQ _MMIO(0x46200)
-
-#define _TRANSA_MSA_MISC 0x60410
-#define _TRANSB_MSA_MISC 0x61410
-#define _TRANSC_MSA_MISC 0x62410
-#define _TRANS_EDP_MSA_MISC 0x6f410
-#define TRANS_MSA_MISC(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
-/* See DP_MSA_MISC_* for the bit definitions */
-
-#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
-#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
-#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
-#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
-#define TRANS_SET_CONTEXT_LATENCY(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
-#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
-#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
-
-/* LCPLL Control */
-#define LCPLL_CTL _MMIO(0x130040)
-#define LCPLL_PLL_DISABLE (1 << 31)
-#define LCPLL_PLL_LOCK (1 << 30)
-#define LCPLL_REF_NON_SSC (0 << 28)
-#define LCPLL_REF_BCLK (2 << 28)
-#define LCPLL_REF_PCH_SSC (3 << 28)
-#define LCPLL_REF_MASK (3 << 28)
-#define LCPLL_CLK_FREQ_MASK (3 << 26)
-#define LCPLL_CLK_FREQ_450 (0 << 26)
-#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
-#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
-#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
-#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
-#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
-#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
-#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
-#define LCPLL_CD_SOURCE_FCLK (1 << 21)
-#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
-
-/*
- * SKL Clocks
- */
-
-/* CDCLK_CTL */
-#define CDCLK_CTL _MMIO(0x46000)
-#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
-#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
-#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
-#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
-#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
-#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
-#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
-#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
-#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
-#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
-#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
-#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
-#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
-#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
-#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
-#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
-#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
-#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
-#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
-#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
-#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
-
-/* CDCLK_SQUASH_CTL */
-#define CDCLK_SQUASH_CTL _MMIO(0x46008)
-#define CDCLK_SQUASH_ENABLE REG_BIT(31)
-#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24)
-#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x))
-#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0)
-#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x))
-
-/* LCPLL_CTL */
-#define LCPLL1_CTL _MMIO(0x46010)
-#define LCPLL2_CTL _MMIO(0x46014)
-#define LCPLL_PLL_ENABLE (1 << 31)
-
-/* DPLL control1 */
-#define DPLL_CTRL1 _MMIO(0x6C058)
-#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
-#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
-#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
-#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
-#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
-#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
-#define DPLL_CTRL1_LINK_RATE_2700 0
-#define DPLL_CTRL1_LINK_RATE_1350 1
-#define DPLL_CTRL1_LINK_RATE_810 2
-#define DPLL_CTRL1_LINK_RATE_1620 3
-#define DPLL_CTRL1_LINK_RATE_1080 4
-#define DPLL_CTRL1_LINK_RATE_2160 5
-
-/* DPLL control2 */
-#define DPLL_CTRL2 _MMIO(0x6C05C)
-#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
-#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
-#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
-#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
-#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
-
-/* DPLL Status */
-#define DPLL_STATUS _MMIO(0x6C060)
-#define DPLL_LOCK(id) (1 << ((id) * 8))
-
-/* DPLL cfg */
-#define _DPLL1_CFGCR1 0x6C040
-#define _DPLL2_CFGCR1 0x6C048
-#define _DPLL3_CFGCR1 0x6C050
-#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
-#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
-#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
-#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
-#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
-
-#define _DPLL1_CFGCR2 0x6C044
-#define _DPLL2_CFGCR2 0x6C04C
-#define _DPLL3_CFGCR2 0x6C054
-#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
-#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
-#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
-#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
-#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
-#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
-#define DPLL_CFGCR2_KDIV_5 (0 << 5)
-#define DPLL_CFGCR2_KDIV_2 (1 << 5)
-#define DPLL_CFGCR2_KDIV_3 (2 << 5)
-#define DPLL_CFGCR2_KDIV_1 (3 << 5)
-#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
-#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
-#define DPLL_CFGCR2_PDIV_1 (0 << 2)
-#define DPLL_CFGCR2_PDIV_2 (1 << 2)
-#define DPLL_CFGCR2_PDIV_3 (2 << 2)
-#define DPLL_CFGCR2_PDIV_7 (4 << 2)
-#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2)
-#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
-
-/* ICL Clocks */
-#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
-#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
- (tc_port) + 12 : \
- (tc_port) - TC_PORT_4 + 21))
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27)
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \
- (3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \
- ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-
-/*
- * DG1 Clocks
- * The first register controls phys A and B, while the second register
- * controls phys C and D. The bits in these registers are the
- * same, but refer to different phys.
- */
-#define _DG1_DPCLKA_CFGCR0 0x164280
-#define _DG1_DPCLKA1_CFGCR0 0x16C280
-#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)
-#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2)
-#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \
- _DG1_DPCLKA_CFGCR0, \
- _DG1_DPCLKA1_CFGCR0)
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT(_DG1_DPCLKA_PHY_IDX(phy) + 10)
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2)
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-
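/*
 * Editor's illustration, not part of the patch: a standalone mirror of
 * the DG1 indexing math. Phys A/B live in the first register, C/D in
 * the second, and each phy keeps the same bit positions within its
 * register, as the (phy) / 2 and (phy) % 2 terms above express.
 */
#include <stdio.h>

int main(void)
{
	unsigned int phy = 2;	/* PHY C */
	unsigned int reg = (phy / 2) ? 0x16C280u : 0x164280u;
	unsigned int idx = phy % 2;	/* index within the register */

	printf("reg 0x%x, clk_sel shift %u, clk_off bit %u\n",
	       reg, idx * 2, idx + 10);
	return 0;
}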
-/* ADLS Clocks */
-#define _ADLS_DPCLKA_CFGCR0 0x164280
-#define _ADLS_DPCLKA_CFGCR1 0x1642BC
-#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
- _ADLS_DPCLKA_CFGCR0, \
- _ADLS_DPCLKA_CFGCR1)
-#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
-/* ADLS DPCLKA_CFGCR0 DDI mask */
-#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
-#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
-#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
-/* ADLS DPCLKA_CFGCR1 DDI mask */
-#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
-#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
-#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
- ADLS_DPCLKA_DDIA_SEL_MASK, \
- ADLS_DPCLKA_DDIB_SEL_MASK, \
- ADLS_DPCLKA_DDII_SEL_MASK, \
- ADLS_DPCLKA_DDIJ_SEL_MASK, \
- ADLS_DPCLKA_DDIK_SEL_MASK)
-
-/* ICL PLL */
-#define _DPLL0_ENABLE 0x46010
-#define _DPLL1_ENABLE 0x46014
-#define _ADLS_DPLL2_ENABLE 0x46018
-#define _ADLS_DPLL3_ENABLE 0x46030
-#define PLL_ENABLE REG_BIT(31)
-#define PLL_LOCK REG_BIT(30)
-#define PLL_POWER_ENABLE REG_BIT(27)
-#define PLL_POWER_STATE REG_BIT(26)
-#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
- _DPLL0_ENABLE, _DPLL1_ENABLE, \
- _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
-
-#define _DG2_PLL3_ENABLE 0x4601C
-
-#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
- _DPLL0_ENABLE, _DPLL1_ENABLE, \
- _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
-
-#define TBT_PLL_ENABLE _MMIO(0x46020)
-
-#define _MG_PLL1_ENABLE 0x46030
-#define _MG_PLL2_ENABLE 0x46034
-#define _MG_PLL3_ENABLE 0x46038
-#define _MG_PLL4_ENABLE 0x4603C
-/* Bits are the same as _DPLL0_ENABLE */
-#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
- _MG_PLL2_ENABLE)
-
-/* DG1 PLL */
-#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _DPLL0_ENABLE, _DPLL1_ENABLE, \
- _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
-
-/* ADL-P Type C PLL */
-#define PORTTC1_PLL_ENABLE 0x46038
-#define PORTTC2_PLL_ENABLE 0x46040
-#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
- PORTTC1_PLL_ENABLE, \
- PORTTC2_PLL_ENABLE)
-
-#define _ICL_DPLL0_CFGCR0 0x164000
-#define _ICL_DPLL1_CFGCR0 0x164080
-#define ICL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
- _ICL_DPLL1_CFGCR0)
-#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
-#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
-#define DPLL_CFGCR0_SSC_ENABLE_ICL (1 << 25)
-#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
-#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
-#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
-#define DPLL_CFGCR0_LINK_RATE_810 (2 << 25)
-#define DPLL_CFGCR0_LINK_RATE_1620 (3 << 25)
-#define DPLL_CFGCR0_LINK_RATE_1080 (4 << 25)
-#define DPLL_CFGCR0_LINK_RATE_2160 (5 << 25)
-#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
-#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
-#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
-#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
-#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
-#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
-
-#define _ICL_DPLL0_CFGCR1 0x164004
-#define _ICL_DPLL1_CFGCR1 0x164084
-#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
- _ICL_DPLL1_CFGCR1)
-#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
-#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
-#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
-#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
-#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
-#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
-#define DPLL_CFGCR1_KDIV_SHIFT (6)
-#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
-#define DPLL_CFGCR1_KDIV_1 (1 << 6)
-#define DPLL_CFGCR1_KDIV_2 (2 << 6)
-#define DPLL_CFGCR1_KDIV_3 (4 << 6)
-#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
-#define DPLL_CFGCR1_PDIV_SHIFT (2)
-#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
-#define DPLL_CFGCR1_PDIV_2 (1 << 2)
-#define DPLL_CFGCR1_PDIV_3 (2 << 2)
-#define DPLL_CFGCR1_PDIV_5 (4 << 2)
-#define DPLL_CFGCR1_PDIV_7 (8 << 2)
-#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
-#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
-#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
-
-#define _TGL_DPLL0_CFGCR0 0x164284
-#define _TGL_DPLL1_CFGCR0 0x16428C
-#define _TGL_TBTPLL_CFGCR0 0x16429C
-#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
- _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
-#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0)
-
-#define _TGL_DPLL0_DIV0 0x164B00
-#define _TGL_DPLL1_DIV0 0x164C00
-#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0)
-#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
-#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val))
-
-#define _TGL_DPLL0_CFGCR1 0x164288
-#define _TGL_DPLL1_CFGCR1 0x164290
-#define _TGL_TBTPLL_CFGCR1 0x1642A0
-#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
- _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
-#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1)
-
-#define _DG1_DPLL2_CFGCR0 0x16C284
-#define _DG1_DPLL3_CFGCR0 0x16C28C
-#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
- _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
-
-#define _DG1_DPLL2_CFGCR1 0x16C288
-#define _DG1_DPLL3_CFGCR1 0x16C290
-#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
- _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
-
-/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
-#define _ADLS_DPLL4_CFGCR0 0x164294
-#define _ADLS_DPLL3_CFGCR0 0x1642C0
-#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
- _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
-
-#define _ADLS_DPLL4_CFGCR1 0x164298
-#define _ADLS_DPLL3_CFGCR1 0x1642C4
-#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
- _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
-
-/* BXT display engine PLL */
-#define BXT_DE_PLL_CTL _MMIO(0x6d000)
-#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
-#define BXT_DE_PLL_RATIO_MASK 0xff
-
-#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
-#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
-#define BXT_DE_PLL_LOCK (1 << 30)
-#define BXT_DE_PLL_FREQ_REQ (1 << 23)
-#define BXT_DE_PLL_FREQ_REQ_ACK (1 << 22)
-#define ICL_CDCLK_PLL_RATIO(x) (x)
-#define ICL_CDCLK_PLL_RATIO_MASK 0xff
-
-/* GEN9 DC */
-#define DC_STATE_EN _MMIO(0x45504)
-#define DC_STATE_DISABLE 0
-#define DC_STATE_EN_DC3CO REG_BIT(30)
-#define DC_STATE_DC3CO_STATUS REG_BIT(29)
-#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
-#define HOLD_PHY_PG1_LATCH REG_BIT(20)
-#define DC_STATE_EN_UPTO_DC5 (1 << 0)
-#define DC_STATE_EN_DC9 (1 << 3)
-#define DC_STATE_EN_UPTO_DC6 (2 << 0)
-#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
-
-#define DC_STATE_DEBUG _MMIO(0x45520)
-#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
-#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
-
-#define D_COMP_BDW _MMIO(0x138144)
-
-/* Pipe WM_LINETIME - watermark line time */
-#define _WM_LINETIME_A 0x45270
-#define _WM_LINETIME_B 0x45274
-#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
-#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
-#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
-#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
-#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
-
-/* SFUSE_STRAP */
-#define SFUSE_STRAP _MMIO(0xc2014)
-#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
-#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
-#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
-#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
-#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
-#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
-#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
-#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
-
-/* Gen4+ Timestamp and Pipe Frame time stamp registers */
-#define GEN4_TIMESTAMP _MMIO(0x2358)
-#define ILK_TIMESTAMP_HI _MMIO(0x70070)
-#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
-
#define GEN9_TIMESTAMP_OVERRIDE _MMIO(0x44074)
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT 0
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK 0x3ff
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12)
-/* g4x+, except vlv/chv! */
-#define _PIPE_FRMTMSTMP_A 0x70048
-#define _PIPE_FRMTMSTMP_B 0x71048
-#define PIPE_FRMTMSTMP(pipe) \
- _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
-
-/* g4x+, except vlv/chv! */
-#define _PIPE_FLIPTMSTMP_A 0x7004C
-#define _PIPE_FLIPTMSTMP_B 0x7104C
-#define PIPE_FLIPTMSTMP(pipe) \
- _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
-
-/* tgl+ */
-#define _PIPE_FLIPDONETMSTMP_A 0x70054
-#define _PIPE_FLIPDONETMSTMP_B 0x71054
-#define PIPE_FLIPDONETIMSTMP(pipe) \
- _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
-
-#define _VLV_PIPE_MSA_MISC_A 0x70048
-#define VLV_PIPE_MSA_MISC(__display, pipe) \
- _MMIO_PIPE2(__display, pipe, _VLV_PIPE_MSA_MISC_A)
-#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
-#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
-
#define GGC _MMIO(0x108040)
#define GMS_MASK REG_GENMASK(15, 8)
#define GGMS_MASK REG_GENMASK(7, 6)
@@ -4184,45 +1223,6 @@ enum skl_power_gate {
#define SGGI_DIS REG_BIT(15)
#define SGR_DIS REG_BIT(13)
-#define _ICL_PHY_MISC_A 0x64C00
-#define _ICL_PHY_MISC_B 0x64C04
-#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
-#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, _ICL_PHY_MISC_B)
-#define DG2_PHY_MISC(port) ((port) == PHY_E ? _MMIO(_DG2_PHY_MISC_TC1) : \
- ICL_PHY_MISC(port))
-#define ICL_PHY_MISC_MUX_DDID (1 << 28)
-#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
-#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
-
-#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
-#define MODULAR_FIA_MASK (1 << 4)
-#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
-#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
-#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
-#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
-#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
-
-#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
-#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
-
-#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
-#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
-
-#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
-#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
-#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
-#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
-
-#define _TCSS_DDI_STATUS_1 0x161500
-#define _TCSS_DDI_STATUS_2 0x161504
-#define TCSS_DDI_STATUS(tc) _MMIO(_PICK_EVEN(tc, \
- _TCSS_DDI_STATUS_1, \
- _TCSS_DDI_STATUS_2))
-#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
-#define TCSS_DDI_STATUS_READY REG_BIT(2)
-#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
-#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
-
#define PRIMARY_SPI_TRIGGER _MMIO(0x102040)
#define PRIMARY_SPI_ADDRESS _MMIO(0x102080)
#define PRIMARY_SPI_REGIONID _MMIO(0x102084)
@@ -4231,32 +1231,11 @@ enum skl_power_gate {
#define OROM_OFFSET _MMIO(0x1020c0)
#define OROM_OFFSET_MASK REG_GENMASK(20, 16)
-#define CLKREQ_POLICY _MMIO(0x101038)
-#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
-
-#define CLKGATE_DIS_MISC _MMIO(0x46534)
-#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
-
-#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
-#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
-#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
-#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
-
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
-#define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710
-#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8)
-#define MTL_TRCD_MASK REG_GENMASK(31, 24)
-#define MTL_TRP_MASK REG_GENMASK(23, 16)
-#define MTL_DCLK_MASK REG_GENMASK(15, 0)
-
-#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8 + 4)
-#define MTL_TRAS_MASK REG_GENMASK(16, 8)
-#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
-
#define MTL_MEDIA_GSI_BASE 0x380000
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index 94a8f902689e..bfe98cb9a038 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -9,76 +9,19 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
-/**
- * REG_BIT() - Prepare a u32 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u32, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT(__n) \
- ((u32)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 31))))
-
-/**
- * REG_BIT8() - Prepare a u8 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u8, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT8(__n) \
- ((u8)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 7))))
-
-/**
- * REG_GENMASK() - Prepare a continuous u32 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u32, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK(__high, __low) \
- ((u32)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
-
-/**
- * REG_GENMASK64() - Prepare a continuous u64 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
+/*
+ * Wrappers over the generic fixed-width BIT_U*() and GENMASK_U*()
+ * implementations, for compatibility with the previous implementation.
*/
-#define REG_GENMASK64(__high, __low) \
- ((u64)(GENMASK_ULL(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
+#define REG_GENMASK(high, low) GENMASK_U32(high, low)
+#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
+#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
+#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
-/**
- * REG_GENMASK8() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u8, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK8(__high, __low) \
- ((u8)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))
+#define REG_BIT(n) BIT_U32(n)
+#define REG_BIT64(n) BIT_U64(n)
+#define REG_BIT16(n) BIT_U16(n)
+#define REG_BIT8(n) BIT_U8(n)
/*
* Local integer constant expression version of is_power_of_2().
@@ -143,35 +86,6 @@
*/
#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
-/**
- * REG_BIT16() - Prepare a u16 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u16, with compile time
- * checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT16(__n) \
- ((u16)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 15))))
-
-/**
- * REG_GENMASK16() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u16, with compile time
- * checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK16(__high, __low) \
- ((u16)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 15 || (__low) > (__high)))))
/**
* REG_FIELD_PREP16() - Prepare a u16 bitfield value
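
The REG_BIT()/REG_GENMASK() definitions above are now thin aliases for the
generic fixed-width BIT_U*()/GENMASK_U*() macros, keeping the same u32 typing
and compile-time bounds checking. A minimal sketch of how they compose with
REG_FIELD_PREP() to build a register value; FOO_CTL and its fields are
hypothetical, for illustration only:

#define FOO_CTL_RATIO_MASK	REG_GENMASK(15, 8)	/* u32, bounds-checked */
#define FOO_CTL_RATIO(x)	REG_FIELD_PREP(FOO_CTL_RATIO_MASK, (x))
#define FOO_CTL_ENABLE		REG_BIT(0)

static u32 foo_ctl_value(void)
{
	/* 42 must fit in bits 15:8; REG_FIELD_PREP() checks that at compile time. */
	return FOO_CTL_RATIO(42) | FOO_CTL_ENABLE;
}
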
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c3d27eadc0a7..b9a2b2194c8f 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -2184,7 +2184,7 @@ void i915_request_show(struct drm_printer *m,
const char *prefix,
int indent)
{
- const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+ const char __rcu *timeline;
char buf[80] = "";
int x = 0;
@@ -2220,6 +2220,8 @@ void i915_request_show(struct drm_printer *m,
x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+ rcu_read_lock();
+ timeline = dma_fence_timeline_name((struct dma_fence *)&rq->fence);
drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
prefix, indent, " ",
queue_status(rq),
@@ -2228,7 +2230,8 @@ void i915_request_show(struct drm_printer *m,
fence_status(rq),
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- name);
+ rcu_dereference(timeline));
+ rcu_read_unlock();
}
static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1d4cc91c0e40..73e89b168fc3 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -427,18 +427,25 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
static void timer_i915_sw_fence_wake(struct timer_list *t)
{
- struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
+ struct i915_sw_dma_fence_cb_timer *cb = timer_container_of(cb, t,
+ timer);
struct i915_sw_fence *fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
fence = xchg(&cb->base.fence, NULL);
if (!fence)
return;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(cb->dma);
+ timeline = dma_fence_timeline_name(cb->dma);
pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
- cb->dma->ops->get_driver_name(cb->dma),
- cb->dma->ops->get_timeline_name(cb->dma),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
cb->dma->seqno,
i915_sw_fence_debug_hint(fence));
+ rcu_read_unlock();
i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
i915_sw_fence_complete(fence);
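
Both hunks above replace direct fence->ops->get_driver_name() and
->get_timeline_name() calls with the dma_fence_driver_name() and
dma_fence_timeline_name() accessors, whose returned strings are RCU-protected.
A condensed sketch of the consuming pattern used in this patch:

static void show_fence(struct dma_fence *fence)
{
	const char __rcu *driver;
	const char __rcu *timeline;

	/* The names are only guaranteed stable inside the RCU read section. */
	rcu_read_lock();
	driver = dma_fence_driver_name(fence);
	timeline = dma_fence_timeline_name(fence);
	pr_info("fence %s:%s:%llx\n",
		rcu_dereference(driver), rcu_dereference(timeline),
		fence->seqno);
	rcu_read_unlock();
}
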
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index f45bd6b6cede..4c02a04be681 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -5,6 +5,8 @@
#include <linux/vga_switcheroo.h>
+#include "display/intel_display_core.h"
+
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_switcheroo.h"
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index f936e8f1f129..622c66666935 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -140,8 +140,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
static const struct bin_attribute dpf_attrs = {
.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
- .read_new = i915_l3_read,
- .write_new = i915_l3_write,
+ .read = i915_l3_read,
+ .write = i915_l3_write,
.mmap = NULL,
.private = (void *)0
};
@@ -149,8 +149,8 @@ static const struct bin_attribute dpf_attrs = {
static const struct bin_attribute dpf_attrs_1 = {
.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
- .read_new = i915_l3_read,
- .write_new = i915_l3_write,
+ .read = i915_l3_read,
+ .write = i915_l3_write,
.mmap = NULL,
.private = (void *)1
};
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 609214231ffc..f7fb40cfdb70 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -40,8 +40,6 @@
struct drm_i915_private;
struct timer_list;
-#define FDO_BUG_URL "https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html"
-
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 632e316f8b05..25e97031d76e 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1607,6 +1607,26 @@ err_rpm:
return err;
}
+int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (!err)
+ err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ return err;
+}
+
static void flush_idle_contexts(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 6a6be8048aa8..0f9eee6d18d2 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -289,26 +289,8 @@ int __must_check
i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- struct i915_gem_ww_ctx ww;
- int err;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- err = i915_gem_object_lock(vma->obj, &ww);
- if (!err)
- err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
- if (err == -EDEADLK) {
- err = i915_gem_ww_ctx_backoff(&ww);
- if (!err)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
-
- return err;
-}
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u32 align, unsigned int flags);
@@ -353,6 +335,11 @@ static inline bool i915_node_color_differs(const struct drm_mm_node *node,
return drm_mm_node_allocated(node) && node->color != color;
}
+static inline void __iomem *i915_vma_get_iomap(struct i915_vma *vma)
+{
+ return READ_ONCE(vma->iomap);
+}
+
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap
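
The un-inlined i915_vma_pin() above keeps the standard ww-mutex retry idiom:
lock the object, attempt the pin, and on -EDEADLK back off and retry until the
whole lock set can be acquired without deadlock. A condensed sketch of that
idiom, where do_work() is a hypothetical stand-in for the locked operation:

static int with_obj_ww_locked(struct drm_i915_gem_object *obj,
			      int (*do_work)(struct drm_i915_gem_object *obj))
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* true: interruptible waits */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = do_work(obj);
	if (err == -EDEADLK) {
		/* Drop held locks, sleep on the contended one, then retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}
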
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 387b26400169..f86a3629ae9e 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -27,6 +27,7 @@
#include "display/i9xx_plane_regs.h"
#include "display/intel_display.h"
+#include "display/intel_display_core.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
@@ -37,7 +38,7 @@
#include "i915_reg.h"
#include "intel_clock_gating.h"
#include "intel_mchbar_regs.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
@@ -201,6 +202,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *i915)
static void cpt_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
enum pipe pipe;
u32 val;
@@ -220,7 +222,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915)
val = intel_uncore_read(&i915->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- if (i915->display.vbt.fdi_rx_polarity_inverted)
+ if (display->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 76d84cbb8361..87ac4446d306 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -11,6 +11,7 @@
#include "display/intel_color_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
@@ -20,7 +21,9 @@
#include "display/intel_lvds_regs.h"
#include "display/intel_pfit_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sbi_regs.h"
#include "display/intel_sprite_regs.h"
+#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
@@ -31,6 +34,7 @@
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "intel_gvt.h"
#include "intel_mchbar_regs.h"
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index d40ee1b42110..59bd603e6deb 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -171,6 +171,17 @@ intel_memory_region_by_type(struct drm_i915_private *i915,
return NULL;
}
+bool intel_memory_type_is_local(enum intel_memory_type mem_type)
+{
+ switch (mem_type) {
+ case INTEL_MEMORY_LOCAL:
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* intel_memory_region_reserve - Reserve a memory range
* @mem: The region for which we want to reserve a range.
@@ -216,7 +227,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
-static const char *region_type_str(u16 type)
+const char *intel_memory_type_str(enum intel_memory_type type)
{
switch (type) {
case INTEL_MEMORY_SYSTEM:
@@ -260,7 +271,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->instance = instance;
snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
- region_type_str(type), instance);
+ intel_memory_type_str(type), instance);
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 5973b6fe13cf..b3b75be9ced5 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -85,6 +85,8 @@ struct intel_memory_region {
void *region_private;
};
+bool intel_memory_type_is_local(enum intel_memory_type mem_type);
+
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance);
@@ -107,6 +109,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915);
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
enum intel_memory_type mem_type);
+const char *intel_memory_type_str(enum intel_memory_type type);
__printf(2, 3) void
intel_memory_region_set_name(struct intel_memory_region *mem,
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 3db2ba439bb5..81da75108c60 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -110,13 +110,12 @@ int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
}
int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms)
+ int timeout_ms)
{
int err;
mutex_lock(&uncore->i915->sb_lock);
- err = __snb_pcode_rw(uncore, mbox, &val, NULL,
- fast_timeout_us, slow_timeout_ms, false);
+ err = __snb_pcode_rw(uncore, mbox, &val, NULL, 250, timeout_ms, false);
mutex_unlock(&uncore->i915->sb_lock);
if (err) {
@@ -273,3 +272,27 @@ int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u3
return err;
}
+
+/* Helpers with drm device */
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return snb_pcode_read(&i915->uncore, mbox, val, val1);
+}
+
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return snb_pcode_write_timeout(&i915->uncore, mbox, val, timeout_ms);
+}
+
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return skl_pcode_request(&i915->uncore, mbox, request, reply_mask, reply,
+ timeout_base_ms);
+}
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
index 8d2198e29422..c91a821a88d4 100644
--- a/drivers/gpu/drm/i915/intel_pcode.h
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -8,13 +8,13 @@
#include <linux/types.h>
+struct drm_device;
struct intel_uncore;
int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1);
-int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms);
+int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val, int timeout_ms);
#define snb_pcode_write(uncore, mbox, val) \
- snb_pcode_write_timeout(uncore, mbox, val, 500, 0)
+ snb_pcode_write_timeout((uncore), (mbox), (val), 1)
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
@@ -27,4 +27,13 @@ int intel_pcode_init(struct intel_uncore *uncore);
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val);
int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val);
+/* Helpers with drm device */
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
+#define intel_pcode_write(drm, mbox, val) \
+ intel_pcode_write_timeout((drm), (mbox), (val), 1)
+
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
#endif /* _INTEL_PCODE_H */
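
The new drm_device-based wrappers let callers (notably display code) issue
pcode mailbox transactions without a struct drm_i915_private pointer, and
snb_pcode_write()/intel_pcode_write() now default to a 1 ms write timeout. A
usage sketch; FOO_PCODE_MBOX is a hypothetical mailbox command, for
illustration only:

static int foo_pcode_roundtrip(struct drm_device *drm)
{
	u32 val;
	int err;

	err = intel_pcode_read(drm, FOO_PCODE_MBOX, &val, NULL);
	if (err)
		return err;

	/* intel_pcode_write() expands to intel_pcode_write_timeout(..., 1). */
	return intel_pcode_write(drm, FOO_PCODE_MBOX, val | BIT(0));
}
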
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8d9f4c410546..7ce3e6de0c19 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -59,7 +59,9 @@ static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
+ if (!rpm->debug.class)
+ ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT,
+ "intel_runtime_pm");
}
static intel_wakeref_t
diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c
deleted file mode 100644
index 41e85ac773dc..000000000000
--- a/drivers/gpu/drm/i915/intel_sbi.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2013-2021 Intel Corporation
- *
- * LPT/WPT IOSF sideband.
- */
-
-#include "i915_drv.h"
-#include "intel_sbi.h"
-#include "i915_reg.h"
-
-/* SBI access */
-static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination,
- u32 *val, bool is_read)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u32 cmd;
-
- lockdep_assert_held(&i915->sbi_lock);
-
- if (intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to become ready\n");
- return -EBUSY;
- }
-
- intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
- intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
-
- if (destination == SBI_ICLK)
- cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
- else
- cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
- if (!is_read)
- cmd |= BIT(8);
- intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
-
- if (__intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100, 100, &cmd)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to complete read\n");
- return -ETIMEDOUT;
- }
-
- if (cmd & SBI_RESPONSE_FAIL) {
- drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
- return -ENXIO;
- }
-
- if (is_read)
- *val = intel_uncore_read_fw(uncore, SBI_DATA);
-
- return 0;
-}
-
-void intel_sbi_lock(struct drm_i915_private *i915)
-{
- mutex_lock(&i915->sbi_lock);
-}
-
-void intel_sbi_unlock(struct drm_i915_private *i915)
-{
- mutex_unlock(&i915->sbi_lock);
-}
-
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination)
-{
- u32 result = 0;
-
- intel_sbi_rw(i915, reg, destination, &result, true);
-
- return result;
-}
-
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination)
-{
- intel_sbi_rw(i915, reg, destination, &value, false);
-}
-
-void intel_sbi_init(struct drm_i915_private *i915)
-{
- mutex_init(&i915->sbi_lock);
-}
-
-void intel_sbi_fini(struct drm_i915_private *i915)
-{
- mutex_destroy(&i915->sbi_lock);
-}
diff --git a/drivers/gpu/drm/i915/intel_sbi.h b/drivers/gpu/drm/i915/intel_sbi.h
deleted file mode 100644
index 85161a4f13b8..000000000000
--- a/drivers/gpu/drm/i915/intel_sbi.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2013-2021 Intel Corporation
- */
-
-#ifndef _INTEL_SBI_H_
-#define _INTEL_SBI_H_
-
-#include <linux/types.h>
-
-struct drm_i915_private;
-
-enum intel_sbi_destination {
- SBI_ICLK,
- SBI_MPHY,
-};
-
-void intel_sbi_init(struct drm_i915_private *i915);
-void intel_sbi_fini(struct drm_i915_private *i915);
-void intel_sbi_lock(struct drm_i915_private *i915);
-void intel_sbi_unlock(struct drm_i915_private *i915);
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination);
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination);
-
-#endif /* _INTEL_SBI_H_ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 48a10ff80148..c8e29fd72290 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -24,6 +24,8 @@
#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
+#include "display/intel_display_core.h"
+
#include "gt/intel_gt.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
@@ -2642,7 +2644,7 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
* is still pending (unless the HW is totally dead), but better to be
* safe in case something unexpected happens
*/
- ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
+ ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms, NULL);
if (ret) {
drm_err(&i915->drm,
"Failed to wait for Driver-FLR bit to clear! %d\n",
@@ -2657,7 +2659,7 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
/* Wait for hardware teardown to complete */
ret = intel_wait_for_register_fw(uncore, GU_CNTL,
DRIVERFLR, 0,
- flr_timeout_ms);
+ flr_timeout_ms, NULL);
if (ret) {
drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
return;
@@ -2666,7 +2668,7 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
/* Wait for hardware/firmware re-init to complete */
ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
DRIVERFLR_STATUS, DRIVERFLR_STATUS,
- flr_timeout_ms);
+ flr_timeout_ms, NULL);
if (ret) {
drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index e39582950627..6048b99b96cb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -313,10 +313,11 @@ intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
- unsigned int timeout_ms)
+ unsigned int timeout_ms,
+ u32 *out_value)
{
return __intel_wait_for_register_fw(uncore, reg, mask, value,
- 2, timeout_ms, NULL);
+ 2, timeout_ms, out_value);
}
#define IS_GSI_REG(reg) ((reg) < 0x40000)
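
intel_wait_for_register_fw() now takes an out_value parameter: callers that
don't need the final register value pass NULL, as in the driver_initiated_flr()
hunks above, while others can capture the last value read. A minimal sketch,
where FOO_STATUS and FOO_BUSY are hypothetical:

static int wait_foo_idle(struct intel_uncore *uncore)
{
	u32 status;
	int err;

	/* Poll until FOO_BUSY clears, keeping the last read for reporting. */
	err = intel_wait_for_register_fw(uncore, FOO_STATUS,
					 FOO_BUSY, 0, 100, &status);
	if (err)
		drm_err(&uncore->i915->drm,
			"FOO still busy, status %08x\n", status);

	return err;
}
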
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 07e81be4d392..7fa194de5d35 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -114,7 +114,8 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
"wakeref.work", &key->work, 0);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
- ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
+ if (!wf->debug.class)
+ ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, "intel_wakeref");
#endif
}
@@ -135,7 +136,7 @@ int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
static void wakeref_auto_timeout(struct timer_list *t)
{
- struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
+ struct intel_wakeref_auto *wf = timer_container_of(wf, t, timer);
intel_wakeref_t wakeref;
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 48836ef52d40..a2894a56e18f 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -7,8 +7,6 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
-#include <drm/drm_print.h>
-
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
@@ -16,11 +14,13 @@
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/ref_tracker.h>
-#include <linux/slab.h>
-#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+struct drm_printer;
+struct intel_runtime_pm;
+struct intel_wakeref;
+
typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_REFTRACK_DEAD_COUNT 16
@@ -32,9 +32,6 @@ typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-struct intel_runtime_pm;
-struct intel_wakeref;
-
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 88870844b5bd..2fb7a9e7efec 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -73,8 +73,8 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
- if (!request)
- return -ENOMEM;
+ if (IS_ERR(request))
+ return PTR_ERR(request);
i915_request_add(request);
@@ -91,8 +91,8 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
request = mock_request(rcs0(i915)->kernel_context, T);
- if (!request)
- return -ENOMEM;
+ if (IS_ERR(request))
+ return PTR_ERR(request);
i915_request_get(request);
@@ -160,8 +160,8 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
request = mock_request(rcs0(i915)->kernel_context, T);
- if (!request)
- return -ENOMEM;
+ if (IS_ERR(request))
+ return PTR_ERR(request);
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
@@ -219,8 +219,8 @@ static int igt_request_rewind(void *arg)
GEM_BUG_ON(IS_ERR(ce));
request = mock_request(ce, 2 * HZ);
intel_context_put(ce);
- if (!request) {
- err = -ENOMEM;
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
goto err_context_0;
}
@@ -237,8 +237,8 @@ static int igt_request_rewind(void *arg)
GEM_BUG_ON(IS_ERR(ce));
vip = mock_request(ce, 0);
intel_context_put(ce);
- if (!vip) {
- err = -ENOMEM;
+ if (IS_ERR(vip)) {
+ err = PTR_ERR(vip);
goto err_context_1;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index f08f6674911e..7b856b5090f9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -413,15 +413,8 @@ static int igt_mock_splintered_region(void *arg)
close_objects(mem, &objects);
- /*
- * While we should be able allocate everything without any flag
- * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
- * actually limited to the largest power-of-two for the region size i.e
- * max_order, due to the inner workings of the buddy allocator. So make
- * sure that does indeed hold true.
- */
-
- obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
+ obj = igt_object_create(mem, &objects, roundup_pow_of_two(size),
+ I915_BO_ALLOC_CONTIGUOUS);
if (!IS_ERR(obj)) {
pr_err("%s too large contiguous allocation was not rejected\n",
__func__);
@@ -429,8 +422,7 @@ static int igt_mock_splintered_region(void *arg)
goto out_close;
}
- obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
- I915_BO_ALLOC_CONTIGUOUS);
+ obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
if (IS_ERR(obj)) {
pr_err("%s largest possible contiguous allocation failed\n",
__func__);
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index d5ecc68155da..d79e4defb71d 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -55,7 +55,7 @@ void onstack_fence_fini(struct i915_sw_fence *fence)
static void timed_fence_wake(struct timer_list *t)
{
- struct timed_fence *tf = from_timer(tf, t, timer);
+ struct timed_fence *tf = timer_container_of(tf, t, timer);
i915_sw_fence_commit(&tf->fence);
}
@@ -77,7 +77,7 @@ void timed_fence_fini(struct timed_fence *tf)
if (timer_delete_sync(&tf->timer))
i915_sw_fence_commit(&tf->fence);
- destroy_timer_on_stack(&tf->timer);
+ timer_destroy_on_stack(&tf->timer);
i915_sw_fence_fini(&tf->fence);
}
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
index eb03b5b28bad..25b8726b9dff 100644
--- a/drivers/gpu/drm/i915/selftests/librapl.c
+++ b/drivers/gpu/drm/i915/selftests/librapl.c
@@ -22,12 +22,12 @@ u64 librapl_energy_uJ(void)
unsigned long long power;
u32 units;
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+ if (rdmsrq_safe(MSR_RAPL_POWER_UNIT, &power))
return 0;
units = (power & 0x1f00) >> 8;
- if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+ if (rdmsrq_safe(MSR_PP1_ENERGY_STATUS, &power))
return 0;
return (1000000 * power) >> units; /* convert to uJ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index a77e5b26542c..fb8751bd5df0 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -28,6 +28,8 @@
#include <drm/drm_managed.h>
+#include "display/intel_display_device.h"
+
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
@@ -141,6 +143,7 @@ struct drm_i915_private *mock_gem_device(void)
static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
struct drm_i915_private *i915;
+ struct intel_display *display;
struct pci_dev *pdev;
int ret;
@@ -180,7 +183,11 @@ struct drm_i915_private *mock_gem_device(void)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, &mock_info);
- intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev);
+ if (IS_ERR(display))
+ goto err_device;
+
+ i915->display = display;
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
@@ -257,6 +264,7 @@ err_ttm:
intel_gt_driver_late_release_all(i915);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
+err_device:
mock_destroy_device(i915);
return NULL;
@@ -266,6 +274,8 @@ void mock_destroy_device(struct drm_i915_private *i915)
{
struct device *dev = i915->drm.dev;
+ intel_display_device_remove(i915->display);
+
devres_release_group(dev, NULL);
put_device(dev);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 09f747228dff..1b0cf073e964 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -35,7 +35,7 @@ mock_request(struct intel_context *ce, unsigned long delay)
/* NB the i915->requests slab cache is enlarged to fit mock_request */
request = intel_context_create_request(ce);
if (IS_ERR(request))
- return NULL;
+ return request;
request->mock.delay = delay;
return request;
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index f60eedb0e92c..deb159548a09 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -5,12 +5,17 @@
#include <linux/string_helpers.h>
+#include <drm/drm_managed.h>
+
+#include "../display/intel_display_core.h" /* FIXME */
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
-#include "vlv_sideband.h"
+#include "intel_uncore.h"
+#include "vlv_iosf_sb.h"
struct dram_dimm_info {
u16 size;
@@ -33,8 +38,14 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
DRAM_TYPE_STR(LPDDR4),
+ DRAM_TYPE_STR(DDR5),
+ DRAM_TYPE_STR(LPDDR5),
+ DRAM_TYPE_STR(GDDR),
+ DRAM_TYPE_STR(GDDR_ECC),
};
+ BUILD_BUG_ON(ARRAY_SIZE(str) != __INTEL_DRAM_TYPE_MAX);
+
if (type >= ARRAY_SIZE(str))
type = INTEL_DRAM_UNKNOWN;
@@ -91,9 +102,9 @@ static unsigned int chv_mem_freq(struct drm_i915_private *i915)
{
u32 val;
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
- val = vlv_cck_read(i915, CCK_FUSE_REG);
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_CCK));
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_CCK, CCK_FUSE_REG);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_CCK));
switch ((val >> 2) & 0x7) {
case 3:
@@ -107,9 +118,9 @@ static unsigned int vlv_mem_freq(struct drm_i915_private *i915)
{
u32 val;
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
switch ((val >> 6) & 3) {
case 0:
@@ -375,9 +386,8 @@ intel_is_dram_symmetric(const struct dram_channel_info *ch0,
}
static int
-skl_dram_get_channels_info(struct drm_i915_private *i915)
+skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- struct dram_info *dram_info = &i915->dram_info;
struct dram_channel_info ch0 = {}, ch1 = {};
u32 val;
int ret;
@@ -438,16 +448,13 @@ skl_get_dram_type(struct drm_i915_private *i915)
}
static int
-skl_get_dram_info(struct drm_i915_private *i915)
+skl_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- struct dram_info *dram_info = &i915->dram_info;
int ret;
dram_info->type = skl_get_dram_type(i915);
- drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
- intel_dram_type_str(dram_info->type));
- ret = skl_dram_get_channels_info(i915);
+ ret = skl_dram_get_channels_info(i915, dram_info);
if (ret)
return ret;
@@ -532,9 +539,8 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
}
-static int bxt_get_dram_info(struct drm_i915_private *i915)
+static int bxt_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- struct dram_info *dram_info = &i915->dram_info;
u32 val;
u8 valid_ranks = 0;
int i;
@@ -560,10 +566,9 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dram_info->type != type);
drm_dbg_kms(&i915->drm,
- "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
+ "CH%u DIMM size: %u Gb, width: X%u, ranks: %u\n",
i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
+ dimm.size, dimm.width, dimm.ranks);
if (valid_ranks == 0)
valid_ranks = dimm.ranks;
@@ -580,14 +585,14 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
return 0;
}
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
+ struct dram_info *dram_info)
{
- struct dram_info *dram_info = &dev_priv->dram_info;
u32 val = 0;
int ret;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
+ ret = intel_pcode_read(&dev_priv->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
@@ -642,27 +647,26 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
return 0;
}
-static int gen11_get_dram_info(struct drm_i915_private *i915)
+static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- int ret = skl_get_dram_info(i915);
+ int ret = skl_get_dram_info(i915, dram_info);
if (ret)
return ret;
- return icl_pcode_read_mem_global_info(i915);
+ return icl_pcode_read_mem_global_info(i915, dram_info);
}
-static int gen12_get_dram_info(struct drm_i915_private *i915)
+static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- i915->dram_info.wm_lv_0_adjust_needed = false;
+ dram_info->wm_lv_0_adjust_needed = false;
- return icl_pcode_read_mem_global_info(i915);
+ return icl_pcode_read_mem_global_info(i915, dram_info);
}
-static int xelpdp_get_dram_info(struct drm_i915_private *i915)
+static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
- struct dram_info *dram_info = &i915->dram_info;
switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
case 0:
@@ -703,16 +707,22 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
return 0;
}
-void intel_dram_detect(struct drm_i915_private *i915)
+int intel_dram_detect(struct drm_i915_private *i915)
{
- struct dram_info *dram_info = &i915->dram_info;
+ struct dram_info *dram_info;
int ret;
detect_fsb_freq(i915);
detect_mem_freq(i915);
if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
- return;
+ return 0;
+
+ dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL);
+ if (!dram_info)
+ return -ENOMEM;
+
+ i915->dram_info = dram_info;
/*
* Assume level 0 watermark latency adjustment is needed until proven
@@ -721,17 +731,22 @@ void intel_dram_detect(struct drm_i915_private *i915)
dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915);
if (DISPLAY_VER(i915) >= 14)
- ret = xelpdp_get_dram_info(i915);
+ ret = xelpdp_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 12)
- ret = gen12_get_dram_info(i915);
+ ret = gen12_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 11)
- ret = gen11_get_dram_info(i915);
+ ret = gen11_get_dram_info(i915, dram_info);
else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
- ret = bxt_get_dram_info(i915);
+ ret = bxt_get_dram_info(i915, dram_info);
else
- ret = skl_get_dram_info(i915);
+ ret = skl_get_dram_info(i915, dram_info);
+
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
+ /* TODO: Do we want to abort probe on dram detection failures? */
if (ret)
- return;
+ return 0;
drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points);
@@ -739,6 +754,20 @@ void intel_dram_detect(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
str_yes_no(dram_info->wm_lv_0_adjust_needed));
+
+ return 0;
+}
+
+/*
+ * Returns NULL for platforms that don't have DRAM info. Avoid overzealous NULL
+ * checks; code on platforms that shouldn't look at DRAM info must simply never
+ * dereference this, so that accidental and incorrect checks are caught.
+ */
+const struct dram_info *intel_dram_info(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return i915->dram_info;
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h
index a10136eda674..2a696e03aad4 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.h
+++ b/drivers/gpu/drm/i915/soc/intel_dram.h
@@ -6,10 +6,34 @@
#ifndef __INTEL_DRAM_H__
#define __INTEL_DRAM_H__
+#include <linux/types.h>
+
struct drm_i915_private;
+struct drm_device;
+
+struct dram_info {
+ bool wm_lv_0_adjust_needed;
+ u8 num_channels;
+ bool symmetric_memory;
+ enum intel_dram_type {
+ INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR3,
+ INTEL_DRAM_DDR4,
+ INTEL_DRAM_LPDDR3,
+ INTEL_DRAM_LPDDR4,
+ INTEL_DRAM_DDR5,
+ INTEL_DRAM_LPDDR5,
+ INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
+ } type;
+ u8 num_qgv_points;
+ u8 num_psf_gv_points;
+};
void intel_dram_edram_detect(struct drm_i915_private *i915);
-void intel_dram_detect(struct drm_i915_private *i915);
+int intel_dram_detect(struct drm_i915_private *i915);
unsigned int i9xx_fsb_freq(struct drm_i915_private *i915);
+const struct dram_info *intel_dram_info(struct drm_device *drm);
#endif /* __INTEL_DRAM_H__ */
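
struct dram_info is now allocated with drmm_kzalloc() during
intel_dram_detect() and reached through the intel_dram_info() accessor, which
deliberately returns NULL on platforms without DRAM info. A hypothetical
consumer illustrating the intended no-NULL-check usage:

static u8 foo_num_qgv_points(struct drm_device *drm)
{
	const struct dram_info *dram_info = intel_dram_info(drm);

	/*
	 * Deliberately no NULL check: per intel_dram_info(), a NULL here
	 * means this platform should not be consulting DRAM info at all.
	 */
	return dram_info->num_qgv_points;
}
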
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index 734e9f2801ea..5346b8dda79a 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -10,6 +10,8 @@
#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
+#include "../display/intel_display_core.h" /* FIXME */
+
#include "i915_drv.h"
#include "intel_gmch.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
deleted file mode 100644
index 82dc7fbd1a3e..000000000000
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ /dev/null
@@ -1,316 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright 2019 Intel Corporation.
- */
-
-#include "i915_drv.h"
-#include "i915_utils.h"
-#include "intel_pch.h"
-
-#define INTEL_PCH_DEVICE_ID_MASK 0xff80
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
-#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
-#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
-#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
-#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
-#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
-#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
-#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
-#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
-#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
-#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
-#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
-#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
-#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
-#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
-#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
-#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
-#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
-#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
-#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
-#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-
-/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
-static enum intel_pch
-intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
-{
- switch (id) {
- case INTEL_PCH_IBX_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
- drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 5);
- return PCH_IBX;
- case INTEL_PCH_CPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- return PCH_CPT;
- case INTEL_PCH_PPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- /* PPT is CPT compatible */
- return PCH_CPT;
- case INTEL_PCH_LPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT_H;
- case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT_LP;
- case INTEL_PCH_WPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
- /* WPT is LPT compatible */
- return PCH_LPT_H;
- case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
- /* WPT is LPT compatible */
- return PCH_LPT_LP;
- case INTEL_PCH_SPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_KBP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- /* KBP is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_CNP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm,
- "Found Cannon Lake LP PCH (CNP-LP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CMP_DEVICE_ID_TYPE:
- case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv));
- /* CMP is CNP compatible */
- return PCH_CNP;
- case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- /* CMP-V is based on KBP, which is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_ICP_DEVICE_ID_TYPE:
- case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- return PCH_ICP;
- case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
- drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
- IS_ELKHARTLAKE(dev_priv)));
- /* MCC is TGP compatible */
- return PCH_TGP;
- case INTEL_PCH_TGP_DEVICE_ID_TYPE:
- case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv) &&
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_TGP;
- case INTEL_PCH_JSP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
- IS_ELKHARTLAKE(dev_priv)));
- /* JSP is ICP compatible */
- return PCH_ICP;
- case INTEL_PCH_ADP_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
- !IS_ALDERLAKE_P(dev_priv));
- return PCH_ADP;
- default:
- return PCH_NONE;
- }
-}
-
-static bool intel_is_virt_pch(unsigned short id,
- unsigned short svendor, unsigned short sdevice)
-{
- return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
- id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
- svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
- sdevice == PCI_SUBDEVICE_ID_QEMU));
-}
-
-static void
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
- unsigned short *pch_id, enum intel_pch *pch_type)
-{
- unsigned short id = 0;
-
- /*
- * In a virtualized passthrough environment we can be in a
- * setup where the ISA bridge is not able to be passed through.
- * In this case, a south bridge can be emulated and we have to
- * make an educated guess as to which PCH is really there.
- */
-
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
- id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
- else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
- id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
- else if (IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
- id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (GRAPHICS_VER(dev_priv) == 6 || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
- else if (GRAPHICS_VER(dev_priv) == 5)
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-
- if (id)
- drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id);
- else
- drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
-
- *pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (drm_WARN_ON(&dev_priv->drm,
- id && *pch_type == PCH_NONE))
- id = 0;
-
- *pch_id = id;
-}
-
-void intel_detect_pch(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pch = NULL;
- unsigned short id;
- enum intel_pch pch_type;
-
- /*
- * South display engine on the same PCI device: just assign the fake
- * PCH.
- */
- if (DISPLAY_VER(dev_priv) >= 20) {
- dev_priv->pch_type = PCH_LNL;
- return;
- } else if (IS_BATTLEMAGE(dev_priv) || IS_METEORLAKE(dev_priv)) {
- /*
- * Both north display and south display are on the SoC die.
- * The real PCH (if it even exists) is uninvolved in display.
- */
- dev_priv->pch_type = PCH_MTL;
- return;
- } else if (IS_DG2(dev_priv)) {
- dev_priv->pch_type = PCH_DG2;
- return;
- } else if (IS_DG1(dev_priv)) {
- dev_priv->pch_type = PCH_DG1;
- return;
- }
-
- /*
- * The reason to probe ISA bridge instead of Dev31:Fun0 is to
- * make graphics device passthrough work easy for VMM, that only
- * need to expose ISA bridge to let driver know the real hardware
- * underneath. This is a requirement from virtualization team.
- *
- * In some virtualized environments (e.g. XEN), there is irrelevant
- * ISA bridge in the system. To work reliably, we should scan through
- * all the ISA bridge devices and check for the first match, instead
- * of only checking the first one.
- */
- while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- if (pch->vendor != PCI_VENDOR_ID_INTEL)
- continue;
-
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-
- pch_type = intel_pch_type(dev_priv, id);
- if (pch_type != PCH_NONE) {
- dev_priv->pch_type = pch_type;
- break;
- } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
- pch->subsystem_device)) {
- intel_virt_detect_pch(dev_priv, &id, &pch_type);
- dev_priv->pch_type = pch_type;
- break;
- }
- }
-
- /*
- * Use PCH_NOP (PCH but no South Display) for PCH platforms without
- * display.
- */
- if (pch && !HAS_DISPLAY(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "Display disabled, reverting to NOP PCH\n");
- dev_priv->pch_type = PCH_NOP;
- } else if (!pch) {
- if (i915_run_as_guest() && HAS_DISPLAY(dev_priv)) {
- intel_virt_detect_pch(dev_priv, &id, &pch_type);
- dev_priv->pch_type = pch_type;
- } else {
- drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
- }
- }
-
- pci_dev_put(pch);
-}
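
A note on the PCI helper driving the loop above: pci_get_class() both finds the next matching device and manages reference counts, dropping the reference on the device passed in and taking one on the device returned. That is why the loop body never calls pci_dev_put(), and why the single pci_dev_put() after the loop is still needed for whichever device was held when the loop exited. A minimal sketch of the same scan pattern, with a placeholder match test:

    #include <linux/pci.h>

    /* Sketch: iterate all Intel ISA bridges the way intel_detect_pch()
     * does. pci_get_class() releases the previous device and references
     * the next one, so only the device held at loop exit needs a put.
     */
    static void scan_isa_bridges(void)
    {
            struct pci_dev *pdev = NULL;

            while ((pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pdev))) {
                    if (pdev->vendor != PCI_VENDOR_ID_INTEL)
                            continue;
                    if (pdev->device == 0x0000)     /* placeholder match test */
                            break;
            }
            pci_dev_put(pdev);      /* NULL-safe if the scan found nothing */
    }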
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h
deleted file mode 100644
index 635aea7a5539..000000000000
--- a/drivers/gpu/drm/i915/soc/intel_pch.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2019 Intel Corporation.
- */
-
-#ifndef __INTEL_PCH__
-#define __INTEL_PCH__
-
-struct drm_i915_private;
-
-/*
- * Sorted by south display engine compatibility.
- * If the new PCH comes with a south display engine that is not
- * inherited from the latest item, please do not add it to the
- * end. Instead, add it right after its "parent" PCH.
- */
-enum intel_pch {
- PCH_NOP = -1, /* PCH without south display */
- PCH_NONE = 0, /* No PCH present */
- PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
- PCH_LPT_H, /* Lynxpoint/Wildcatpoint H PCH */
- PCH_LPT_LP, /* Lynxpoint/Wildcatpoint LP PCH */
- PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
- PCH_CNP, /* Cannon/Comet Lake PCH */
- PCH_ICP, /* Ice Lake/Jasper Lake PCH */
- PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
- PCH_ADP, /* Alder Lake PCH */
-
- /* Fake PCHs, functionality handled on the same PCI dev */
- PCH_DG1 = 1024,
- PCH_DG2,
- PCH_MTL,
- PCH_LNL,
-};
-
-#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
-#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
-#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
-#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
-#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
-#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
-#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
-#define HAS_PCH_LPT_H(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H)
-#define HAS_PCH_LPT_LP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H || \
- INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
-#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
-#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
-#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-
-void intel_detect_pch(struct drm_i915_private *dev_priv);
-
-#endif /* __INTEL_PCH__ */
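
The ordering comment at the top of the deleted enum is load-bearing: display code commonly gates behaviour with relational comparisons against these values, so inserting a new PCH in the wrong position would silently change which code paths run. A hedged example of the comparison style (the helper name is invented for illustration):

    /* Works only because the enum is sorted by south display engine
     * compatibility, per the comment above the enum definition.
     */
    if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
            setup_icp_style_hotplug(dev_priv);      /* hypothetical helper */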
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_iosf_sb.c
index 114ae8eb9cd5..f4b386933141 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb.c
@@ -6,9 +6,7 @@
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
-#include "vlv_sideband.h"
-
-#include "display/intel_dpio_phy.h"
+#include "vlv_iosf_sb.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
@@ -57,19 +55,29 @@ static void __vlv_punit_put(struct drm_i915_private *i915)
iosf_mbi_punit_release();
}
-void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
+void vlv_iosf_sb_get(struct drm_device *drm, unsigned long unit_mask)
{
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_get(i915);
mutex_lock(&i915->vlv_iosf_sb.lock);
+
+ i915->vlv_iosf_sb.locked_unit_mask |= unit_mask;
}
-void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
+void vlv_iosf_sb_put(struct drm_device *drm, unsigned long unit_mask)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ i915->vlv_iosf_sb.locked_unit_mask &= ~unit_mask;
+
+ drm_WARN_ON(drm, i915->vlv_iosf_sb.locked_unit_mask);
+
mutex_unlock(&i915->vlv_iosf_sb.lock);
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_put(i915);
}
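
Compared with the old signature, the reworked get/put pair takes a mask of units rather than an opaque "ports" value tied to per-unit inline wrappers, and the new locked_unit_mask bookkeeping lets vlv_iosf_sb_put() warn when units are still marked as held at release time. A usage sketch under the new interface (the register offset is arbitrary, chosen only for illustration):

    /* Acquire two sideband units, perform an access, release both. */
    unsigned long units = BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_CCK);
    u32 val;

    vlv_iosf_sb_get(drm, units);
    val = vlv_iosf_sb_read(drm, VLV_IOSF_SB_CCK, 0x6c); /* arbitrary addr */
    vlv_iosf_sb_put(drm, units);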
@@ -123,131 +131,83 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
return err;
}
-u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
-{
- return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRWRDA_NP, addr, &val);
-}
-
-u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
+static u32 unit_to_devfn(enum vlv_iosf_sb_unit unit)
{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRWRDA_NP, reg, &val);
-}
-
-u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
+ if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2 ||
+ unit == VLV_IOSF_SB_FLISDSI)
+ return DPIO_DEVFN;
+ else
+ return PCI_DEVFN(0, 0);
}
-void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
+static u32 unit_to_port(enum vlv_iosf_sb_unit unit)
{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRWRDA_NP, reg, &val);
+ switch (unit) {
+ case VLV_IOSF_SB_BUNIT:
+ return IOSF_PORT_BUNIT;
+ case VLV_IOSF_SB_CCK:
+ return IOSF_PORT_CCK;
+ case VLV_IOSF_SB_CCU:
+ return IOSF_PORT_CCU;
+ case VLV_IOSF_SB_DPIO:
+ return IOSF_PORT_DPIO;
+ case VLV_IOSF_SB_DPIO_2:
+ return IOSF_PORT_DPIO_2;
+ case VLV_IOSF_SB_FLISDSI:
+ return IOSF_PORT_FLISDSI;
+ case VLV_IOSF_SB_GPIO:
+ return 0; /* FIXME: unused */
+ case VLV_IOSF_SB_NC:
+ return IOSF_PORT_NC;
+ case VLV_IOSF_SB_PUNIT:
+ return IOSF_PORT_PUNIT;
+ default:
+ return 0;
+ }
}
-u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
+static u32 unit_to_opcode(enum vlv_iosf_sb_unit unit, bool write)
{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
+ if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2)
+ return write ? SB_MWR_NP : SB_MRD_NP;
+ else
+ return write ? SB_CRWRDA_NP : SB_CRRDDA_NP;
}
-void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
+u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr)
{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRWRDA_NP, reg, &val);
-}
+ struct drm_i915_private *i915 = to_i915(drm);
+ u32 devfn, port, opcode, val = 0;
-static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
-{
- /*
- * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
- * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
- */
- if (IS_CHERRYVIEW(i915))
- return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
- else
- return IOSF_PORT_DPIO;
-}
+ devfn = unit_to_devfn(unit);
+ port = unit_to_port(unit);
+ opcode = unit_to_opcode(unit, false);
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg)
-{
- u32 port = vlv_dpio_phy_iosf_port(i915, phy);
- u32 val = 0;
+ if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
+ return 0;
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
+ drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));
- /*
- * FIXME: There might be some registers where all 1's is a valid value,
- * so ideally we should check the register offset instead...
- */
- drm_WARN(&i915->drm, val == 0xffffffff,
- "DPIO PHY%d read reg 0x%x == 0x%x\n",
- phy, reg, val);
+ vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);
return val;
}
-void vlv_dpio_write(struct drm_i915_private *i915,
- enum dpio_phy phy, int reg, u32 val)
+int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val)
{
- u32 port = vlv_dpio_phy_iosf_port(i915, phy);
+ struct drm_i915_private *i915 = to_i915(drm);
+ u32 devfn, port, opcode;
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
-}
+ devfn = unit_to_devfn(unit);
+ port = unit_to_port(unit);
+ opcode = unit_to_opcode(unit, true);
-u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
+ if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
+ return -EINVAL;
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
- reg, &val);
- return val;
-}
+ drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));
-void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
- reg, &val);
+ return vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);
}
void vlv_iosf_sb_init(struct drm_i915_private *i915)
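
The net effect of this file: roughly a dozen per-unit accessors (vlv_punit_read(), vlv_bunit_write(), vlv_dpio_read(), and so on) collapse into a single read and a single write routine, with the devfn, port, and opcode derived from the unit enum by the three helpers above. Callers convert mechanically; a hedged before/after sketch using a register from the sideband register header:

    /* Before (interface removed above): */
    val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

    /* After: the unit is explicit and the caller already holds the
     * corresponding vlv_iosf_sb_get() lock for VLV_IOSF_SB_PUNIT.
     */
    val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT,
                           PUNIT_REG_GPU_FREQ_STS);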
diff --git a/drivers/gpu/drm/i915/vlv_iosf_sb.h b/drivers/gpu/drm/i915/vlv_iosf_sb.h
new file mode 100644
index 000000000000..e2fea29a30ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _VLV_IOSF_SB_H_
+#define _VLV_IOSF_SB_H_
+
+#include <linux/types.h>
+
+#include "vlv_iosf_sb_reg.h"
+
+struct drm_device;
+struct drm_i915_private;
+
+enum vlv_iosf_sb_unit {
+ VLV_IOSF_SB_BUNIT,
+ VLV_IOSF_SB_CCK,
+ VLV_IOSF_SB_CCU,
+ VLV_IOSF_SB_DPIO,
+ VLV_IOSF_SB_DPIO_2,
+ VLV_IOSF_SB_FLISDSI,
+ VLV_IOSF_SB_GPIO,
+ VLV_IOSF_SB_NC,
+ VLV_IOSF_SB_PUNIT,
+};
+
+void vlv_iosf_sb_init(struct drm_i915_private *i915);
+void vlv_iosf_sb_fini(struct drm_i915_private *i915);
+
+void vlv_iosf_sb_get(struct drm_device *drm, unsigned long unit_mask);
+void vlv_iosf_sb_put(struct drm_device *drm, unsigned long unit_mask);
+
+u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr);
+int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val);
+
+#endif /* _VLV_IOSF_SB_H_ */
diff --git a/drivers/gpu/drm/i915/vlv_sideband_reg.h b/drivers/gpu/drm/i915/vlv_iosf_sb_reg.h
index b7fbff3d0409..f977fb3b6e17 100644
--- a/drivers/gpu/drm/i915/vlv_sideband_reg.h
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb_reg.h
@@ -3,8 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
-#ifndef _VLV_SIDEBAND_REG_H_
-#define _VLV_SIDEBAND_REG_H_
+#ifndef _VLV_IOSF_SB_REG_H_
+#define _VLV_IOSF_SB_REG_H_
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
@@ -177,4 +177,4 @@
#define CCK_FREQUENCY_STATUS_SHIFT 8
#define CCK_FREQUENCY_VALUES (0x1f << 0)
-#endif /* _VLV_SIDEBAND_REG_H_ */
+#endif /* _VLV_IOSF_SB_REG_H_ */
diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
deleted file mode 100644
index 31813e07c56f..000000000000
--- a/drivers/gpu/drm/i915/vlv_sideband.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2013-2021 Intel Corporation
- */
-
-#ifndef _VLV_SIDEBAND_H_
-#define _VLV_SIDEBAND_H_
-
-#include <linux/bitops.h>
-#include <linux/types.h>
-
-#include "vlv_sideband_reg.h"
-
-enum dpio_phy;
-struct drm_i915_private;
-
-enum {
- VLV_IOSF_SB_BUNIT,
- VLV_IOSF_SB_CCK,
- VLV_IOSF_SB_CCU,
- VLV_IOSF_SB_DPIO,
- VLV_IOSF_SB_FLISDSI,
- VLV_IOSF_SB_GPIO,
- VLV_IOSF_SB_NC,
- VLV_IOSF_SB_PUNIT,
-};
-
-void vlv_iosf_sb_init(struct drm_i915_private *i915);
-void vlv_iosf_sb_fini(struct drm_i915_private *i915);
-
-void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
-void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
-
-static inline void vlv_bunit_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_BUNIT));
-}
-
-u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg);
-void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_bunit_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_BUNIT));
-}
-
-static inline void vlv_cck_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
-}
-
-u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg);
-void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_cck_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
-}
-
-static inline void vlv_ccu_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCU));
-}
-
-u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg);
-void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_ccu_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCU));
-}
-
-static inline void vlv_dpio_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO));
-}
-
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg);
-void vlv_dpio_write(struct drm_i915_private *i915,
- enum dpio_phy phy, int reg, u32 val);
-
-static inline void vlv_dpio_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_DPIO));
-}
-
-static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_FLISDSI));
-}
-
-u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg);
-void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_FLISDSI));
-}
-
-static inline void vlv_nc_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_NC));
-}
-
-u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr);
-
-static inline void vlv_nc_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_NC));
-}
-
-static inline void vlv_punit_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
-}
-
-u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr);
-int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val);
-
-static inline void vlv_punit_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
-}
-
-#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index 3d9d4d40fb80..7cca66f00a38 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -12,8 +12,10 @@ powervr-y := \
pvr_fw.o \
pvr_fw_meta.o \
pvr_fw_mips.o \
+ pvr_fw_riscv.o \
pvr_fw_startstop.o \
pvr_fw_trace.o \
+ pvr_fw_util.o \
pvr_gem.o \
pvr_hwrt.o \
pvr_job.o \
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c
index 6b77c9b4bde8..c7ce7daaa87a 100644
--- a/drivers/gpu/drm/imagination/pvr_debugfs.c
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.c
@@ -28,9 +28,8 @@ pvr_debugfs_init(struct drm_minor *minor)
struct drm_device *drm_dev = minor->dev;
struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
struct dentry *root = minor->debugfs_root;
- size_t i;
- for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
+ for (size_t i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i];
struct dentry *dir;
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
index 1704c0268589..8b9ba4983c4c 100644
--- a/drivers/gpu/drm/imagination/pvr_device.c
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>
@@ -120,6 +121,21 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
return 0;
}
+static int pvr_device_reset_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct reset_control *reset;
+
+ reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
+ "failed to get gpu reset line\n");
+
+ pvr_dev->reset = reset;
+
+ return 0;
+}
+
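Because the reset line is requested with the _optional_ variant, a missing resets property in devicetree yields a NULL reset_control rather than an error, and the reset_control_*() API treats NULL as a no-op. The rest of the driver can therefore toggle the line unconditionally; a sketch of how it would typically be driven around power transitions (the surrounding power code is not part of this hunk, so treat this as illustrative):

    /* Power-up path: release the GPU from reset once power is stable. */
    err = reset_control_deassert(pvr_dev->reset);   /* NULL -> no-op, 0 */
    if (err)
            return err;

    /* Power-down path: put the GPU back into reset. */
    reset_control_assert(pvr_dev->reset);
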
/**
* pvr_device_process_active_queues() - Process all queue related events.
* @pvr_dev: PowerVR device to check
@@ -146,9 +162,61 @@ static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
mutex_unlock(&pvr_dev->queues.lock);
}
+static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
+{
+ u32 events;
+
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);
+
+ return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
+}
+
+static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
+{
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
+ ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
+}
+
+static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u32 events;
+
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);
+
+ /* Handle only these events on the host and leave the rest to the FW. */
+ events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
+ ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);
+
+ if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
+ u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);
+
+ drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
+ }
+
+ if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
+ /*
+ * The watchdog timer is disabled by the driver, so this event
+ * should never fire.

+ */
+ drm_info(drm_dev, "Safety event: Watchdog timeout\n");
+ }
+}
+
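Two registers cooperate here: the common EVENT_STATUS register only reports that some safety event is pending (the SAFETY_EN bit tested in pvr_device_safety_irq_pending()), while SAFETY_EVENT_STATUS__ROGUEXE holds the individual causes. Note that the handler clears cause bits before acting on its snapshot, so an event that fires again mid-handling re-latches the status instead of being lost; the threaded handler below then catches it on the next pass of its loop:

    while (pvr_device_safety_irq_pending(pvr_dev)) {
            pvr_device_safety_irq_clear(pvr_dev);     /* ack first...      */
            pvr_device_handle_safety_events(pvr_dev); /* ...then act on it */
    }
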
static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
{
struct pvr_device *pvr_dev = data;
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
irqreturn_t ret = IRQ_NONE;
/* We are in the threaded handler, we can keep dequeuing events until we
@@ -164,30 +232,76 @@ static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
pvr_device_process_active_queues(pvr_dev);
}
- pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev);
+ pm_runtime_mark_last_busy(drm_dev->dev);
ret = IRQ_HANDLED;
}
- /* Unmask FW irqs before returning, so new interrupts can be received. */
- pvr_fw_irq_enable(pvr_dev);
+ if (pvr_dev->has_safety_events) {
+ int err;
+
+ /*
+ * Ensure the GPU is powered on since some safety events (such
+ * as ECC faults) can happen outside of job submissions, which
+ * are otherwise the only time a power reference is held.
+ */
+ err = pvr_power_get(pvr_dev);
+ if (err) {
+ drm_err_ratelimited(drm_dev,
+ "%s: could not take power reference (%d)\n",
+ __func__, err);
+ return ret;
+ }
+
+ while (pvr_device_safety_irq_pending(pvr_dev)) {
+ pvr_device_safety_irq_clear(pvr_dev);
+ pvr_device_handle_safety_events(pvr_dev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ pvr_power_put(pvr_dev);
+ }
+
return ret;
}
static irqreturn_t pvr_device_irq_handler(int irq, void *data)
{
struct pvr_device *pvr_dev = data;
+ bool safety_irq_pending = false;
+
+ if (pvr_dev->has_safety_events)
+ safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);
- if (!pvr_fw_irq_pending(pvr_dev))
+ if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
return IRQ_NONE; /* Spurious IRQ - ignore. */
- /* Mask the FW interrupts before waking up the thread. Will be unmasked
- * when the thread handler is done processing events.
- */
- pvr_fw_irq_disable(pvr_dev);
return IRQ_WAKE_THREAD;
}
+static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
+{
+ u32 num_ecc_rams = 0;
+
+ /*
+ * Safety events are an optional feature of the RogueXE platform. They
+ * are only enabled if at least one of ECC memory or the watchdog timer
+ * is present in HW. While safety events can be generated by other
+ * systems, that will never happen if the above-mentioned hardware is
+ * not present.
+ */
+ if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
+ pvr_dev->has_safety_events = false;
+ return;
+ }
+
+ PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);
+
+ pvr_dev->has_safety_events =
+ num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
+}
+
/**
* pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
* @pvr_dev: Target PowerVR device.
@@ -205,17 +319,25 @@ pvr_device_irq_init(struct pvr_device *pvr_dev)
init_waitqueue_head(&pvr_dev->kccb.rtn_q);
+ pvr_device_safety_irq_init(pvr_dev);
+
pvr_dev->irq = platform_get_irq(plat_dev, 0);
if (pvr_dev->irq < 0)
return pvr_dev->irq;
/* Clear any pending events before requesting the IRQ line. */
pvr_fw_irq_clear(pvr_dev);
- pvr_fw_irq_enable(pvr_dev);
+ if (pvr_dev->has_safety_events)
+ pvr_device_safety_irq_clear(pvr_dev);
+
+ /*
+ * The ONESHOT flag ensures IRQs are masked while the thread handler is
+ * running.
+ */
return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
pvr_device_irq_thread_handler,
- IRQF_SHARED, "gpu", pvr_dev);
+ IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
}
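
The manual pvr_fw_irq_disable()/pvr_fw_irq_enable() bracketing removed above becomes redundant because IRQF_ONESHOT keeps the interrupt line masked at the irq-core level from the moment the hard handler returns IRQ_WAKE_THREAD until the threaded handler completes. That is also what allows the FW interrupt enable register, and with it the whole per-processor irq register table (see the pvr_fw.h hunk below), to disappear. The essential shape of the request:

    /* ONESHOT: the irq core masks the line while the thread runs,
     * replacing the driver's hand-rolled FW interrupt mask/unmask.
     */
    err = request_threaded_irq(irq, hard_handler, thread_handler,
                               IRQF_SHARED | IRQF_ONESHOT, "gpu", data);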
/**
@@ -509,6 +631,11 @@ pvr_device_init(struct pvr_device *pvr_dev)
if (err)
return err;
+ /* Get the reset line for the GPU */
+ err = pvr_device_reset_init(pvr_dev);
+ if (err)
+ return err;
+
/* Explicitly power the GPU so we can access control registers before the FW is booted. */
err = pm_runtime_resume_and_get(dev);
if (err)
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index 6d0dfacb677b..7cb01c38d2a9 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -18,6 +18,7 @@
#include <linux/bits.h>
#include <linux/compiler_attributes.h>
#include <linux/compiler_types.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -131,6 +132,22 @@ struct pvr_device {
*/
struct clk *mem_clk;
+ struct pvr_device_power {
+ struct device **domain_devs;
+ struct device_link **domain_links;
+
+ u32 domain_count;
+ } power;
+
+ /**
+ * @reset: Optional reset line.
+ *
+ * This may be used on some platforms to provide a reset line that needs to be de-asserted
+ * after power-up procedure. It would also need to be asserted after the power-down
+ * procedure.
+ */
+ struct reset_control *reset;
+
/** @irq: IRQ number. */
int irq;
@@ -300,6 +317,9 @@ struct pvr_device {
* struct pvr_file.
*/
spinlock_t ctx_list_lock;
+
+ /** @has_safety_events: Whether this device can raise safety events. */
+ bool has_safety_events;
};
/**
@@ -728,8 +748,22 @@ pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
__union_size, __member_size); \
})
-#define PVR_FW_PROCESSOR_TYPE_META 0
-#define PVR_FW_PROCESSOR_TYPE_MIPS 1
-#define PVR_FW_PROCESSOR_TYPE_RISCV 2
+/*
+ * These utility functions should more properly be placed in pvr_fw.h, but that
+ * would cause a dependency cycle between that header and this one. Since
+ * they're primarily used in pvr_device.c, let's put them in here for now.
+ */
+
+static __always_inline bool
+pvr_fw_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_dev->fw_dev.defs->irq_pending(pvr_dev);
+}
+
+static __always_inline void
+pvr_fw_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_dev->fw_dev.defs->irq_clear(pvr_dev);
+}
#endif /* PVR_DEVICE_H */
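
pvr_fw_irq_pending() and pvr_fw_irq_clear() are now thin trampolines into the per-processor ops table, replacing macros that poked registers described by a data table; the comment explains why they live here rather than in pvr_fw.h. Reduced to its essentials, the pattern looks like this (struct and field names abbreviated, not the real definitions):

    /* Ops-table dispatch: behaviour that varies per FW processor is a
     * callback; invariants (has_fixed_data_addr) become plain data.
     */
    struct fw_ops {
            bool (*irq_pending)(struct pvr_device *pvr_dev);
            void (*irq_clear)(struct pvr_device *pvr_dev);
            bool has_fixed_data_addr;
    };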
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 0639502137b4..b058ec183bb3 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -44,6 +44,7 @@
* This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
*
* * AXE-1-16M (found in Texas Instruments AM62)
+ * * BXS-4-64 MC1 (found in Texas Instruments J721S2/AM68)
*/
/**
@@ -1411,6 +1412,10 @@ pvr_probe(struct platform_device *plat_dev)
platform_set_drvdata(plat_dev, drm_dev);
+ err = pvr_power_domains_init(pvr_dev);
+ if (err)
+ return err;
+
init_rwsem(&pvr_dev->reset_sem);
pvr_context_device_init(pvr_dev);
@@ -1450,6 +1455,8 @@ err_watchdog_fini:
err_context_fini:
pvr_context_device_fini(pvr_dev);
+ pvr_power_domains_fini(pvr_dev);
+
return err;
}
@@ -1470,9 +1477,17 @@ static void pvr_remove(struct platform_device *plat_dev)
pvr_watchdog_fini(pvr_dev);
pvr_queue_device_fini(pvr_dev);
pvr_context_device_fini(pvr_dev);
+ pvr_power_domains_fini(pvr_dev);
}
static const struct of_device_id dt_match[] = {
+ { .compatible = "img,img-rogue", .data = NULL },
+
+ /*
+ * This legacy compatible string was introduced early on before the more generic
+ * "img,img-rogue" was added. Keep it around here for compatibility, but never use
+ * "img,img-axe" in new devicetrees.
+ */
{ .compatible = "img,img-axe", .data = NULL },
{}
};
@@ -1498,3 +1513,4 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
+MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
index 5e51bc980751..5228e214491c 100644
--- a/drivers/gpu/drm/imagination/pvr_free_list.c
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -237,11 +237,10 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
u64 dma_pfn = dma_addr >>
ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
- u32 dma_addr_offset;
BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
- for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
+ for (u32 dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) {
WARN_ON_ONCE(dma_pfn >> 32);
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
index d09c4c684116..b2f8cba77346 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.c
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -50,9 +50,8 @@ pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == id)
return &layout_entries[entry];
}
@@ -65,9 +64,8 @@ pvr_fw_find_private_data(struct pvr_device *pvr_dev)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == META_PRIVATE_DATA ||
layout_entries[entry].id == MIPS_PRIVATE_DATA ||
layout_entries[entry].id == RISCV_PRIVATE_DATA)
@@ -97,7 +95,6 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
const u8 *fw = firmware->data;
u32 fw_offset = firmware->size - SZ_4K;
u32 layout_table_size;
- u32 entry;
if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
return -EINVAL;
@@ -144,7 +141,7 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
return -EINVAL;
layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
- for (entry = 0; entry < header->layout_entry_num; entry++) {
+ for (u32 entry = 0; entry < header->layout_entry_num; entry++) {
u32 start_addr = layout_entries[entry].base_addr;
u32 end_addr = start_addr + layout_entries[entry].alloc_size;
@@ -233,13 +230,12 @@ pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u32 end_addr = addr + size;
- int entry = 0;
/* Ensure requested range is not zero, and size is not causing addr to overflow. */
if (end_addr <= addr)
return -EINVAL;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (int entry = 0; entry < num_layout_entries; entry++) {
u32 entry_start_addr = layout_entries[entry].base_addr;
u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;
@@ -441,6 +437,9 @@ fw_runtime_cfg_init(void *cpu_ptr, void *priv)
runtime_cfg->active_pm_latency_persistant = true;
WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
&runtime_cfg->default_dusts_num_init) != 0);
+
+ /* Keep watchdog timer disabled. */
+ runtime_cfg->wdg_period_us = 0;
}
static void
@@ -663,7 +662,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
return PTR_ERR(fw_code_ptr);
}
- if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) {
+ if (pvr_dev->fw_dev.defs->has_fixed_data_addr) {
u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;
fw_data_ptr =
@@ -939,18 +938,22 @@ pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
int
pvr_fw_init(struct pvr_device *pvr_dev)
{
+ static const struct pvr_fw_defs *fw_defs[PVR_FW_PROCESSOR_TYPE_COUNT] = {
+ [PVR_FW_PROCESSOR_TYPE_META] = &pvr_fw_defs_meta,
+ [PVR_FW_PROCESSOR_TYPE_MIPS] = &pvr_fw_defs_mips,
+ [PVR_FW_PROCESSOR_TYPE_RISCV] = &pvr_fw_defs_riscv,
+ };
+
u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
int err;
- if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META)
- fw_dev->defs = &pvr_fw_defs_meta;
- else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS)
- fw_dev->defs = &pvr_fw_defs_mips;
- else
+ if (fw_dev->processor_type >= PVR_FW_PROCESSOR_TYPE_COUNT)
return -EINVAL;
+ fw_dev->defs = fw_defs[fw_dev->processor_type];
+
err = fw_dev->defs->init(pvr_dev);
if (err)
return err;
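
Replacing the if/else ladder with a designated-initializer array indexed by the new enum makes adding a fourth processor type a one-line change and turns the validity test into an explicit bounds check. One caveat worth noting: the table is currently dense, but if an enum value were ever left without an initializer its slot would be NULL, so a sparse table would need the extra check sketched here:

    /* Hardened lookup for a potentially sparse table (hypothetical). */
    if (fw_dev->processor_type >= PVR_FW_PROCESSOR_TYPE_COUNT ||
        !fw_defs[fw_dev->processor_type])
            return -EINVAL;
    fw_dev->defs = fw_defs[fw_dev->processor_type];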
@@ -1456,6 +1459,15 @@ void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset,
*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
}
+u64
+pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ return fw_dev->fw_heap_info.gpu_addr + fw_obj->fw_addr_offset;
+}
+
/*
* pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
* structures
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
index b7966bd574a9..1404dd492d7c 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.h
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -167,47 +167,30 @@ struct pvr_fw_defs {
int (*wrapper_init)(struct pvr_device *pvr_dev);
/**
- * @has_fixed_data_addr:
+ * @irq_pending: Check interrupt status register for pending interrupts.
*
- * Called to check if firmware fixed data must be loaded at the address given by the
- * firmware layout table.
+ * @pvr_dev: Target PowerVR device.
*
* This function is mandatory.
+ */
+ bool (*irq_pending)(struct pvr_device *pvr_dev);
+
+ /**
+ * @irq_clear: Clear pending interrupts.
*
- * Returns:
- * * %true if firmware fixed data must be loaded at the address given by the firmware
- * layout table.
- * * %false otherwise.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function is mandatory.
*/
- bool (*has_fixed_data_addr)(void);
+ void (*irq_clear)(struct pvr_device *pvr_dev);
/**
- * @irq: FW Interrupt information.
+ * @has_fixed_data_addr: Specifies whether the firmware fixed data must be loaded at the
+ * address given by the firmware layout table.
*
- * Those are processor dependent, and should be initialized by the
- * processor backend in pvr_fw_funcs::init().
+ * This value is mandatory.
*/
- struct {
- /** @enable_reg: FW interrupt enable register. */
- u32 enable_reg;
-
- /** @status_reg: FW interrupt status register. */
- u32 status_reg;
-
- /**
- * @clear_reg: FW interrupt clear register.
- *
- * If @status_reg == @clear_reg, we clear by write a bit to zero,
- * otherwise we clear by writing a bit to one.
- */
- u32 clear_reg;
-
- /** @event_mask: Bitmask of events to listen for. */
- u32 event_mask;
-
- /** @clear_mask: Value to write to the clear_reg in order to clear FW IRQs. */
- u32 clear_mask;
- } irq;
+ bool has_fixed_data_addr;
};
/**
@@ -400,26 +383,16 @@ struct pvr_fw_device {
} fw_objs;
};
-#define pvr_fw_irq_read_reg(pvr_dev, name) \
- pvr_cr_read32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg)
-
-#define pvr_fw_irq_write_reg(pvr_dev, name, value) \
- pvr_cr_write32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg, value)
-
-#define pvr_fw_irq_pending(pvr_dev) \
- (pvr_fw_irq_read_reg(pvr_dev, status) & (pvr_dev)->fw_dev.defs->irq.event_mask)
-
-#define pvr_fw_irq_clear(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, clear, (pvr_dev)->fw_dev.defs->irq.clear_mask)
-
-#define pvr_fw_irq_enable(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, enable, (pvr_dev)->fw_dev.defs->irq.event_mask)
-
-#define pvr_fw_irq_disable(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, enable, 0)
+enum pvr_fw_processor_type {
+ PVR_FW_PROCESSOR_TYPE_META = 0,
+ PVR_FW_PROCESSOR_TYPE_MIPS,
+ PVR_FW_PROCESSOR_TYPE_RISCV,
+ PVR_FW_PROCESSOR_TYPE_COUNT,
+};
extern const struct pvr_fw_defs pvr_fw_defs_meta;
extern const struct pvr_fw_defs pvr_fw_defs_mips;
+extern const struct pvr_fw_defs pvr_fw_defs_riscv;
int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev);
int pvr_fw_init(struct pvr_device *pvr_dev);
@@ -506,4 +479,18 @@ pvr_fw_object_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out)
pvr_fw_object_get_fw_addr_offset(fw_obj, 0, fw_addr_out);
}
+u64
+pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj);
+
+static __always_inline size_t
+pvr_fw_obj_get_object_size(struct pvr_fw_object *fw_obj)
+{
+ return pvr_gem_object_size(fw_obj->gem);
+}
+
+/* Util functions defined in pvr_fw_util.c. These are intended for use in pvr_fw_<arch>.c files. */
+int
+pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr);
+
#endif /* PVR_FW_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index 6d13864851fc..60db3668ad3c 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -370,13 +370,12 @@ configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr)
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u64 seg_out_addr_top;
- u32 i;
seg_out_addr_top =
ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
ROGUE_FW_SEGMMU_META_BIFDM_ID);
- for (i = 0; i < num_layout_entries; i++) {
+ for (u32 i = 0; i < num_layout_entries; i++) {
/*
* FW code is using the bootloader segment which is already
* configured on boot. FW coremem code and data don't use the
@@ -534,9 +533,17 @@ pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
}
static bool
-pvr_meta_has_fixed_data_addr(void)
+pvr_meta_irq_pending(struct pvr_device *pvr_dev)
{
- return false;
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS) &
+ ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN;
+}
+
+static void
+pvr_meta_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS,
+ ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK);
}
const struct pvr_fw_defs pvr_fw_defs_meta = {
@@ -546,12 +553,7 @@ const struct pvr_fw_defs pvr_fw_defs_meta = {
.vm_unmap = pvr_meta_vm_unmap,
.get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
.wrapper_init = pvr_meta_wrapper_init,
- .has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
- .irq = {
- .enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE,
- .status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
- .clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
- .event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
- .clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK,
- },
+ .irq_pending = pvr_meta_irq_pending,
+ .irq_clear = pvr_meta_irq_clear,
+ .has_fixed_data_addr = false,
};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c
index 0bed0257e2ab..6914fc46db50 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c
@@ -8,7 +8,6 @@
#include "pvr_rogue_mips.h"
#include "pvr_vm_mips.h"
-#include <linux/elf.h>
#include <linux/err.h>
#include <linux/types.h>
@@ -16,60 +15,6 @@
#define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */
#define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M
-/**
- * process_elf_command_stream() - Process ELF firmware image and populate
- * firmware sections
- * @pvr_dev: Device pointer.
- * @fw: Pointer to firmware image.
- * @fw_code_ptr: Pointer to FW code section.
- * @fw_data_ptr: Pointer to FW data section.
- * @fw_core_code_ptr: Pointer to FW coremem code section.
- * @fw_core_data_ptr: Pointer to FW coremem data section.
- *
- * Returns :
- * * 0 on success, or
- * * -EINVAL on any error in ELF command stream.
- */
-static int
-process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
- u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
-{
- struct elf32_hdr *header = (struct elf32_hdr *)fw;
- struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
- struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 entry;
- int err;
-
- for (entry = 0; entry < header->e_phnum; entry++, program_header++) {
- void *write_addr;
-
- /* Only consider loadable entries in the ELF segment table */
- if (program_header->p_type != PT_LOAD)
- continue;
-
- err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
- program_header->p_memsz, fw_code_ptr, fw_data_ptr,
- fw_core_code_ptr, fw_core_data_ptr, &write_addr);
- if (err) {
- drm_err(drm_dev,
- "Addr 0x%x (size: %d) not found in any firmware segment",
- program_header->p_vaddr, program_header->p_memsz);
- return err;
- }
-
- /* Write to FW allocation only if available */
- if (write_addr) {
- memcpy(write_addr, fw + program_header->p_offset,
- program_header->p_filesz);
-
- memset((u8 *)write_addr + program_header->p_filesz, 0,
- program_header->p_memsz - program_header->p_filesz);
- }
- }
-
- return 0;
-}
-
static int
pvr_mips_init(struct pvr_device *pvr_dev)
{
@@ -97,11 +42,10 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
const struct pvr_fw_layout_entry *stack_entry;
struct rogue_mipsfw_boot_data *boot_data;
dma_addr_t dma_addr;
- u32 page_nr;
int err;
- err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
- fw_core_data_ptr);
+ err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr);
if (err)
return err;
@@ -132,7 +76,7 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
boot_data->reg_base = pvr_dev->regs_resource->start;
- for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
+ for (u32 page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
/* Firmware expects 4k pages, but host page size might be different. */
u32 src_page_nr = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) >> PAGE_SHIFT;
u32 page_offset = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) & ~PAGE_MASK;
@@ -228,9 +172,17 @@ pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
}
static bool
-pvr_mips_has_fixed_data_addr(void)
+pvr_mips_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS) &
+ ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN;
+}
+
+static void
+pvr_mips_irq_clear(struct pvr_device *pvr_dev)
{
- return true;
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
+ ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN);
}
const struct pvr_fw_defs pvr_fw_defs_mips = {
@@ -241,12 +193,7 @@ const struct pvr_fw_defs pvr_fw_defs_mips = {
.vm_unmap = pvr_vm_mips_unmap,
.get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset,
.wrapper_init = pvr_mips_wrapper_init,
- .has_fixed_data_addr = pvr_mips_has_fixed_data_addr,
- .irq = {
- .enable_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE,
- .status_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS,
- .clear_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
- .event_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN,
- .clear_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN,
- },
+ .irq_pending = pvr_mips_irq_pending,
+ .irq_clear = pvr_mips_irq_clear,
+ .has_fixed_data_addr = true,
};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_riscv.c b/drivers/gpu/drm/imagination/pvr_fw_riscv.c
new file mode 100644
index 000000000000..fc13d483be9a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_riscv.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_riscv.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_RISCV_SHIFT 25 /* 32 MB */
+#define ROGUE_FW_HEAP_RISCV_SIZE (1u << ROGUE_FW_HEAP_RISCV_SHIFT)
+
+static int
+pvr_riscv_wrapper_init(struct pvr_device *pvr_dev)
+{
+ const u64 common_opts =
+ ((u64)(ROGUE_FW_HEAP_RISCV_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
+ << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) |
+ ((u64)MMU_CONTEXT_MAPPING_FWPRIV
+ << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT);
+
+ u64 code_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.code_obj);
+ u64 data_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.data_obj);
+
+ /* This condition allows us to OR the addresses into the register directly. */
+ static_assert(ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT ==
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT);
+
+ WARN_ON(code_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
+ WARN_ON(data_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
+
+ pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_CODE),
+ code_addr | common_opts | ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN);
+
+ pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_DATA),
+ data_addr | common_opts |
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN);
+
+ /* Garten IDLE bit controlled by RISC-V. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG,
+ ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+ return 0;
+}
+
+struct rogue_riscv_fw_boot_data {
+ u64 coremem_code_dev_vaddr;
+ u64 coremem_data_dev_vaddr;
+ u32 coremem_code_fw_addr;
+ u32 coremem_data_fw_addr;
+ u32 coremem_code_size;
+ u32 coremem_data_size;
+ u32 flags;
+ u32 reserved;
+};
+
+static int
+pvr_riscv_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+ u32 core_code_alloc_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+ struct rogue_riscv_fw_boot_data *boot_data;
+ int err;
+
+ err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr);
+ if (err)
+ goto err_out;
+
+ boot_data = (struct rogue_riscv_fw_boot_data *)fw_data_ptr;
+
+ if (fw_mem->core_code_obj) {
+ boot_data->coremem_code_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_code_obj);
+ pvr_fw_object_get_fw_addr(fw_mem->core_code_obj, &boot_data->coremem_code_fw_addr);
+ boot_data->coremem_code_size = pvr_fw_obj_get_object_size(fw_mem->core_code_obj);
+ }
+
+ if (fw_mem->core_data_obj) {
+ boot_data->coremem_data_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_data_obj);
+ pvr_fw_object_get_fw_addr(fw_mem->core_data_obj, &boot_data->coremem_data_fw_addr);
+ boot_data->coremem_data_size = pvr_fw_obj_get_object_size(fw_mem->core_data_obj);
+ }
+
+ return 0;
+
+err_out:
+ return err;
+}
+
+static int
+pvr_riscv_init(struct pvr_device *pvr_dev)
+{
+ pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_RISCV_SHIFT, 0);
+
+ return 0;
+}
+
+static u32
+pvr_riscv_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+ u32 fw_addr = fw_obj->fw_addr_offset + offset;
+
+ /* RISC-V cacheability is determined by address. */
+ if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+ fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_UNCACHED_DATA);
+ else
+ fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_CACHED_DATA);
+
+ return fw_addr;
+}
+
+static int
+pvr_riscv_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
+ pvr_gem_object_size(pvr_obj));
+}
+
+static void
+pvr_riscv_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
+}
+
+static bool
+pvr_riscv_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_STATUS) &
+ ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN;
+}
+
+static void
+pvr_riscv_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_cr_write32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_CLEAR,
+ ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
+}
+
+const struct pvr_fw_defs pvr_fw_defs_riscv = {
+ .init = pvr_riscv_init,
+ .fw_process = pvr_riscv_fw_process,
+ .vm_map = pvr_riscv_vm_map,
+ .vm_unmap = pvr_riscv_vm_unmap,
+ .get_fw_addr_with_offset = pvr_riscv_get_fw_addr_with_offset,
+ .wrapper_init = pvr_riscv_wrapper_init,
+ .irq_pending = pvr_riscv_irq_pending,
+ .irq_clear = pvr_riscv_irq_clear,
+ .has_fixed_data_addr = false,
+};
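
pvr_riscv_wrapper_init() leans on a small layout trick: because the DEVVADDR field's register shift equals its alignment shift, a correctly aligned GPU virtual address already sits in the right bit position and can be OR'd straight into the remap word; the static_assert() turns that assumption into a compile-time check and the WARN_ON()s catch misaligned addresses at runtime. In reduced form, with invented field names:

    /* When FIELD_SHIFT == FIELD_ALIGNSHIFT, an address aligned to
     * (1 << FIELD_ALIGNSHIFT) needs no shifting before being merged.
     */
    static_assert(FIELD_SHIFT == FIELD_ALIGNSHIFT);
    WARN_ON(addr & FIELD_CLRMSK);            /* reject misaligned input */
    regval = addr | size_bits | enable_bit;  /* address used as-is */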
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.c b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
index 36cec227cfe3..dcbb9903e791 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_startstop.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
@@ -49,6 +49,14 @@ rogue_bif_init(struct pvr_device *pvr_dev)
pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV),
pc_addr);
+
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
+ pc_addr = (((u64)pc_dma_addr >> ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT)
+ << ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) &
+ ~ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK;
+
+ pvr_cr_write64(pvr_dev, FWCORE_MEM_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV), pc_addr);
+ }
}
static int
@@ -114,6 +122,9 @@ pvr_fw_start(struct pvr_device *pvr_dev)
(void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */
}
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV)
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 0);
+
/* Set Rogue in soft-reset. */
pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
if (has_reset2)
@@ -167,6 +178,12 @@ pvr_fw_start(struct pvr_device *pvr_dev)
/* ... and afterwards. */
udelay(3);
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
+ /* Boot the FW. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 1);
+ udelay(3);
+ }
+
return 0;
err_reset:
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 5dbb636d7d4f..a1098b521485 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -21,7 +21,6 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
{
struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
struct pvr_fw_trace *fw_trace = priv;
- u32 thread_nr;
tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
tracebuf_ctrl->tracebuf_flags = 0;
@@ -31,7 +30,7 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
else
tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -48,10 +47,9 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 thread_nr;
int err;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
trace_buffer->buf =
@@ -88,7 +86,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
ARRAY_SIZE(fw_trace->buffers));
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -99,7 +97,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
return 0;
err_free_buf:
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
if (trace_buffer->buf)
@@ -112,9 +110,8 @@ err_free_buf:
void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
@@ -122,8 +119,6 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
}
-#if defined(CONFIG_DEBUG_FS)
-
/**
* update_logtype() - Send KCCB command to trigger FW to update logtype
* @pvr_dev: Target PowerVR device
@@ -184,9 +179,7 @@ struct pvr_fw_trace_seq_data {
static u32 find_sfid(u32 id)
{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
+ for (u32 i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
if (stid_fmts[i].id == id)
return i;
}
@@ -285,12 +278,11 @@ static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
{
struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
- u32 i;
/* Reset trace index, then advance to *pos. */
fw_trace_get_first(trace_seq_data);
- for (i = 0; i < *pos; i++) {
+ for (u32 i = 0; i < *pos; i++) {
if (!fw_trace_get_next(trace_seq_data))
return NULL;
}
@@ -447,7 +439,7 @@ static const struct file_operations pvr_fw_trace_fops = {
void
pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask)
{
- if (old_mask != new_mask)
+ if (IS_ENABLED(CONFIG_DEBUG_FS) && old_mask != new_mask)
update_logtype(pvr_dev, new_mask);
}
@@ -455,12 +447,14 @@ void
pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
"The filename buffer is only large enough for a single-digit thread count");
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
char filename[8];
snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
@@ -469,4 +463,3 @@ pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
&pvr_fw_trace_fops);
}
}
-#endif
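
Trading the #ifdef CONFIG_DEBUG_FS blocks for IS_ENABLED() checks means the debugfs code is now compiled in every configuration (so it cannot silently rot in !DEBUG_FS builds) while still being discarded by the optimizer when the option is off, since IS_ENABLED() expands to a compile-time constant. The idiom in isolation:

    /* Constant-folded: with CONFIG_DEBUG_FS unset, everything past the
     * early return is dead code and is eliminated, yet it still had to
     * compile, unlike an #ifdef'd-out region.
     */
    if (!IS_ENABLED(CONFIG_DEBUG_FS))
            return;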
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.h b/drivers/gpu/drm/imagination/pvr_fw_trace.h
index 0074d2b18da0..1d0ef937427a 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.h
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.h
@@ -65,7 +65,6 @@ struct pvr_fw_trace {
int pvr_fw_trace_init(struct pvr_device *pvr_dev);
void pvr_fw_trace_fini(struct pvr_device *pvr_dev);
-#if defined(CONFIG_DEBUG_FS)
/* Forward declaration from <linux/dcache.h>. */
struct dentry;
@@ -73,6 +72,5 @@ void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask,
u32 new_mask);
void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
-#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* PVR_FW_TRACE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_util.c b/drivers/gpu/drm/imagination/pvr_fw_util.c
new file mode 100644
index 000000000000..377fe72d86b8
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_util.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include <linux/elf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/**
+ * pvr_fw_process_elf_command_stream() - Process ELF firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL on any error in ELF command stream.
+ */
+int
+pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr,
+ u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
+{
+ struct elf32_hdr *header = (struct elf32_hdr *)fw;
+ struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ int err;
+
+ for (u32 entry = 0; entry < header->e_phnum; entry++, program_header++) {
+ void *write_addr;
+
+ /* Only consider loadable entries in the ELF segment table */
+ if (program_header->p_type != PT_LOAD)
+ continue;
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
+ program_header->p_memsz, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ program_header->p_vaddr, program_header->p_memsz);
+ return err;
+ }
+
+ /* Write to FW allocation only if available */
+ if (write_addr) {
+ memcpy(write_addr, fw + program_header->p_offset,
+ program_header->p_filesz);
+
+ memset((u8 *)write_addr + program_header->p_filesz, 0,
+ program_header->p_memsz - program_header->p_filesz);
+ }
+ }
+
+ return 0;
+}
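
The memcpy()/memset() pair at the end of the loop is the standard ELF loader idiom: a PT_LOAD segment occupies p_memsz bytes in memory but only p_filesz bytes in the file, and the trailing difference (typically .bss) must be zero-filled by whoever loads it. Schematically, with dst standing in for write_addr:

    /* In-memory layout of one PT_LOAD segment after loading:
     *
     *   [ p_filesz bytes copied from file ][ p_memsz - p_filesz zeroed ]
     *   ^ dst                              ^ dst + p_filesz
     */
    memcpy(dst, src, phdr->p_filesz);
    memset(dst + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);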
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index 6a8c81fe8c1e..a66cf082af24 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -19,6 +19,7 @@
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#include <linux/property.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
@@ -76,8 +77,6 @@ pvr_gem_object_flags_validate(u64 flags)
DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
};
- int i;
-
/*
* Check for bits set in undefined regions. Reserved regions refer to
* options that can only be set by the kernel. These are explicitly
@@ -91,7 +90,7 @@ pvr_gem_object_flags_validate(u64 flags)
* Check for all combinations of flags marked as invalid in the array
* above.
*/
- for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
+ for (int i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
u64 combo = invalid_combinations[i];
if ((flags & combo) == combo)
@@ -203,7 +202,7 @@ pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
dma_resv_lock(obj->resv, NULL);
- err = drm_gem_shmem_vmap(shmem_obj, &map);
+ err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
if (err)
goto err_unlock;
@@ -257,7 +256,7 @@ pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
}
- drm_gem_shmem_vunmap(shmem_obj, &map);
+ drm_gem_shmem_vunmap_locked(shmem_obj, &map);
dma_resv_unlock(obj->resv);
}
@@ -336,6 +335,7 @@ struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t
struct pvr_gem_object *
pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
struct drm_gem_shmem_object *shmem_obj;
struct pvr_gem_object *pvr_obj;
struct sg_table *sgt;
@@ -345,7 +345,10 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
if (size == 0 || !pvr_gem_object_flags_validate(flags))
return ERR_PTR(-EINVAL);
- shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size);
+ if (device_get_dma_attr(drm_dev->dev) == DEV_DMA_COHERENT)
+ flags |= PVR_BO_CPU_CACHED;
+
+ shmem_obj = drm_gem_shmem_create(drm_dev, size);
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
@@ -360,8 +363,7 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
goto err_shmem_object_free;
}
- dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt,
- DMA_BIDIRECTIONAL);
+ dma_sync_sgtable_for_device(drm_dev->dev, sgt, DMA_BIDIRECTIONAL);
/*
* Do this last because pvr_gem_object_zero() requires a fully
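
For reference, the invalid-combination test in pvr_gem_object_flags_validate() reduces to the mask idiom below. This is a minimal standalone sketch with hypothetical flag names; a flags value is rejected only when every bit of a listed combination is set at once.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical flags standing in for the DRM_PVR_BO_* values. */
#define FLAG_PM_FW_PROTECT	(1ull << 0)
#define FLAG_CPU_ACCESS		(1ull << 1)

static const uint64_t invalid_combinations[] = {
	/* Assumed mutually exclusive for the sake of the example. */
	FLAG_PM_FW_PROTECT | FLAG_CPU_ACCESS,
};

static bool flags_valid(uint64_t flags)
{
	for (size_t i = 0; i < sizeof(invalid_combinations) /
			       sizeof(invalid_combinations[0]); i++) {
		uint64_t combo = invalid_combinations[i];

		/* Invalid only when *all* bits of a combination are set. */
		if ((flags & combo) == combo)
			return false;
	}

	return true;
}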
diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h
index e0e5ea509a2e..c99f30cc6208 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.h
+++ b/drivers/gpu/drm/imagination/pvr_gem.h
@@ -44,8 +44,10 @@ struct pvr_file;
* Bits not defined anywhere are "undefined".
*
* CPU mapping options
- * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set this
- * flag to override this behaviour and map the object cached.
+ * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set
+ * this flag to override this behaviour and map the object cached. If the dma_coherent
+ * property is present in the devicetree, all allocations are mapped as if this flag were
+ * set, with no additional handling required at allocation time.
*
* Firmware options
* :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and reinitialised on hard
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c
index 54f88d6c01e5..dc0c25fa1847 100644
--- a/drivers/gpu/drm/imagination/pvr_hwrt.c
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.c
@@ -44,13 +44,12 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
{
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
int err;
- int i;
hwrt->pvr_dev = pvr_dev;
hwrt->max_rts = args->layers;
/* Get pointers to the free lists */
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]);
if (!hwrt->free_lists[i]) {
err = -EINVAL;
@@ -67,7 +66,7 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
return 0;
err_put_free_lists:
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -78,9 +77,7 @@ err_put_free_lists:
static void
hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -363,13 +360,12 @@ hwrt_data_init_fw_structure(struct pvr_file *pvr_file,
struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
struct rogue_fwif_rta_ctl *rta_ctl;
- int free_list_i;
int err;
pvr_fw_object_get_fw_addr(hwrt->common_fw_obj,
&hwrt_data->data.hwrt_data_common_fw_addr);
- for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
+ for (int free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
pvr_fw_object_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj,
&hwrt_data->data.freelists_fw_addr[free_list_i]);
}
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 59b334d094fa..7564b0f21b42 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -446,7 +446,7 @@ create_job(struct pvr_device *pvr_dev,
if (err)
goto err_put_job;
- err = pvr_queue_job_init(job);
+ err = pvr_queue_job_init(job, pvr_file->file->client_id);
if (err)
goto err_put_job;
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c
index 4fe70610ed94..450d476d183f 100644
--- a/drivers/gpu/drm/imagination/pvr_mmu.c
+++ b/drivers/gpu/drm/imagination/pvr_mmu.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/kmemleak.h>
#include <linux/minmax.h>
+#include <linux/property.h>
#include <linux/sizes.h>
#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_))
@@ -259,6 +260,7 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
struct device *dev = from_pvr_device(pvr_dev)->dev;
struct page *raw_page;
+ pgprot_t prot;
int err;
dma_addr_t dma_addr;
@@ -268,7 +270,11 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
if (!raw_page)
return -ENOMEM;
- host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ prot = PAGE_KERNEL;
+ if (device_get_dma_attr(dev) != DEV_DMA_COHERENT)
+ prot = pgprot_writecombine(prot);
+
+ host_ptr = vmap(&raw_page, 1, VM_MAP, prot);
if (!host_ptr) {
err = -ENOMEM;
goto err_free_page;
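
The mapping-attribute choice above can be read as a small policy: cacheable kernel mappings when the device is DMA-coherent, write-combined otherwise. A hedged kernel-style sketch of that choice follows; the helper name is hypothetical, and pvr_mmu_backing_page_init() open-codes the same logic.

#include <linux/device.h>
#include <linux/pgtable.h>
#include <linux/property.h>

static pgprot_t pvr_backing_page_prot(struct device *dev)
{
	pgprot_t prot = PAGE_KERNEL;

	/* Coherent devices can use cacheable CPU mappings directly;
	 * everything else falls back to write-combined. */
	if (device_get_dma_attr(dev) != DEV_DMA_COHERENT)
		prot = pgprot_writecombine(prot);

	return prot;
}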
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index ba7816fd28ec..187a07e0bd9a 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -10,11 +10,15 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -252,6 +256,8 @@ pvr_power_device_suspend(struct device *dev)
clk_disable_unprepare(pvr_dev->sys_clk);
clk_disable_unprepare(pvr_dev->core_clk);
+ err = reset_control_assert(pvr_dev->reset);
+
err_drm_dev_exit:
drm_dev_exit(idx);
@@ -282,16 +288,33 @@ pvr_power_device_resume(struct device *dev)
if (err)
goto err_sys_clk_disable;
+ /*
+ * According to the hardware manual, a delay of at least 32 clock
+ * cycles is required between de-asserting the clkgen reset and
+ * de-asserting the GPU reset. 32 cycles take 1 microsecond at
+ * 32 MHz, so a delay of 1 microsecond meets the requirement at
+ * any GPU clock of 32 MHz or above, which covers all feasible
+ * GPU clock speeds.
+ */
+ udelay(1);
+
+ err = reset_control_deassert(pvr_dev->reset);
+ if (err)
+ goto err_mem_clk_disable;
+
if (pvr_dev->fw_dev.booted) {
err = pvr_power_fw_enable(pvr_dev);
if (err)
- goto err_mem_clk_disable;
+ goto err_reset_assert;
}
drm_dev_exit(idx);
return 0;
+err_reset_assert:
+ reset_control_assert(pvr_dev->reset);
+
err_mem_clk_disable:
clk_disable_unprepare(pvr_dev->mem_clk);
@@ -317,6 +340,63 @@ pvr_power_device_idle(struct device *dev)
return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
}
+static int
+pvr_power_clear_error(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+ int err;
+
+ /* Ensure the device state is known and nothing is happening past this point */
+ pm_runtime_disable(dev);
+
+ /* Attempt to clear the runtime PM error by setting the current state again */
+ if (pm_runtime_status_suspended(dev))
+ err = pm_runtime_set_suspended(dev);
+ else
+ err = pm_runtime_set_active(dev);
+
+ if (err) {
+ drm_err(from_pvr_device(pvr_dev),
+ "%s: Failed to clear runtime PM error (new error %d)\n",
+ __func__, err);
+ }
+
+ pm_runtime_enable(dev);
+
+ return err;
+}
+
+/**
+ * pvr_power_get_clear() - Acquire a power reference, correcting any errors
+ * @pvr_dev: Device pointer
+ *
+ * Attempt to acquire a power reference on the device. If runtime PM is
+ * in the error state, attempt to clear the error and retry.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_power_get() or the runtime PM API.
+ */
+static int
+pvr_power_get_clear(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = pvr_power_get(pvr_dev);
+ if (err == 0)
+ return err;
+
+ drm_warn(from_pvr_device(pvr_dev),
+ "%s: pvr_power_get returned error %d, attempting recovery\n",
+ __func__, err);
+
+ err = pvr_power_clear_error(pvr_dev);
+ if (err)
+ return err;
+
+ return pvr_power_get(pvr_dev);
+}
+
/**
* pvr_power_reset() - Reset the GPU
* @pvr_dev: Device pointer
@@ -341,7 +421,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
* Take a power reference during the reset. This should prevent any interference with the
* power state during reset.
*/
- WARN_ON(pvr_power_get(pvr_dev));
+ WARN_ON(pvr_power_get_clear(pvr_dev));
down_write(&pvr_dev->reset_sem);
@@ -363,13 +443,13 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
if (!err) {
if (hard_reset) {
pvr_dev->fw_dev.booted = false;
- WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev));
+ WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev));
err = pvr_fw_hard_reset(pvr_dev);
if (err)
goto err_device_lost;
- err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev);
+ err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev);
pvr_dev->fw_dev.booted = true;
if (err)
goto err_device_lost;
@@ -431,3 +511,114 @@ pvr_watchdog_fini(struct pvr_device *pvr_dev)
{
cancel_delayed_work_sync(&pvr_dev->watchdog.work);
}
+
+int pvr_power_domains_init(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+ struct device_link **domain_links __free(kfree) = NULL;
+ struct device **domain_devs __free(kfree) = NULL;
+ int domain_count;
+ int link_count;
+
+ char dev_name[2] = "a";
+ int err;
+ int i;
+
+ domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (domain_count < 0)
+ return domain_count;
+
+ if (domain_count <= 1)
+ return 0;
+
+ link_count = domain_count + (domain_count - 1);
+
+ domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL);
+ if (!domain_devs)
+ return -ENOMEM;
+
+ domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL);
+ if (!domain_links)
+ return -ENOMEM;
+
+ for (i = 0; i < domain_count; i++) {
+ struct device *domain_dev;
+
+ dev_name[0] = 'a' + i;
+ domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
+ if (IS_ERR_OR_NULL(domain_dev)) {
+ err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
+ goto err_detach;
+ }
+
+ domain_devs[i] = domain_dev;
+ }
+
+ for (i = 0; i < domain_count; i++) {
+ struct device_link *link;
+
+ link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ err = -ENODEV;
+ goto err_unlink;
+ }
+
+ domain_links[i] = link;
+ }
+
+ for (i = domain_count; i < link_count; i++) {
+ struct device_link *link;
+
+ link = device_link_add(domain_devs[i - domain_count + 1],
+ domain_devs[i - domain_count],
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ err = -ENODEV;
+ goto err_unlink;
+ }
+
+ domain_links[i] = link;
+ }
+
+ pvr_dev->power = (struct pvr_device_power){
+ .domain_devs = no_free_ptr(domain_devs),
+ .domain_links = no_free_ptr(domain_links),
+ .domain_count = domain_count,
+ };
+
+ return 0;
+
+err_unlink:
+ while (--i >= 0)
+ device_link_del(domain_links[i]);
+
+ i = domain_count;
+
+err_detach:
+ while (--i >= 0)
+ dev_pm_domain_detach(domain_devs[i], true);
+
+ return err;
+}
+
+void pvr_power_domains_fini(struct pvr_device *pvr_dev)
+{
+ const int domain_count = pvr_dev->power.domain_count;
+
+ int i = domain_count + (domain_count - 1);
+
+ while (--i >= 0)
+ device_link_del(pvr_dev->power.domain_links[i]);
+
+ i = domain_count;
+
+ while (--i >= 0)
+ dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);
+
+ kfree(pvr_dev->power.domain_links);
+ kfree(pvr_dev->power.domain_devs);
+
+ pvr_dev->power = (struct pvr_device_power){ 0 };
+}
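
For a concrete picture of the topology pvr_power_domains_init() builds: with N domains there are N consumer links from the GPU device plus N - 1 links between consecutive domains, 2N - 1 in total. A standalone sketch that enumerates them (the domain count is a hypothetical example, and the naming follows the 'a', 'b', ... scheme above; "consumes" reflects the device_link_add(consumer, supplier) argument order):

#include <stdio.h>

int main(void)
{
	int domain_count = 3;	/* hypothetical example value */
	int link_count = domain_count + (domain_count - 1);

	/* Consumer links: the GPU device consumes every domain. */
	for (int i = 0; i < domain_count; i++)
		printf("link %d: gpu consumes domain '%c'\n", i, 'a' + i);

	/* Chain links: domain i+1 consumes domain i, ordering power-up. */
	for (int i = domain_count; i < link_count; i++)
		printf("link %d: domain '%c' consumes domain '%c'\n", i,
		       'a' + (i - domain_count + 1),
		       'a' + (i - domain_count));

	return 0;
}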
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
index 9a9312dcb2da..ada85674a7ca 100644
--- a/drivers/gpu/drm/imagination/pvr_power.h
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -38,4 +38,7 @@ pvr_power_put(struct pvr_device *pvr_dev)
return pm_runtime_put(drm_dev->dev);
}
+int pvr_power_domains_init(struct pvr_device *pvr_dev);
+void pvr_power_domains_fini(struct pvr_device *pvr_dev);
+
#endif /* PVR_POWER_H */
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 5e9bc0992824..fc415dd0d7a7 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -803,7 +803,7 @@ static void pvr_queue_start(struct pvr_queue *queue)
* the scheduler, and re-assign parent fences in the middle.
*
* Return:
- * * DRM_GPU_SCHED_STAT_NOMINAL.
+ * * DRM_GPU_SCHED_STAT_RESET.
*/
static enum drm_gpu_sched_stat
pvr_queue_timedout_job(struct drm_sched_job *s_job)
@@ -854,7 +854,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job)
drm_sched_start(sched, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
/**
@@ -1073,6 +1073,7 @@ static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
/**
* pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
* @job: The job to initialize.
+ * @drm_client_id: drm_file.client_id submitting the job
*
* Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
* and pvr_queue_job_push() can't fail. We also make sure the context type is
@@ -1082,7 +1083,7 @@ static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
* * 0 on success, or
* * An error code if something failed.
*/
-int pvr_queue_job_init(struct pvr_job *job)
+int pvr_queue_job_init(struct pvr_job *job, u64 drm_client_id)
{
/* Fragment jobs need at least one native fence wait on the geometry job fence. */
u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0;
@@ -1099,7 +1100,7 @@ int pvr_queue_job_init(struct pvr_job *job)
if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count)))
return -E2BIG;
- err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE);
+ err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE, drm_client_id);
if (err)
return err;
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index 93fe9ac9f58c..fc1986d73fc8 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -143,7 +143,7 @@ struct pvr_queue {
bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f);
-int pvr_queue_job_init(struct pvr_job *job);
+int pvr_queue_job_init(struct pvr_job *job, u64 drm_client_id);
void pvr_queue_job_cleanup(struct pvr_job *job);
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
index 2a90d02796d3..790c97f80a2a 100644
--- a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
@@ -827,6 +827,120 @@
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U
+/* Register ROGUE_CR_EVENT_CLEAR */
+#define ROGUE_CR_EVENT_CLEAR 0x0138U
+#define ROGUE_CR_EVENT_CLEAR__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL
+#define ROGUE_CR_EVENT_CLEAR__SIGNALS__MASKFULL 0x00000000E007FFFFULL
+#define ROGUE_CR_EVENT_CLEAR_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT 31U
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN 0x80000000U
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT 30U
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN 0x40000000U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT 29U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT 28U
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN 0x10000000U
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT 27U
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN 0x08000000U
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT 26U
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN 0x04000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT 25U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN 0x02000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT 24U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN 0x01000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT 23U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN 0x00800000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT 22U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN 0x00400000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT 21U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN 0x00200000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT 20U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN 0x00100000U
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_SHIFT 20U
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_EN 0x00100000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT 19U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN 0x00080000U
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_SHIFT 19U
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_EN 0x00080000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT 17U
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_EN 0x00020000U
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT 17U
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT 16U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN 0x00010000U
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT 15U
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK 0xFFFF7FFFU
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_EN 0x00008000U
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT 14U
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_EN 0x00004000U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_SHIFT 13U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_EN 0x00002000U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_SHIFT 12U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_EN 0x00001000U
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_SHIFT 11U
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_EN 0x00000800U
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT 10U
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_EN 0x00000400U
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT 9U
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN 0x00000200U
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT 8U
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN 0x00000100U
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT 7U
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN 0x00000080U
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT 6U
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_EN 0x00000040U
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_SHIFT 5U
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_EN 0x00000020U
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT 4U
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN 0x00000010U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT 3U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN 0x00000008U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT 2U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN 0x00000004U
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT 1U
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_EN 0x00000002U
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT 0U
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_EN 0x00000001U
+
/* Register ROGUE_CR_TIMER */
#define ROGUE_CR_TIMER 0x0160U
#define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL
@@ -6031,25 +6145,6 @@
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
-/* Register ROGUE_CR_ECC_RAM_ERR_INJ */
-#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U
-
/* Register ROGUE_CR_ECC_RAM_INIT_KICK */
#define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U
#define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL
@@ -6163,6 +6258,26 @@
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+/* Register ROGUE_CR_FAULT_FW_STATUS */
+#define ROGUE_CR_FAULT_FW_STATUS 0xF3B0U
+#define ROGUE_CR_FAULT_FW_STATUS_MASKFULL 0x0000000000010001ULL
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT 16U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_EN 0x00010000U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT 0U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_EN 0x00000001U
+
+/* Register ROGUE_CR_FAULT_FW_CLEAR */
+#define ROGUE_CR_FAULT_FW_CLEAR 0xF3B8U
+#define ROGUE_CR_FAULT_FW_CLEAR_MASKFULL 0x0000000000010001ULL
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT 16U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN 0x00010000U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT 0U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_EN 0x00000001U
+
/* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_riscv.h b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h
new file mode 100644
index 000000000000..9a070e24fa6a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_RISCV_H
+#define PVR_ROGUE_RISCV_H
+
+#include "pvr_rogue_cr_defs.h"
+
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define ROGUE_RISCVFW_REGION_SIZE SZ_256M
+#define ROGUE_RISCVFW_REGION_SHIFT __ffs(ROGUE_RISCVFW_REGION_SIZE)
+
+enum rogue_riscvfw_region {
+ ROGUE_RISCV_REGION__RESERVED_0 = 0,
+ ROGUE_RISCV_REGION__RESERVED_1,
+ ROGUE_RISCV_REGION_SOCIF,
+ ROGUE_RISCV_REGION__RESERVED_3,
+ ROGUE_RISCV_REGION__RESERVED_4,
+ ROGUE_RISCV_REGION_BOOTLDR_DATA,
+ ROGUE_RISCV_REGION_SHARED_CACHED_DATA,
+ ROGUE_RISCV_REGION__RESERVED_7,
+ ROGUE_RISCV_REGION_COREMEM,
+ ROGUE_RISCV_REGION__RESERVED_9,
+ ROGUE_RISCV_REGION__RESERVED_A,
+ ROGUE_RISCV_REGION__RESERVED_B,
+ ROGUE_RISCV_REGION_BOOTLDR_CODE,
+ ROGUE_RISCV_REGION_SHARED_UNCACHED_DATA,
+ ROGUE_RISCV_REGION__RESERVED_E,
+ ROGUE_RISCV_REGION__RESERVED_F,
+
+ ROGUE_RISCV_REGION__COUNT,
+};
+
+#define ROGUE_RISCVFW_REGION_BASE(r) ((u32)(ROGUE_RISCV_REGION_##r) << ROGUE_RISCVFW_REGION_SHIFT)
+#define ROGUE_RISCVFW_REGION_REMAP_CR(r) \
+ (ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 + (u32)(ROGUE_RISCV_REGION_##r) * 8U)
+
+#endif /* PVR_ROGUE_RISCV_H */
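
The remap helpers above place each 256 MiB region at its enum index shifted left by __ffs(SZ_256M) = 28. A quick standalone sketch of the resulting base addresses, with region indices taken from the enum (e.g. BOOTLDR_CODE = 0xC gives base 0xC0000000):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int region_shift = 28;	/* __ffs(SZ_256M) */

	/* Indices from the enum above: BOOTLDR_DATA = 0x5,
	 * COREMEM = 0x8, BOOTLDR_CODE = 0xc. */
	const unsigned int regions[] = { 0x5, 0x8, 0xc };

	for (unsigned int i = 0; i < 3; i++)
		printf("region 0x%x base 0x%08x\n", regions[i],
		       (uint32_t)(regions[i] << region_shift));

	return 0;
}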
diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c
index 975336a4facf..679aa618b7a9 100644
--- a/drivers/gpu/drm/imagination/pvr_stream.c
+++ b/drivers/gpu/drm/imagination/pvr_stream.c
@@ -67,9 +67,8 @@ pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *st
u8 *dest, u32 dest_size, u32 *stream_offset_out)
{
int err = 0;
- u32 i;
- for (i = 0; i < nr_entries; i++) {
+ for (u32 i = 0; i < nr_entries; i++) {
if (stream_def[i].offset >= dest_size) {
err = -EINVAL;
break;
@@ -131,7 +130,6 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX];
u32 ext_header;
int err = 0;
- u32 i;
/* Copy "must have" mask from device. We clear this as we process the stream. */
memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type],
@@ -159,7 +157,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
musthave_masks[type] &= ~data;
- for (i = 0; i < header->ext_streams_num; i++) {
+ for (u32 i = 0; i < header->ext_streams_num; i++) {
const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i];
if (!(ext_header & ext_def->header_mask))
@@ -181,7 +179,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
* Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks
* for this command was not present.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (musthave_masks[i])
return -EINVAL;
}
@@ -245,13 +243,11 @@ pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs
if (err)
return err;
} else {
- u32 i;
-
/*
* If we don't have an extension stream then there must not be any "must have"
* quirks for this command.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i])
return -EINVAL;
}
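
The "must have" bookkeeping above boils down to: start from the device's required-quirk mask, clear bits as matching extension data is seen, and reject the stream if anything remains. A minimal sketch of that check, with hypothetical names and a single mask word for simplicity:

#include <stdbool.h>
#include <stdint.h>

static bool musthave_satisfied(uint32_t required,
			       const uint32_t *ext_data, unsigned int n)
{
	uint32_t musthave = required;

	/* Each extension stream seen clears the quirk bits it carries. */
	for (unsigned int i = 0; i < n; i++)
		musthave &= ~ext_data[i];

	/* Any bit left set means a required quirk never appeared. */
	return musthave == 0;
}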
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index 94af854547d6..5847a1c92bea 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -100,10 +100,9 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
- int page_nr;
vunmap(mips_data->pt);
- for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
+ for (int page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
dma_unmap_page(from_pvr_device(pvr_dev)->dev,
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 03535a15dd8f..3e8c6edbc17c 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+source "drivers/gpu/drm/imx/dc/Kconfig"
source "drivers/gpu/drm/imx/dcss/Kconfig"
source "drivers/gpu/drm/imx/ipuv3/Kconfig"
source "drivers/gpu/drm/imx/lcdc/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index 86f38e7c7422..c7b317640d71 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_IMX8_DC) += dc/
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
obj-$(CONFIG_DRM_IMX) += ipuv3/
obj-$(CONFIG_DRM_IMX_LCDC) += lcdc/
diff --git a/drivers/gpu/drm/imx/dc/Kconfig b/drivers/gpu/drm/imx/dc/Kconfig
new file mode 100644
index 000000000000..415993207f2e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Kconfig
@@ -0,0 +1,13 @@
+config DRM_IMX8_DC
+ tristate "Freescale i.MX8 Display Controller Graphics"
+ depends on DRM && COMMON_CLK && OF && (ARCH_MXC || COMPILE_TEST)
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select GENERIC_IRQ_CHIP
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Enable Freescale i.MX8 Display Controller (DC) graphics support.
diff --git a/drivers/gpu/drm/imx/dc/Makefile b/drivers/gpu/drm/imx/dc/Makefile
new file mode 100644
index 000000000000..b9d33c074984
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+imx8-dc-drm-objs := dc-cf.o dc-crtc.o dc-de.o dc-drv.o dc-ed.o dc-fg.o dc-fl.o \
+ dc-fu.o dc-fw.o dc-ic.o dc-kms.o dc-lb.o dc-pe.o \
+ dc-plane.o dc-tc.o
+
+obj-$(CONFIG_DRM_IMX8_DC) += imx8-dc-drm.o
diff --git a/drivers/gpu/drm/imx/dc/dc-cf.c b/drivers/gpu/drm/imx/dc/dc-cf.c
new file mode 100644
index 000000000000..2f077161e912
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-cf.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define STATICCONTROL 0x8
+
+#define FRAMEDIMENSIONS 0xc
+#define HEIGHT(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define WIDTH(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define CONSTANTCOLOR 0x10
+#define BLUE(x) FIELD_PREP(GENMASK(15, 8), (x))
+
+static const struct dc_subdev_info dc_cf_info[] = {
+ { .reg_start = 0x56180960, .id = 0, },
+ { .reg_start = 0x561809e0, .id = 1, },
+ { .reg_start = 0x561809a0, .id = 4, },
+ { .reg_start = 0x56180a20, .id = 5, },
+};
+
+static const struct regmap_range dc_cf_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, CONSTANTCOLOR),
+};
+
+static const struct regmap_access_table dc_cf_regmap_access_table = {
+ .yes_ranges = dc_cf_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_cf_regmap_ranges),
+};
+
+static const struct regmap_config dc_cf_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_cf_regmap_access_table,
+ .rd_table = &dc_cf_regmap_access_table,
+ .max_register = CONSTANTCOLOR,
+};
+
+static inline void dc_cf_enable_shden(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, STATICCONTROL, SHDEN);
+}
+
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf)
+{
+ return cf->link;
+}
+
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w,
+ unsigned int h)
+{
+ regmap_write(cf->reg_cfg, FRAMEDIMENSIONS, WIDTH(w) | HEIGHT(h));
+}
+
+void dc_cf_constantcolor_black(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, 0);
+}
+
+void dc_cf_constantcolor_blue(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, BLUE(0xff));
+}
+
+void dc_cf_init(struct dc_cf *cf)
+{
+ dc_cf_enable_shden(cf);
+}
+
+static int dc_cf_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_cf *cf;
+ int id;
+
+ cf = devm_kzalloc(dev, sizeof(*cf), GFP_KERNEL);
+ if (!cf)
+ return -ENOMEM;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ cf->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_cf_cfg_regmap_config);
+ if (IS_ERR(cf->reg_cfg))
+ return PTR_ERR(cf->reg_cfg);
+
+ id = dc_subdev_get_id(dc_cf_info, ARRAY_SIZE(dc_cf_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ switch (id) {
+ case 0:
+ cf->link = LINK_ID_CONSTFRAME0;
+ dc_drm->cf_cont[0] = cf;
+ break;
+ case 1:
+ cf->link = LINK_ID_CONSTFRAME1;
+ dc_drm->cf_cont[1] = cf;
+ break;
+ case 4:
+ cf->link = LINK_ID_CONSTFRAME4;
+ dc_drm->cf_safe[0] = cf;
+ break;
+ case 5:
+ cf->link = LINK_ID_CONSTFRAME5;
+ dc_drm->cf_safe[1] = cf;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_cf_ops = {
+ .bind = dc_cf_bind,
+};
+
+static int dc_cf_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_cf_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_cf_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_cf_ops);
+}
+
+static const struct of_device_id dc_cf_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-constframe" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_cf_dt_ids);
+
+struct platform_driver dc_cf_driver = {
+ .probe = dc_cf_probe,
+ .remove = dc_cf_remove,
+ .driver = {
+ .name = "imx8-dc-constframe",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_cf_dt_ids,
+ },
+};
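
FRAMEDIMENSIONS packs width-1 into bits 13:0 and height-1 into bits 29:16 via FIELD_PREP/GENMASK. A standalone arithmetic sketch of the value the WIDTH()/HEIGHT() macros above would produce for a hypothetical 1920x1080 frame:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int w = 1920, h = 1080;

	/* HEIGHT(h) | WIDTH(w): (h - 1) into bits 29:16, (w - 1) into 13:0 */
	uint32_t val = ((uint32_t)(h - 1) << 16) | (w - 1);

	printf("FRAMEDIMENSIONS = 0x%08x\n", val);	/* 0x0437077f */
	return 0;
}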
diff --git a/drivers/gpu/drm/imx/dc/dc-crtc.c b/drivers/gpu/drm/imx/dc/dc-crtc.c
new file mode 100644
index 000000000000..31d3a982deaf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-crtc.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+#define dc_crtc_dbg(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_dbg_kms(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
+#define dc_crtc_err(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_err(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
+#define DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(c) \
+do { \
+ unsigned long ret; \
+ ret = wait_for_completion_timeout(&dc_crtc->c, HZ); \
+ if (ret == 0) \
+ dc_crtc_err(crtc, "%s: wait for " #c " timeout\n", \
+ __func__); \
+} while (0)
+
+#define DC_CRTC_CHECK_FRAMEGEN_FIFO(fg) \
+do { \
+ struct dc_fg *_fg = (fg); \
+ if (dc_fg_secondary_requests_to_read_empty_fifo(_fg)) { \
+ dc_fg_secondary_clear_channel_status(_fg); \
+ dc_crtc_err(crtc, "%s: FrameGen FIFO empty\n", \
+ __func__); \
+ } \
+} while (0)
+
+#define DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(fg) \
+do { \
+ if (dc_fg_wait_for_secondary_syncup(fg)) \
+ dc_crtc_err(crtc, \
+ "%s: FrameGen secondary channel isn't syncup\n",\
+ __func__); \
+} while (0)
+
+static inline struct dc_crtc *to_dc_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct dc_crtc, base);
+}
+
+static u32 dc_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ return dc_fg_get_frame_index(dc_crtc->fg);
+}
+
+static int dc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ enable_irq(dc_crtc->irq_dec_framecomplete);
+
+ return 0;
+}
+
+static void dc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ /* nosync due to atomic context */
+ disable_irq_nosync(dc_crtc->irq_dec_framecomplete);
+}
+
+static const struct drm_crtc_funcs dc_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .get_vblank_counter = dc_crtc_get_vblank_counter,
+ .enable_vblank = dc_crtc_enable_vblank,
+ .disable_vblank = dc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
+static void dc_crtc_queue_state_event(struct drm_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc_state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ WARN_ON(dc_crtc->event);
+ dc_crtc->event = crtc_state->event;
+ crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static inline enum drm_mode_status
+dc_crtc_check_clock(struct dc_crtc *dc_crtc, int clk_khz)
+{
+ return dc_fg_check_clock(dc_crtc->fg, clk_khz);
+}
+
+static enum drm_mode_status
+dc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, mode->clock);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->crtc_clock > DC_FRAMEGEN_MAX_CLOCK_KHZ)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static int
+dc_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, adj->clock);
+ if (status != MODE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+dc_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ int idx, ret;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ !new_crtc_state->active)
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ return;
+
+ /* request pixel engine power-on when CRTC starts to be active */
+ ret = pm_runtime_resume_and_get(dc_drm->pe->dev);
+ if (ret)
+ dc_crtc_err(crtc, "failed to get DC pixel engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+}
+
+static void
+dc_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ (!old_crtc_state->active && !new_crtc_state->active))
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ /* flush plane update out to display */
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum dc_link_id cf_link;
+ int idx, ret;
+
+ dc_crtc_dbg(crtc, "mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(adj));
+
+ drm_crtc_vblank_on(crtc);
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ /* request display engine power-on when CRTC is enabled */
+ ret = pm_runtime_resume_and_get(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to get DC display engine RPM: %d\n",
+ ret);
+
+ enable_irq(dc_crtc->irq_dec_shdload);
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+ enable_irq(dc_crtc->irq_ed_safe_shdload);
+
+ dc_fg_cfg_videomode(dc_crtc->fg, adj);
+
+ dc_cf_framedimensions(dc_crtc->cf_cont,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+ dc_cf_framedimensions(dc_crtc->cf_safe,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+
+ /* constframe in safety stream shows blue frame */
+ dc_cf_constantcolor_blue(dc_crtc->cf_safe);
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_safe);
+ dc_ed_pec_src_sel(dc_crtc->ed_safe, cf_link);
+
+ /* show CRTC background if no plane is enabled */
+ if (new_crtc_state->plane_mask == 0) {
+ /* constframe in content stream shows black frame */
+ dc_cf_constantcolor_black(dc_crtc->cf_cont);
+
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_cont);
+ dc_ed_pec_src_sel(dc_crtc->ed_cont, cf_link);
+ }
+
+ dc_fg_enable_clock(dc_crtc->fg);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_safe);
+ dc_fg_shdtokgen(dc_crtc->fg);
+ dc_fg_enable(dc_crtc->fg);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_safe_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_safe_shdload);
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+ disable_irq(dc_crtc->irq_dec_shdload);
+
+ DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(dc_crtc->fg);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx, ret;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_dec_seqcomplete);
+ dc_fg_disable(dc_crtc->fg);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_seqcomplete_done);
+ disable_irq(dc_crtc->irq_dec_seqcomplete);
+
+ dc_fg_disable_clock(dc_crtc->fg);
+
+ /* request pixel engine power-off as plane is off too */
+ ret = pm_runtime_put(dc_drm->pe->dev);
+ if (ret)
+ dc_crtc_err(crtc, "failed to put DC pixel engine RPM: %d\n",
+ ret);
+
+ /* request display engine power-off when CRTC is disabled */
+ ret = pm_runtime_put(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to put DC display engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+
+out:
+ drm_crtc_vblank_off(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (new_crtc_state->event && !new_crtc_state->active) {
+ drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static bool dc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int vdisplay = mode->crtc_vdisplay;
+ int vtotal = mode->crtc_vtotal;
+ bool reliable;
+ int line;
+ int idx;
+
+ if (stime)
+ *stime = ktime_get();
+
+ if (!drm_dev_enter(crtc->dev, &idx)) {
+ reliable = false;
+ *vpos = 0;
+ *hpos = 0;
+ goto out;
+ }
+
+ /* line index starts with 0 for the first active output line */
+ line = dc_fg_get_line_index(dc_crtc->fg);
+
+ if (line < vdisplay)
+ /* active scanout area - positive */
+ *vpos = line + 1;
+ else
+ /* inside vblank - negative */
+ *vpos = line - (vtotal - 1);
+
+ *hpos = 0;
+
+ reliable = true;
+
+ drm_dev_exit(idx);
+out:
+ if (etime)
+ *etime = ktime_get();
+
+ return reliable;
+}
+
+static const struct drm_crtc_helper_funcs dc_helper_funcs = {
+ .mode_valid = dc_crtc_mode_valid,
+ .atomic_check = dc_crtc_atomic_check,
+ .atomic_begin = dc_crtc_atomic_begin,
+ .atomic_flush = dc_crtc_atomic_flush,
+ .atomic_enable = dc_crtc_atomic_enable,
+ .atomic_disable = dc_crtc_atomic_disable,
+ .get_scanout_position = dc_crtc_get_scanout_position,
+};
+
+static irqreturn_t dc_crtc_irq_handler_dec_framecomplete(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(crtc);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (dc_crtc->event) {
+ drm_crtc_send_vblank_event(crtc, dc_crtc->event);
+ dc_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_dec_seqcomplete_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_seqcomplete_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dc_crtc_irq_handler_dec_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_cont_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_cont_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_safe_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_safe_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static int dc_crtc_request_irqs(struct drm_device *drm, struct dc_crtc *dc_crtc)
+{
+ struct {
+ struct device *dev;
+ unsigned int irq;
+ irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ } irqs[DC_CRTC_IRQS] = {
+ {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_framecomplete,
+ dc_crtc_irq_handler_dec_framecomplete,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_seqcomplete,
+ dc_crtc_irq_handler_dec_seqcomplete_done,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_shdload,
+ dc_crtc_irq_handler_dec_shdload_done,
+ }, {
+ dc_crtc->ed_cont->dev,
+ dc_crtc->irq_ed_cont_shdload,
+ dc_crtc_irq_handler_ed_cont_shdload_done,
+ }, {
+ dc_crtc->ed_safe->dev,
+ dc_crtc->irq_ed_safe_shdload,
+ dc_crtc_irq_handler_ed_safe_shdload_done,
+ },
+ };
+ int i, ret;
+
+ for (i = 0; i < DC_CRTC_IRQS; i++) {
+ struct dc_crtc_irq *irq = &dc_crtc->irqs[i];
+
+ ret = devm_request_irq(irqs[i].dev, irqs[i].irq,
+ irqs[i].irq_handler, IRQF_NO_AUTOEN,
+ dev_name(irqs[i].dev), dc_crtc);
+ if (ret) {
+ dev_err(irqs[i].dev, "failed to request irq(%u): %d\n",
+ irqs[i].irq, ret);
+ return ret;
+ }
+
+ irq->dc_crtc = dc_crtc;
+ irq->irq = irqs[i].irq;
+ }
+
+ return 0;
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct dc_de *de = dc_drm->de[crtc_index];
+ struct dc_pe *pe = dc_drm->pe;
+ struct dc_plane *dc_primary;
+ int ret;
+
+ dc_crtc->de = de;
+
+ init_completion(&dc_crtc->dec_seqcomplete_done);
+ init_completion(&dc_crtc->dec_shdload_done);
+ init_completion(&dc_crtc->ed_cont_shdload_done);
+ init_completion(&dc_crtc->ed_safe_shdload_done);
+
+ dc_crtc->cf_cont = pe->cf_cont[crtc_index];
+ dc_crtc->cf_safe = pe->cf_safe[crtc_index];
+ dc_crtc->ed_cont = pe->ed_cont[crtc_index];
+ dc_crtc->ed_safe = pe->ed_safe[crtc_index];
+ dc_crtc->fg = de->fg;
+
+ dc_crtc->irq_dec_framecomplete = de->irq_framecomplete;
+ dc_crtc->irq_dec_seqcomplete = de->irq_seqcomplete;
+ dc_crtc->irq_dec_shdload = de->irq_shdload;
+ dc_crtc->irq_ed_safe_shdload = dc_crtc->ed_safe->irq_shdload;
+ dc_crtc->irq_ed_cont_shdload = dc_crtc->ed_cont->irq_shdload;
+
+ dc_primary = &dc_drm->dc_primary[crtc_index];
+ ret = dc_plane_init(dc_drm, dc_primary);
+ if (ret) {
+ dev_err(de->dev, "failed to initialize primary plane: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&dc_crtc->base, &dc_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &dc_crtc->base, &dc_primary->base,
+ NULL, &dc_crtc_funcs, NULL);
+ if (ret)
+ dev_err(de->dev, "failed to add CRTC: %d\n", ret);
+
+ return ret;
+}
+
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+
+ return dc_crtc_request_irqs(drm, dc_crtc);
+}
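
dc_crtc_get_scanout_position() reports a signed vertical position: positive (line + 1) inside the active area, negative relative to vtotal inside vblank. A standalone sketch of that convention, using hypothetical 1080p-style timing values:

#include <stdio.h>

static int to_vpos(int line, int vdisplay, int vtotal)
{
	/* Active scanout: positive, 1-based. Vblank: negative offset. */
	return line < vdisplay ? line + 1 : line - (vtotal - 1);
}

int main(void)
{
	/* vdisplay 1080, vtotal 1125: line 0 -> 1, line 1100 -> -24 */
	printf("line 0 -> %d, line 1100 -> %d\n",
	       to_vpos(0, 1080, 1125), to_vpos(1100, 1080, 1125));
	return 0;
}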
diff --git a/drivers/gpu/drm/imx/dc/dc-de.c b/drivers/gpu/drm/imx/dc/dc-de.c
new file mode 100644
index 000000000000..5a3125596fdf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define POLARITYCTRL 0xc
+#define POLEN_HIGH BIT(2)
+
+static const struct dc_subdev_info dc_de_info[] = {
+ { .reg_start = 0x5618b400, .id = 0, },
+ { .reg_start = 0x5618b420, .id = 1, },
+};
+
+static const struct regmap_range dc_de_regmap_ranges[] = {
+ regmap_reg_range(POLARITYCTRL, POLARITYCTRL),
+};
+
+static const struct regmap_access_table dc_de_regmap_access_table = {
+ .yes_ranges = dc_de_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_de_regmap_ranges),
+};
+
+static const struct regmap_config dc_de_top_regmap_config = {
+ .name = "top",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_de_regmap_access_table,
+ .rd_table = &dc_de_regmap_access_table,
+ .max_register = POLARITYCTRL,
+};
+
+static inline void dc_dec_init(struct dc_de *de)
+{
+ regmap_write_bits(de->reg_top, POLARITYCTRL, POLARITYCTRL, POLEN_HIGH);
+}
+
+static int dc_de_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_top;
+ void __iomem *base_top;
+ struct dc_de *de;
+ int ret, id;
+
+ de = devm_kzalloc(dev, sizeof(*de), GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
+
+ base_top = devm_platform_get_and_ioremap_resource(pdev, 0, &res_top);
+ if (IS_ERR(base_top))
+ return PTR_ERR(base_top);
+
+ de->reg_top = devm_regmap_init_mmio(dev, base_top,
+ &dc_de_top_regmap_config);
+ if (IS_ERR(de->reg_top))
+ return PTR_ERR(de->reg_top);
+
+ de->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (de->irq_shdload < 0)
+ return de->irq_shdload;
+
+ de->irq_framecomplete = platform_get_irq_byname(pdev, "framecomplete");
+ if (de->irq_framecomplete < 0)
+ return de->irq_framecomplete;
+
+ de->irq_seqcomplete = platform_get_irq_byname(pdev, "seqcomplete");
+ if (de->irq_seqcomplete < 0)
+ return de->irq_seqcomplete;
+
+ de->dev = dev;
+
+ dev_set_drvdata(dev, de);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ id = dc_subdev_get_id(dc_de_info, ARRAY_SIZE(dc_de_info), res_top);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ dc_drm->de[id] = de;
+
+ return 0;
+}
+
+/*
+ * The child device pointers could be fetched from the child component
+ * bind callbacks, but that would rely on the component helper binding
+ * the display engine component first. To avoid that ordering dependency,
+ * fetch the pointers from dc_drm after all components have bound.
+ */
+void dc_de_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_de *de;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ de = dc_drm->de[i];
+ de->fg = dc_drm->fg[i];
+ de->tc = dc_drm->tc[i];
+ }
+}
+
+static const struct component_ops dc_de_ops = {
+ .bind = dc_de_bind,
+};
+
+static int dc_de_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_de_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_de_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_de_ops);
+}
+
+static int dc_de_runtime_resume(struct device *dev)
+{
+ struct dc_de *de = dev_get_drvdata(dev);
+
+ dc_dec_init(de);
+ dc_fg_init(de->fg);
+ dc_tc_init(de->tc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dc_de_pm_ops = {
+ RUNTIME_PM_OPS(NULL, dc_de_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_de_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-display-engine" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_de_dt_ids);
+
+struct platform_driver dc_de_driver = {
+ .probe = dc_de_probe,
+ .remove = dc_de_remove,
+ .driver = {
+ .name = "imx8-dc-display-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_de_dt_ids,
+ .pm = pm_sleep_ptr(&dc_de_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-de.h b/drivers/gpu/drm/imx/dc/dc-de.h
new file mode 100644
index 000000000000..211f3fcc1a9a
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DISPLAY_ENGINE_H__
+#define __DC_DISPLAY_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <drm/drm_modes.h>
+
+#define DC_DISPLAYS 2
+
+#define DC_FRAMEGEN_MAX_FRAME_INDEX 0x3ffff
+#define DC_FRAMEGEN_MAX_CLOCK_KHZ 300000
+
+struct dc_fg {
+ struct device *dev;
+ struct regmap *reg;
+ struct clk *clk_disp;
+};
+
+struct dc_tc {
+ struct device *dev;
+ struct regmap *reg;
+};
+
+struct dc_de {
+ struct device *dev;
+ struct regmap *reg_top;
+ struct dc_fg *fg;
+ struct dc_tc *tc;
+ int irq_shdload;
+ int irq_framecomplete;
+ int irq_seqcomplete;
+};
+
+/* Frame Generator Unit */
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m);
+void dc_fg_enable(struct dc_fg *fg);
+void dc_fg_disable(struct dc_fg *fg);
+void dc_fg_shdtokgen(struct dc_fg *fg);
+u32 dc_fg_get_frame_index(struct dc_fg *fg);
+u32 dc_fg_get_line_index(struct dc_fg *fg);
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg);
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg);
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg);
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg);
+void dc_fg_enable_clock(struct dc_fg *fg);
+void dc_fg_disable_clock(struct dc_fg *fg);
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz);
+void dc_fg_init(struct dc_fg *fg);
+
+/* Timing Controller Unit */
+void dc_tc_init(struct dc_tc *tc);
+
+#endif /* __DC_DISPLAY_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.c b/drivers/gpu/drm/imx/dc/dc-drv.c
new file mode 100644
index 000000000000..04f021d2d6cf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_of.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+struct dc_priv {
+ struct drm_device *drm;
+ struct clk *clk_cfg;
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(dc_drm_driver_fops);
+
+static struct drm_driver dc_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
+ .fops = &dc_drm_driver_fops,
+ .name = "imx8-dc",
+ .desc = "i.MX8 DC DRM graphics",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+};
+
+static void
+dc_add_components(struct device *dev, struct component_match **matchptr)
+{
+ struct device_node *child, *grandchild;
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* The interrupt controller is not a component. */
+ if (of_device_is_compatible(child, "fsl,imx8qxp-dc-intc"))
+ continue;
+
+ drm_of_component_match_add(dev, matchptr, component_compare_of,
+ child);
+
+ for_each_available_child_of_node(child, grandchild)
+ drm_of_component_match_add(dev, matchptr,
+ component_compare_of,
+ grandchild);
+ }
+}
+
+static int dc_drm_component_bind_all(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret;
+
+ ret = component_bind_all(drm->dev, dc_drm);
+ if (ret)
+ return ret;
+
+ dc_de_post_bind(dc_drm);
+ dc_pe_post_bind(dc_drm);
+
+ return 0;
+}
+
+static void dc_drm_component_unbind_all(void *ptr)
+{
+ struct dc_drm_device *dc_drm = ptr;
+ struct drm_device *drm = &dc_drm->base;
+
+ component_unbind_all(drm->dev, dc_drm);
+}
+
+static int dc_drm_bind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm;
+ struct drm_device *drm;
+ int ret;
+
+ dc_drm = devm_drm_dev_alloc(dev, &dc_drm_driver, struct dc_drm_device,
+ base);
+ if (IS_ERR(dc_drm))
+ return PTR_ERR(dc_drm);
+
+ drm = &dc_drm->base;
+
+ ret = dc_drm_component_bind_all(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, dc_drm_component_unbind_all,
+ dc_drm);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret) {
+ dev_err(dev, "failed to register drm device: %d\n", ret);
+ goto err;
+ }
+
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_XRGB8888);
+
+ priv->drm = drm;
+
+ return 0;
+
+err:
+ dc_kms_uninit(dc_drm);
+
+ return ret;
+}
+
+static void dc_drm_unbind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(priv->drm);
+ struct drm_device *drm = &dc_drm->base;
+
+ priv->drm = NULL;
+ drm_dev_unplug(drm);
+ dc_kms_uninit(dc_drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
+static const struct component_master_ops dc_drm_ops = {
+ .bind = dc_drm_bind,
+ .unbind = dc_drm_unbind,
+};
+
+static int dc_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ struct dc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk_cfg = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk_cfg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cfg),
+ "failed to get cfg clock\n");
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret)
+ return ret;
+
+ dc_add_components(&pdev->dev, &match);
+
+ ret = component_master_add_with_match(&pdev->dev, &dc_drm_ops, match);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component master\n");
+
+ return 0;
+}
+
+static void dc_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &dc_drm_ops);
+}
+
+static int dc_runtime_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk_cfg);
+
+ return 0;
+}
+
+static int dc_runtime_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_cfg);
+ if (ret)
+ dev_err(dev, "failed to enable cfg clock: %d\n", ret);
+
+ return ret;
+}
+
+static int dc_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(priv->drm);
+}
+
+static int dc_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(priv->drm);
+}
+
+static void dc_shutdown(struct platform_device *pdev)
+{
+ struct dc_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ drm_atomic_helper_shutdown(priv->drm);
+}
+
+static const struct dev_pm_ops dc_pm_ops = {
+ RUNTIME_PM_OPS(dc_runtime_suspend, dc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dc_suspend, dc_resume)
+};
+
+static const struct of_device_id dc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_dt_ids);
+
+static struct platform_driver dc_driver = {
+ .probe = dc_probe,
+ .remove = dc_remove,
+ .shutdown = dc_shutdown,
+ .driver = {
+ .name = "imx8-dc",
+ .of_match_table = dc_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pm_ops),
+ },
+};
+
+static struct platform_driver * const dc_drivers[] = {
+ &dc_cf_driver,
+ &dc_de_driver,
+ &dc_ed_driver,
+ &dc_fg_driver,
+ &dc_fl_driver,
+ &dc_fw_driver,
+ &dc_ic_driver,
+ &dc_lb_driver,
+ &dc_pe_driver,
+ &dc_tc_driver,
+ &dc_driver,
+};
+
+static int __init dc_drm_init(void)
+{
+ return platform_register_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+static void __exit dc_drm_exit(void)
+{
+ platform_unregister_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+module_init(dc_drm_init);
+module_exit(dc_drm_exit);
+
+MODULE_DESCRIPTION("i.MX8 Display Controller DRM Driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.h b/drivers/gpu/drm/imx/dc/dc-drv.h
new file mode 100644
index 000000000000..eb61b8c76269
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DRV_H__
+#define __DC_DRV_H__
+
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+
+#include "dc-de.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+/**
+ * struct dc_drm_device - DC specific drm_device
+ */
+struct dc_drm_device {
+ /** @base: base drm_device structure */
+ struct drm_device base;
+ /** @dc_crtc: DC specific CRTC list */
+ struct dc_crtc dc_crtc[DC_DISPLAYS];
+ /** @dc_primary: DC specific primary plane list */
+ struct dc_plane dc_primary[DC_DISPLAYS];
+ /** @encoder: encoder list */
+ struct drm_encoder encoder[DC_DISPLAYS];
+ /** @cf_safe: constframe list (safety stream) */
+ struct dc_cf *cf_safe[DC_DISPLAYS];
+ /** @cf_cont: constframe list (content stream) */
+ struct dc_cf *cf_cont[DC_DISPLAYS];
+ /** @de: display engine list */
+ struct dc_de *de[DC_DISPLAYS];
+ /** @ed_safe: extdst list (safety stream) */
+ struct dc_ed *ed_safe[DC_DISPLAYS];
+ /** @ed_cont: extdst list (content stream) */
+ struct dc_ed *ed_cont[DC_DISPLAYS];
+ /** @fg: framegen list */
+ struct dc_fg *fg[DC_DISPLAYS];
+ /** @fu_disp: fetchunit list (used by display engine) */
+ struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ /** @lb: layerblend list */
+ struct dc_lb *lb[DC_LB_CNT];
+ /** @pe: pixel engine */
+ struct dc_pe *pe;
+ /** @tc: tcon list */
+ struct dc_tc *tc[DC_DISPLAYS];
+};
+
+struct dc_subdev_info {
+ resource_size_t reg_start;
+ int id;
+};
+
+static inline struct dc_drm_device *to_dc_drm_device(struct drm_device *drm)
+{
+ return container_of(drm, struct dc_drm_device, base);
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index);
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index);
+
+int dc_kms_init(struct dc_drm_device *dc_drm);
+void dc_kms_uninit(struct dc_drm_device *dc_drm);
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane);
+
+extern struct platform_driver dc_cf_driver;
+extern struct platform_driver dc_de_driver;
+extern struct platform_driver dc_ed_driver;
+extern struct platform_driver dc_fg_driver;
+extern struct platform_driver dc_fl_driver;
+extern struct platform_driver dc_fw_driver;
+extern struct platform_driver dc_ic_driver;
+extern struct platform_driver dc_lb_driver;
+extern struct platform_driver dc_pe_driver;
+extern struct platform_driver dc_tc_driver;
+
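+/*
+ * Map a subdevice's register base address to its instance id using the
+ * per-driver dc_*_info tables.
+ */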
+static inline int dc_subdev_get_id(const struct dc_subdev_info *info,
+ int info_cnt, struct resource *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < info_cnt; i++)
+ if (info[i].reg_start == res->start)
+ return info[i].id;
+
+ return -EINVAL;
+}
+
+void dc_de_post_bind(struct dc_drm_device *dc_drm);
+void dc_pe_post_bind(struct dc_drm_device *dc_drm);
+
+#endif /* __DC_DRV_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-ed.c b/drivers/gpu/drm/imx/dc/dc-ed.c
new file mode 100644
index 000000000000..86ecc22d0a55
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ed.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_STATIC 0x8
+#define POWERDOWN BIT(4)
+#define SYNC_MODE BIT(8)
+#define SINGLE 0
+#define DIV_MASK GENMASK(23, 16)
+#define DIV(x) FIELD_PREP(DIV_MASK, (x))
+#define DIV_RESET 0x80
+
+#define PIXENGCFG_DYNAMIC 0xc
+
+#define PIXENGCFG_TRIGGER 0x14
+#define SYNC_TRIGGER BIT(0)
+
+#define STATICCONTROL 0x8
+#define KICK_MODE BIT(8)
+#define EXTERNAL BIT(8)
+#define PERFCOUNTMODE BIT(12)
+
+#define CONTROL 0xc
+#define GAMMAAPPLYENABLE BIT(0)
+
+static const struct dc_subdev_info dc_ed_info[] = {
+ { .reg_start = 0x56180980, .id = 0, },
+ { .reg_start = 0x56180a00, .id = 1, },
+ { .reg_start = 0x561809c0, .id = 4, },
+ { .reg_start = 0x56180a40, .id = 5, },
+};
+
+static const struct regmap_range dc_ed_pec_regmap_write_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_write_table = {
+ .yes_ranges = dc_ed_pec_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_read_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_read_table = {
+ .yes_ranges = dc_ed_pec_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_volatile_ranges[] = {
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_volatile_table = {
+ .yes_ranges = dc_ed_pec_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ed_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_pec_regmap_write_table,
+ .rd_table = &dc_ed_pec_regmap_read_table,
+ .volatile_table = &dc_ed_pec_regmap_volatile_table,
+ .max_register = PIXENGCFG_TRIGGER,
+};
+
+static const struct regmap_range dc_ed_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, STATICCONTROL),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_ed_regmap_access_table = {
+ .yes_ranges = dc_ed_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_regmap_ranges),
+};
+
+static const struct regmap_config dc_ed_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_regmap_access_table,
+ .rd_table = &dc_ed_regmap_access_table,
+ .max_register = CONTROL,
+};
+
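+/* Pixel engine sources that are allowed to feed an extdst unit. */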
+static const enum dc_link_id src_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ LINK_ID_LAYERBLEND3,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND0,
+};
+
+static inline void dc_ed_pec_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_pec_poweron(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, POWERDOWN, 0);
+}
+
+static inline void dc_ed_pec_sync_mode_single(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SYNC_MODE, SINGLE);
+}
+
+static inline void dc_ed_pec_div_reset(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, DIV_MASK,
+ DIV(DIV_RESET));
+}
+
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src)
+{
+ int i;
+
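+ /* Silently ignore sources that cannot feed an extdst unit. */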
+ for (i = 0; i < ARRAY_SIZE(src_sels); i++) {
+ if (src_sels[i] == src) {
+ regmap_write(ed->reg_pec, PIXENGCFG_DYNAMIC, src);
+ return;
+ }
+ }
+}
+
+void dc_ed_pec_sync_trigger(struct dc_ed *ed)
+{
+ regmap_write(ed->reg_pec, PIXENGCFG_TRIGGER, SYNC_TRIGGER);
+}
+
+static inline void dc_ed_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_kick_mode_external(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, KICK_MODE, EXTERNAL);
+}
+
+static inline void dc_ed_disable_perfcountmode(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, PERFCOUNTMODE, 0);
+}
+
+static inline void dc_ed_disable_gamma_apply(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, CONTROL, GAMMAAPPLYENABLE, 0);
+}
+
+void dc_ed_init(struct dc_ed *ed)
+{
+ dc_ed_pec_src_sel(ed, LINK_ID_NONE);
+ dc_ed_pec_enable_shden(ed);
+ dc_ed_pec_poweron(ed);
+ dc_ed_pec_sync_mode_single(ed);
+ dc_ed_pec_div_reset(ed);
+ dc_ed_enable_shden(ed);
+ dc_ed_disable_perfcountmode(ed);
+ dc_ed_kick_mode_external(ed);
+ dc_ed_disable_gamma_apply(ed);
+}
+
+static int dc_ed_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_ed *ed;
+ int id;
+
+ ed = devm_kzalloc(dev, sizeof(*ed), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ ed->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_ed_pec_regmap_config);
+ if (IS_ERR(ed->reg_pec))
+ return PTR_ERR(ed->reg_pec);
+
+ ed->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_ed_cfg_regmap_config);
+ if (IS_ERR(ed->reg_cfg))
+ return PTR_ERR(ed->reg_cfg);
+
+ ed->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (ed->irq_shdload < 0)
+ return ed->irq_shdload;
+
+ ed->dev = dev;
+
+ id = dc_subdev_get_id(dc_ed_info, ARRAY_SIZE(dc_ed_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ switch (id) {
+ case 0:
+ dc_drm->ed_cont[0] = ed;
+ break;
+ case 1:
+ dc_drm->ed_cont[1] = ed;
+ break;
+ case 4:
+ dc_drm->ed_safe[0] = ed;
+ break;
+ case 5:
+ dc_drm->ed_safe[1] = ed;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_ed_ops = {
+ .bind = dc_ed_bind,
+};
+
+static int dc_ed_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_ed_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_ed_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_ed_ops);
+}
+
+static const struct of_device_id dc_ed_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-extdst" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_ed_dt_ids);
+
+struct platform_driver dc_ed_driver = {
+ .probe = dc_ed_probe,
+ .remove = dc_ed_remove,
+ .driver = {
+ .name = "imx8-dc-extdst",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ed_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fg.c b/drivers/gpu/drm/imx/dc/dc-fg.c
new file mode 100644
index 000000000000..7f6c1852bf72
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fg.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <drm/drm_modes.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define FGSTCTRL 0x8
+#define FGSYNCMODE_MASK GENMASK(2, 1)
+#define FGSYNCMODE(x) FIELD_PREP(FGSYNCMODE_MASK, (x))
+#define SHDEN BIT(0)
+
+#define HTCFG1 0xc
+#define HTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define HTCFG2 0x10
+#define HSEN BIT(31)
+#define HSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define VTCFG1 0x14
+#define VTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define VTCFG2 0x18
+#define VSEN BIT(31)
+#define VSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define PKICKCONFIG 0x2c
+#define SKICKCONFIG 0x30
+#define EN BIT(31)
+#define ROW(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define COL(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define PACFG 0x54
+#define SACFG 0x58
+#define STARTY(x) FIELD_PREP(GENMASK(29, 16), ((x) + 1))
+#define STARTX(x) FIELD_PREP(GENMASK(13, 0), ((x) + 1))
+
+#define FGINCTRL 0x5c
+#define FGINCTRLPANIC 0x60
+#define FGDM_MASK GENMASK(2, 0)
+#define ENPRIMALPHA BIT(3)
+#define ENSECALPHA BIT(4)
+
+#define FGCCR 0x64
+#define CCGREEN(x) FIELD_PREP(GENMASK(19, 10), (x))
+
+#define FGENABLE 0x68
+#define FGEN BIT(0)
+
+#define FGSLR 0x6c
+#define SHDTOKGEN BIT(0)
+
+#define FGTIMESTAMP 0x74
+#define FRAMEINDEX(x) FIELD_GET(GENMASK(31, 14), (x))
+#define LINEINDEX(x) FIELD_GET(GENMASK(13, 0), (x))
+
+#define FGCHSTAT 0x78
+#define SECSYNCSTAT BIT(24)
+#define SFIFOEMPTY BIT(16)
+
+#define FGCHSTATCLR 0x7c
+#define CLRSECSTAT BIT(16)
+
+enum dc_fg_syncmode {
+ FG_SYNCMODE_OFF, /* No side-by-side synchronization. */
+};
+
+enum dc_fg_dm {
+ FG_DM_CONSTCOL = 0x1, /* Constant Color Background is shown. */
+ FG_DM_SEC_ON_TOP = 0x5, /* Both inputs overlaid with secondary on top. */
+};
+
+static const struct dc_subdev_info dc_fg_info[] = {
+ { .reg_start = 0x5618b800, .id = 0, },
+ { .reg_start = 0x5618d400, .id = 1, },
+};
+
+static const struct regmap_range dc_fg_regmap_write_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGSLR),
+ regmap_reg_range(FGCHSTATCLR, FGCHSTATCLR),
+};
+
+static const struct regmap_range dc_fg_regmap_read_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGENABLE),
+ regmap_reg_range(FGTIMESTAMP, FGCHSTAT),
+};
+
+static const struct regmap_access_table dc_fg_regmap_write_table = {
+ .yes_ranges = dc_fg_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_write_ranges),
+};
+
+static const struct regmap_access_table dc_fg_regmap_read_table = {
+ .yes_ranges = dc_fg_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_read_ranges),
+};
+
+static const struct regmap_config dc_fg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fg_regmap_write_table,
+ .rd_table = &dc_fg_regmap_read_table,
+ .max_register = FGCHSTATCLR,
+};
+
+static inline void dc_fg_enable_shden(struct dc_fg *fg)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, SHDEN, SHDEN);
+}
+
+static inline void dc_fg_syncmode(struct dc_fg *fg, enum dc_fg_syncmode mode)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, FGSYNCMODE_MASK, FGSYNCMODE(mode));
+}
+
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m)
+{
+ u32 hact, htotal, hsync, hsbp;
+ u32 vact, vtotal, vsync, vsbp;
+ u32 kick_row, kick_col;
+ int ret;
+
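+ /* hsbp/vsbp cover both the sync pulse and the back porch. */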
+ hact = m->crtc_hdisplay;
+ htotal = m->crtc_htotal;
+ hsync = m->crtc_hsync_end - m->crtc_hsync_start;
+ hsbp = m->crtc_htotal - m->crtc_hsync_start;
+
+ vact = m->crtc_vdisplay;
+ vtotal = m->crtc_vtotal;
+ vsync = m->crtc_vsync_end - m->crtc_vsync_start;
+ vsbp = m->crtc_vtotal - m->crtc_vsync_start;
+
+ /* video mode */
+ regmap_write(fg->reg, HTCFG1, HACT(hact) | HTOTAL(htotal));
+ regmap_write(fg->reg, HTCFG2, HSYNC(hsync) | HSBP(hsbp) | HSEN);
+ regmap_write(fg->reg, VTCFG1, VACT(vact) | VTOTAL(vtotal));
+ regmap_write(fg->reg, VTCFG2, VSYNC(vsync) | VSBP(vsbp) | VSEN);
+
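+ /* Generate the kick signals just past the end of the active area. */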
+ kick_col = hact + 1;
+ kick_row = vact;
+
+ /* pkickconfig */
+ regmap_write(fg->reg, PKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+ /* skickconfig */
+ regmap_write(fg->reg, SKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+ /* primary and secondary area position configuration */
+ regmap_write(fg->reg, PACFG, STARTX(0) | STARTY(0));
+ regmap_write(fg->reg, SACFG, STARTX(0) | STARTY(0));
+
+ /* alpha */
+ regmap_write_bits(fg->reg, FGINCTRL, ENPRIMALPHA | ENSECALPHA, 0);
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, ENPRIMALPHA | ENSECALPHA, 0);
+
+ /* constant color is green (used in panic mode) */
+ regmap_write(fg->reg, FGCCR, CCGREEN(0x3ff));
+
+ ret = clk_set_rate(fg->clk_disp, m->clock * HZ_PER_KHZ);
+ if (ret < 0)
+ dev_err(fg->dev, "failed to set display clock rate: %d\n", ret);
+}
+
+static inline void dc_fg_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRL, FGDM_MASK, mode);
+}
+
+static inline void dc_fg_panic_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, FGDM_MASK, mode);
+}
+
+void dc_fg_enable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, FGEN);
+}
+
+void dc_fg_disable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, 0);
+}
+
+void dc_fg_shdtokgen(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGSLR, SHDTOKGEN);
+}
+
+u32 dc_fg_get_frame_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return FRAMEINDEX(val);
+}
+
+u32 dc_fg_get_line_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return LINEINDEX(val);
+}
+
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u32 frame_index, last_frame_index;
+
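+ /* Poll for up to 100ms for the frame index to advance. */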
+ frame_index = dc_fg_get_frame_index(fg);
+ do {
+ last_frame_index = frame_index;
+ frame_index = dc_fg_get_frame_index(fg);
+ } while (last_frame_index == frame_index &&
+ time_before(jiffies, timeout));
+
+ return last_frame_index != frame_index;
+}
+
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGCHSTAT, &val);
+
+ return !!(val & SFIFOEMPTY);
+}
+
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGCHSTATCLR, CLRSECSTAT);
+}
+
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(fg->reg, FGCHSTAT, val,
+ val & SECSYNCSTAT, 5, 100000);
+}
+
+void dc_fg_enable_clock(struct dc_fg *fg)
+{
+ int ret;
+
+ ret = clk_prepare_enable(fg->clk_disp);
+ if (ret)
+ dev_err(fg->dev, "failed to enable display clock: %d\n", ret);
+}
+
+void dc_fg_disable_clock(struct dc_fg *fg)
+{
+ clk_disable_unprepare(fg->clk_disp);
+}
+
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz)
+{
+ unsigned long rounded_rate;
+
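+ /* Reject modes whose exact pixel clock rate cannot be generated. */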
+ rounded_rate = clk_round_rate(fg->clk_disp, clk_khz * HZ_PER_KHZ);
+
+ if (rounded_rate != clk_khz * HZ_PER_KHZ)
+ return MODE_NOCLOCK;
+
+ return MODE_OK;
+}
+
+void dc_fg_init(struct dc_fg *fg)
+{
+ dc_fg_enable_shden(fg);
+ dc_fg_syncmode(fg, FG_SYNCMODE_OFF);
+ dc_fg_displaymode(fg, FG_DM_SEC_ON_TOP);
+ dc_fg_panic_displaymode(fg, FG_DM_CONSTCOL);
+}
+
+static int dc_fg_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_fg *fg;
+ int id;
+
+ fg = devm_kzalloc(dev, sizeof(*fg), GFP_KERNEL);
+ if (!fg)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ fg->reg = devm_regmap_init_mmio(dev, base, &dc_fg_regmap_config);
+ if (IS_ERR(fg->reg))
+ return PTR_ERR(fg->reg);
+
+ fg->clk_disp = devm_clk_get(dev, NULL);
+ if (IS_ERR(fg->clk_disp))
+ return dev_err_probe(dev, PTR_ERR(fg->clk_disp),
+ "failed to get display clock\n");
+
+ id = dc_subdev_get_id(dc_fg_info, ARRAY_SIZE(dc_fg_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fg->dev = dev;
+ dc_drm->fg[id] = fg;
+
+ return 0;
+}
+
+static const struct component_ops dc_fg_ops = {
+ .bind = dc_fg_bind,
+};
+
+static int dc_fg_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fg_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fg_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fg_ops);
+}
+
+static const struct of_device_id dc_fg_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-framegen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fg_dt_ids);
+
+struct platform_driver dc_fg_driver = {
+ .probe = dc_fg_probe,
+ .remove = dc_fg_remove,
+ .driver = {
+ .name = "imx8-dc-framegen",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fg_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fl.c b/drivers/gpu/drm/imx/dc/dc-fl.c
new file mode 100644
index 000000000000..3ce24c72aa13
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fl.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+
+struct dc_fl {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fl_info[] = {
+ { .reg_start = 0x56180ac0, .id = 0, },
+};
+
+static const struct regmap_range dc_fl_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+};
+
+static const struct regmap_access_table dc_fl_regmap_access_table = {
+ .yes_ranges = dc_fl_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fl_regmap_ranges),
+};
+
+static const struct regmap_config dc_fl_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fl_regmap_access_table,
+ .rd_table = &dc_fl_regmap_access_table,
+ .max_register = FRAMEDIMENSIONS,
+};
+
+static void dc_fl_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fl_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
+static void dc_fl_init(struct dc_fu *fu)
+{
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fl_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fl_init;
+ fu->ops.set_fmt = dc_fl_set_fmt;
+ fu->ops.set_framedimensions = dc_fl_set_framedimensions;
+}
+
+static int dc_fl_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_fl *fl;
+ struct dc_fu *fu;
+ int i, id;
+
+ fl = devm_kzalloc(dev, sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ fu = &fl->fu;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fl_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fl_info, ARRAY_SIZE(dc_fl_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHLAYER0;
+ fu->id = DC_FETCHUNIT_FL0;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchLayer%d", id);
+
+ dc_fl_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fl_ops = {
+ .bind = dc_fl_bind,
+};
+
+static int dc_fl_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fl_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fl_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fl_ops);
+}
+
+static const struct of_device_id dc_fl_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchlayer" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fl_dt_ids);
+
+struct platform_driver dc_fl_driver = {
+ .probe = dc_fl_probe,
+ .remove = dc_fl_remove,
+ .driver = {
+ .name = "imx8-dc-fetchlayer",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fl_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.c b/drivers/gpu/drm/imx/dc/dc-fu.c
new file mode 100644
index 000000000000..f94c591c8158
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/math.h>
+
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+/* STATICCONTROL */
+#define SHDLDREQSTICKY_MASK GENMASK(31, 24)
+#define SHDLDREQSTICKY(x) FIELD_PREP(SHDLDREQSTICKY_MASK, (x))
+#define BASEADDRESSAUTOUPDATE_MASK GENMASK(23, 16)
+#define BASEADDRESSAUTOUPDATE(x) FIELD_PREP(BASEADDRESSAUTOUPDATE_MASK, (x))
+
+/* BURSTBUFFERMANAGEMENT */
+#define SETBURSTLENGTH_MASK GENMASK(12, 8)
+#define SETBURSTLENGTH(x) FIELD_PREP(SETBURSTLENGTH_MASK, (x))
+#define SETNUMBUFFERS_MASK GENMASK(7, 0)
+#define SETNUMBUFFERS(x) FIELD_PREP(SETNUMBUFFERS_MASK, (x))
+#define LINEMODE_MASK BIT(31)
+
+/* SOURCEBUFFERATTRIBUTES */
+#define BITSPERPIXEL_MASK GENMASK(21, 16)
+#define BITSPERPIXEL(x) FIELD_PREP(BITSPERPIXEL_MASK, (x))
+#define STRIDE_MASK GENMASK(15, 0)
+#define STRIDE(x) FIELD_PREP(STRIDE_MASK, (x) - 1)
+
+/* SOURCEBUFFERDIMENSION */
+#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+#define LINECOUNT(x) FIELD_PREP(GENMASK(29, 16), (x))
+
+/* LAYEROFFSET */
+#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+#define LAYERYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+
+/* CLIPWINDOWOFFSET */
+#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+#define CLIPWINDOWYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+
+/* CLIPWINDOWDIMENSIONS */
+#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1)
+#define CLIPWINDOWHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x) - 1)
+
+enum dc_linemode {
+ /*
+ * Mandatory setting for operation in the Display Controller.
+ * Also works for the Blit Engine, with a marginal performance impact.
+ */
+ LINEMODE_DISPLAY = 0,
+};
+
+struct dc_fu_pixel_format {
+ u32 pixel_format;
+ u32 bits;
+ u32 shifts;
+};
+
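+/* Per-format color component bit widths and shifts (XRGB8888 only for now). */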
+static const struct dc_fu_pixel_format pixel_formats[] = {
+ {
+ DRM_FORMAT_XRGB8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0),
+ },
+};
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *bits = pixel_formats[i].bits;
+ return;
+ }
+ }
+}
+
+void
+dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *shifts = pixel_formats[i].shifts;
+ return;
+ }
+ }
+}
+
+static inline void dc_fu_enable_shden(struct dc_fu *fu)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_fu_baddr_autoupdate(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL,
+ BASEADDRESSAUTOUPDATE_MASK,
+ BASEADDRESSAUTOUPDATE(layer_mask));
+}
+
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDLDREQSTICKY_MASK,
+ SHDLDREQSTICKY(layer_mask));
+}
+
+static inline void dc_fu_set_linemode(struct dc_fu *fu, enum dc_linemode mode)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT, LINEMODE_MASK,
+ mode);
+}
+
+static inline void dc_fu_set_numbuffers(struct dc_fu *fu, unsigned int num)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETNUMBUFFERS_MASK, SETNUMBUFFERS(num));
+}
+
+static void dc_fu_set_burstlength(struct dc_fu *fu, dma_addr_t baddr)
+{
+ unsigned int burst_size, burst_length;
+
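+ /*
+ * Derive the largest burst size the base address is aligned to,
+ * clamp it to the 8..128 byte range and convert it to a burst
+ * length in 8-byte words.
+ */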
+ burst_size = 1 << __ffs(baddr);
+ burst_size = round_up(burst_size, 8);
+ burst_size = min(burst_size, 128U);
+ burst_length = burst_size / 8;
+
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETBURSTLENGTH_MASK, SETBURSTLENGTH(burst_length));
+}
+
+static void dc_fu_set_baseaddress(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr)
+{
+ regmap_write(fu->reg_cfg, fu->reg_baseaddr[frac], baddr);
+}
+
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ BITSPERPIXEL_MASK, BITSPERPIXEL(bpp));
+}
+
+static void dc_fu_set_src_stride(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ STRIDE_MASK, STRIDE(stride));
+}
+
+static void dc_fu_set_src_buf_dimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_sourcebufferdimension[frac],
+ LINEWIDTH(w) | LINECOUNT(h));
+}
+
+static inline void dc_fu_layeroffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layeroffset[frac],
+ LAYERXOFFSET(x) | LAYERYOFFSET(y));
+}
+
+static inline void dc_fu_clipoffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowoffset[frac],
+ CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y));
+}
+
+static inline void dc_fu_clipdimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int w, unsigned int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowdimensions[frac],
+ CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h));
+}
+
+static inline void
+dc_fu_set_pixel_blend_mode(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layerproperty[frac], 0);
+ regmap_write(fu->reg_cfg, fu->reg_constantcolor[frac], 0);
+}
+
+static void dc_fu_enable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, SOURCEBUFFERENABLE);
+}
+
+static void dc_fu_disable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, 0);
+
+ if (fu->lb) {
+ dc_lb_pec_clken(fu->lb, CLKEN_DISABLE);
+ dc_lb_mode(fu->lb, LB_NEUTRAL);
+ }
+}
+
+static void dc_fu_set_layerblend(struct dc_fu *fu, struct dc_lb *lb)
+{
+ fu->lb = lb;
+}
+
+static enum dc_link_id dc_fu_get_link_id(struct dc_fu *fu)
+{
+ return fu->link_id;
+}
+
+static const char *dc_fu_get_name(struct dc_fu *fu)
+{
+ return fu->name;
+}
+
+const struct dc_fu_ops dc_fu_common_ops = {
+ .set_burstlength = dc_fu_set_burstlength,
+ .set_baseaddress = dc_fu_set_baseaddress,
+ .set_src_stride = dc_fu_set_src_stride,
+ .set_src_buf_dimensions = dc_fu_set_src_buf_dimensions,
+ .enable_src_buf = dc_fu_enable_src_buf,
+ .disable_src_buf = dc_fu_disable_src_buf,
+ .set_layerblend = dc_fu_set_layerblend,
+ .get_link_id = dc_fu_get_link_id,
+ .get_name = dc_fu_get_name,
+};
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu)
+{
+ return &fu->ops;
+}
+
+void dc_fu_common_hw_init(struct dc_fu *fu)
+{
+ enum dc_fu_frac i;
+
+ dc_fu_baddr_autoupdate(fu, 0x0);
+ dc_fu_enable_shden(fu);
+ dc_fu_set_linemode(fu, LINEMODE_DISPLAY);
+ dc_fu_set_numbuffers(fu, 16);
+
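+ /* Start every fraction disabled, with neutral offsets and blending. */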
+ for (i = DC_FETCHUNIT_FRAC0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ dc_fu_layeroffset(fu, i, 0, 0);
+ dc_fu_clipoffset(fu, i, 0, 0);
+ dc_fu_clipdimensions(fu, i, 1, 1);
+ dc_fu_disable_src_buf(fu, i);
+ dc_fu_set_pixel_blend_mode(fu, i);
+ }
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.h b/drivers/gpu/drm/imx/dc/dc-fu.h
new file mode 100644
index 000000000000..e016e1ea5b4e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_FETCHUNIT_H__
+#define __DC_FETCHUNIT_H__
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-pe.h"
+
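+/* Register offset stride between two consecutive fractional register sets. */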
+#define FRAC_OFFSET 0x28
+
+#define STATICCONTROL 0x8
+#define BURSTBUFFERMANAGEMENT 0xc
+
+/* COLORCOMPONENTBITS */
+#define R_BITS(x) FIELD_PREP_CONST(GENMASK(27, 24), (x))
+#define G_BITS(x) FIELD_PREP_CONST(GENMASK(19, 16), (x))
+#define B_BITS(x) FIELD_PREP_CONST(GENMASK(11, 8), (x))
+#define A_BITS(x) FIELD_PREP_CONST(GENMASK(3, 0), (x))
+
+/* COLORCOMPONENTSHIFT */
+#define R_SHIFT(x) FIELD_PREP_CONST(GENMASK(28, 24), (x))
+#define G_SHIFT(x) FIELD_PREP_CONST(GENMASK(20, 16), (x))
+#define B_SHIFT(x) FIELD_PREP_CONST(GENMASK(12, 8), (x))
+#define A_SHIFT(x) FIELD_PREP_CONST(GENMASK(4, 0), (x))
+
+/* LAYERPROPERTY */
+#define YUVCONVERSIONMODE_MASK GENMASK(18, 17)
+#define YUVCONVERSIONMODE(x) FIELD_PREP(YUVCONVERSIONMODE_MASK, (x))
+#define SOURCEBUFFERENABLE BIT(31)
+
+/* FRAMEDIMENSIONS */
+#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+#define FRAMEHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x))
+
+/* CONTROL */
+#define INPUTSELECT_MASK GENMASK(4, 3)
+#define INPUTSELECT(x) FIELD_PREP(INPUTSELECT_MASK, (x))
+#define RASTERMODE_MASK GENMASK(2, 0)
+#define RASTERMODE(x) FIELD_PREP(RASTERMODE_MASK, (x))
+
+enum dc_yuvconversionmode {
+ YUVCONVERSIONMODE_OFF,
+};
+
+enum dc_inputselect {
+ INPUTSELECT_INACTIVE,
+};
+
+enum dc_rastermode {
+ RASTERMODE_NORMAL,
+};
+
+enum {
+ DC_FETCHUNIT_FL0,
+ DC_FETCHUNIT_FW2,
+};
+
+enum dc_fu_frac {
+ DC_FETCHUNIT_FRAC0,
+ DC_FETCHUNIT_FRAC1,
+ DC_FETCHUNIT_FRAC2,
+ DC_FETCHUNIT_FRAC3,
+ DC_FETCHUNIT_FRAC4,
+ DC_FETCHUNIT_FRAC5,
+ DC_FETCHUNIT_FRAC6,
+ DC_FETCHUNIT_FRAC7,
+ DC_FETCHUNIT_FRAC_NUM
+};
+
+struct dc_fu;
+struct dc_lb;
+
+struct dc_fu_ops {
+ void (*init)(struct dc_fu *fu);
+ void (*set_burstlength)(struct dc_fu *fu, dma_addr_t baddr);
+ void (*set_baseaddress)(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr);
+ void (*set_src_stride)(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride);
+ void (*set_src_buf_dimensions)(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h);
+ void (*set_fmt)(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format);
+ void (*enable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*disable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*set_framedimensions)(struct dc_fu *fu, int w, int h);
+ void (*set_layerblend)(struct dc_fu *fu, struct dc_lb *lb);
+ enum dc_link_id (*get_link_id)(struct dc_fu *fu);
+ const char *(*get_name)(struct dc_fu *fu);
+};
+
+struct dc_fu {
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ char name[21];
+ u32 reg_baseaddr[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferattributes[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferdimension[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layeroffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowoffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowdimensions[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_constantcolor[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layerproperty[DC_FETCHUNIT_FRAC_NUM];
+ unsigned int id;
+ enum dc_link_id link_id;
+ struct dc_fu_ops ops;
+ struct dc_lb *lb;
+};
+
+extern const struct dc_fu_ops dc_fu_common_ops;
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits);
+void dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts);
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask);
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp);
+void dc_fu_common_hw_init(struct dc_fu *fu);
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu);
+
+#endif /* __DC_FETCHUNIT_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-fw.c b/drivers/gpu/drm/imx/dc/dc-fw.c
new file mode 100644
index 000000000000..acb2d4d9e2ec
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fw.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+#define CONTROL 0x170
+
+struct dc_fw {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fw_info[] = {
+ { .reg_start = 0x56180a60, .id = 2, },
+};
+
+static const struct regmap_range dc_fw_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_fw_pec_regmap_access_table = {
+ .yes_ranges = dc_fw_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_fw_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_pec_regmap_access_table,
+ .rd_table = &dc_fw_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_fw_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_fw_regmap_access_table = {
+ .yes_ranges = dc_fw_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_regmap_ranges),
+};
+
+static const struct regmap_config dc_fw_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_regmap_access_table,
+ .rd_table = &dc_fw_regmap_access_table,
+ .max_register = CONTROL,
+};
+
+static void dc_fw_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, CONTROL, INPUTSELECT_MASK,
+ INPUTSELECT(INPUTSELECT_INACTIVE));
+ regmap_write_bits(fu->reg_cfg, CONTROL, RASTERMODE_MASK,
+ RASTERMODE(RASTERMODE_NORMAL));
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fw_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
+static void dc_fw_init(struct dc_fu *fu)
+{
+ regmap_write(fu->reg_pec, PIXENGCFG_DYNAMIC, LINK_ID_NONE);
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fw_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fw_init;
+ fu->ops.set_fmt = dc_fw_set_fmt;
+ fu->ops.set_framedimensions = dc_fw_set_framedimensions;
+}
+
+static int dc_fw_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_fw *fw;
+ struct dc_fu *fu;
+ int i, id;
+
+ fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ fu = &fw->fu;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_fw_pec_regmap_config);
+ if (IS_ERR(fu->reg_pec))
+ return PTR_ERR(fu->reg_pec);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fw_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fw_info, ARRAY_SIZE(dc_fw_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHWARP2;
+ fu->id = DC_FETCHUNIT_FW2;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchWarp%d", id);
+
+ dc_fw_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fw_ops = {
+ .bind = dc_fw_bind,
+};
+
+static int dc_fw_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fw_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fw_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fw_ops);
+}
+
+static const struct of_device_id dc_fw_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchwarp" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fw_dt_ids);
+
+struct platform_driver dc_fw_driver = {
+ .probe = dc_fw_probe,
+ .remove = dc_fw_remove,
+ .driver = {
+ .name = "imx8-dc-fetchwarp",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fw_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-ic.c b/drivers/gpu/drm/imx/dc/dc-ic.c
new file mode 100644
index 000000000000..a270ae4030cd
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ic.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define USERINTERRUPTMASK(n) (0x8 + 4 * (n))
+#define INTERRUPTENABLE(n) (0x10 + 4 * (n))
+#define INTERRUPTPRESET(n) (0x18 + 4 * (n))
+#define INTERRUPTCLEAR(n) (0x20 + 4 * (n))
+#define INTERRUPTSTATUS(n) (0x28 + 4 * (n))
+#define USERINTERRUPTENABLE(n) (0x40 + 4 * (n))
+#define USERINTERRUPTPRESET(n) (0x48 + 4 * (n))
+#define USERINTERRUPTCLEAR(n) (0x50 + 4 * (n))
+#define USERINTERRUPTSTATUS(n) (0x58 + 4 * (n))
+
+#define IRQ_COUNT 49
+#define IRQ_RESERVED 35
+#define REG_NUM 2
+
+struct dc_ic_data {
+ struct regmap *regs;
+ struct clk *clk_axi;
+ int irq[IRQ_COUNT];
+ struct irq_domain *domain;
+};
+
+struct dc_ic_entry {
+ struct dc_ic_data *data;
+ int irq;
+};
+
+static const struct regmap_range dc_ic_regmap_write_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_write_table = {
+ .yes_ranges = dc_ic_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_read_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTENABLE(1)),
+ regmap_reg_range(INTERRUPTSTATUS(0), INTERRUPTSTATUS(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTENABLE(1)),
+ regmap_reg_range(USERINTERRUPTSTATUS(0), USERINTERRUPTSTATUS(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_read_table = {
+ .yes_ranges = dc_ic_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_volatile_ranges[] = {
+ regmap_reg_range(INTERRUPTPRESET(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTPRESET(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_volatile_table = {
+ .yes_ranges = dc_ic_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ic_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ic_regmap_write_table,
+ .rd_table = &dc_ic_regmap_read_table,
+ .volatile_table = &dc_ic_regmap_volatile_table,
+ .max_register = USERINTERRUPTSTATUS(1),
+};
+
+static void dc_ic_irq_handler(struct irq_desc *desc)
+{
+ struct dc_ic_entry *entry = irq_desc_get_handler_data(desc);
+ struct dc_ic_data *data = entry->data;
+ unsigned int status, enable;
+ unsigned int virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
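+ /*
+ * Each chained parent line corresponds to exactly one bit in the
+ * user interrupt status registers; forward it to its mapping in
+ * the IRQ domain.
+ */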
+ regmap_read(data->regs, USERINTERRUPTSTATUS(entry->irq / 32), &status);
+ regmap_read(data->regs, USERINTERRUPTENABLE(entry->irq / 32), &enable);
+
+ status &= enable;
+
+ if (status & BIT(entry->irq % 32)) {
+ virq = irq_find_mapping(data->domain, entry->irq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
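+/*
+ * Bitmask of unused lines per 32-bit register: the reserved IRQ 35 and
+ * everything beyond IRQ_COUNT.
+ */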
+static const unsigned long unused_irq[REG_NUM] = {0x00000000, 0xfffe0008};
+
+static int dc_ic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct irq_chip_generic *gc;
+ struct dc_ic_entry *entry;
+ struct irq_chip_type *ct;
+ struct dc_ic_data *data;
+ void __iomem *base;
+ int i, ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ entry = devm_kcalloc(dev, IRQ_COUNT, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to initialize reg\n");
+ return PTR_ERR(base);
+ }
+
+ data->regs = devm_regmap_init_mmio(dev, base, &dc_ic_regmap_config);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ data->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(data->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(data->clk_axi),
+ "failed to get AXI clock\n");
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ dev_set_drvdata(dev, data);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get runtime PM sync: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < REG_NUM; i++) {
+ /* mask and clear all interrupts */
+ regmap_write(data->regs, USERINTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, INTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, USERINTERRUPTCLEAR(i), 0xffffffff);
+ regmap_write(data->regs, INTERRUPTCLEAR(i), 0xffffffff);
+
+ /* set all interrupts to user mode */
+ regmap_write(data->regs, USERINTERRUPTMASK(i), 0xffffffff);
+ }
+
+ data->domain = irq_domain_add_linear(dev->of_node, IRQ_COUNT,
+ &irq_generic_chip_ops, data);
+ if (!data->domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ pm_runtime_put(dev);
+ return -ENOMEM;
+ }
+ irq_domain_set_pm_device(data->domain, dev);
+
+ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, "DC",
+ handle_level_irq, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed to alloc generic IRQ chips: %d\n", ret);
+ irq_domain_remove(data->domain);
+ pm_runtime_put(dev);
+ return ret;
+ }
+
+ for (i = 0; i < IRQ_COUNT; i += 32) {
+ gc = irq_get_domain_generic_chip(data->domain, i);
+ gc->reg_base = base;
+ gc->unused = unused_irq[i / 32];
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.ack = USERINTERRUPTCLEAR(i / 32);
+ ct->regs.mask = USERINTERRUPTENABLE(i / 32);
+ }
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ data->irq[i] = irq_of_parse_and_map(dev->of_node, i);
+
+ entry[i].data = data;
+ entry[i].irq = i;
+
+ irq_set_chained_handler_and_data(data->irq[i],
+ dc_ic_irq_handler, &entry[i]);
+ }
+
+ return 0;
+}
+
+static void dc_ic_remove(struct platform_device *pdev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ if (i == IRQ_RESERVED)
+ continue;
+
+ irq_set_chained_handler_and_data(data->irq[i], NULL, NULL);
+ }
+
+ irq_domain_remove(data->domain);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static int dc_ic_runtime_suspend(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(data->clk_axi);
+
+ return 0;
+}
+
+static int dc_ic_runtime_resume(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(data->clk_axi);
+ if (ret)
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dc_ic_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dc_ic_runtime_suspend, dc_ic_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_ic_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-intc", },
+ { /* sentinel */ }
+};
+
+struct platform_driver dc_ic_driver = {
+ .probe = dc_ic_probe,
+ .remove = dc_ic_remove,
+ .driver = {
+ .name = "imx8-dc-intc",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ic_dt_ids,
+ .pm = pm_sleep_ptr(&dc_ic_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.c b/drivers/gpu/drm/imx/dc/dc-kms.c
new file mode 100644
index 000000000000..2b18aa37a4a8
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/of.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+
+static const struct drm_mode_config_funcs dc_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int dc_kms_init_encoder_per_crtc(struct dc_drm_device *dc_drm,
+ int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ struct drm_connector *connector;
+ struct device *dev = drm->dev;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ int ret;
+
+ bridge = devm_drm_of_get_bridge(dev, dc_crtc->de->tc->dev->of_node,
+ 0, 0);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret,
+ "failed to find bridge for CRTC%u\n",
+ crtc->index);
+ }
+
+ encoder = &dc_drm->encoder[crtc_index];
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
+ if (ret) {
+ dev_err(dev, "failed to initialize encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_bridge_attach(encoder, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(dev,
+ "failed to attach bridge to encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dev, "failed to init bridge connector for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ dev_err(dev,
+ "failed to attach encoder to connector for CRTC%u: %d\n",
+ crtc->index, ret);
+
+ return ret;
+}
+
+int dc_kms_init(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret, i;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ drm->mode_config.min_width = 60;
+ drm->mode_config.min_height = 60;
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+ drm->mode_config.funcs = &dc_drm_mode_config_funcs;
+
+ drm->vblank_disable_immediate = true;
+ drm->max_vblank_count = DC_FRAMEGEN_MAX_FRAME_INDEX;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_init(dc_drm, i);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init_encoder_per_crtc(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_post_init(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = drm_vblank_init(drm, DC_DISPLAYS);
+ if (ret) {
+ dev_err(drm->dev, "failed to init vblank support: %d\n", ret);
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+void dc_kms_uninit(struct dc_drm_device *dc_drm)
+{
+ drm_kms_helper_poll_fini(&dc_drm->base);
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.h b/drivers/gpu/drm/imx/dc/dc-kms.h
new file mode 100644
index 000000000000..cd7860eff986
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_KMS_H__
+#define __DC_KMS_H__
+
+#include <linux/completion.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+#define DC_CRTC_IRQS 5
+
+struct dc_crtc_irq {
+ struct dc_crtc *dc_crtc;
+ unsigned int irq;
+};
+
+/**
+ * struct dc_crtc - DC specific drm_crtc
+ *
+ * Each display controller contains one content stream and one safety stream.
+ * In general, the two streams have the same functionality. One stream is
+ * overlaid on the other by @fg. This driver chooses to generate a constant
+ * black color from the content stream as the background color, to build
+ * plane(s) on the content stream with layerblend(s), and to always generate
+ * a constant color from the safety stream. Note that, thanks to the
+ * decoupled timing, the safety stream keeps showing its constant color
+ * properly even if the content stream hangs completely due to a malfunction
+ * of this driver.
+ */
+struct dc_crtc {
+ /** @base: base drm_crtc structure */
+ struct drm_crtc base;
+ /** @de: display engine */
+ struct dc_de *de;
+ /** @cf_cont: content stream constframe */
+ struct dc_cf *cf_cont;
+ /** @cf_safe: safety stream constframe */
+ struct dc_cf *cf_safe;
+ /** @ed_cont: content stream extdst */
+ struct dc_ed *ed_cont;
+ /** @ed_safe: safety stream extdst */
+ struct dc_ed *ed_safe;
+ /** @fg: framegen */
+ struct dc_fg *fg;
+ /**
+ * @irq_dec_framecomplete:
+ *
+ * display engine configuration frame complete interrupt
+ */
+ unsigned int irq_dec_framecomplete;
+ /**
+ * @irq_dec_seqcomplete:
+ *
+ * display engine configuration sequence complete interrupt
+ */
+ unsigned int irq_dec_seqcomplete;
+ /**
+ * @irq_dec_shdload:
+ *
+ * display engine configuration shadow load interrupt
+ */
+ unsigned int irq_dec_shdload;
+ /**
+ * @irq_ed_cont_shdload:
+ *
+ * content stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_cont_shdload;
+ /**
+ * @irq_ed_safe_shdload:
+ *
+ * safety stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_safe_shdload;
+ /**
+ * @dec_seqcomplete_done:
+ *
+ * display engine configuration sequence completion
+ */
+ struct completion dec_seqcomplete_done;
+ /**
+ * @dec_shdload_done:
+ *
+ * display engine configuration shadow load completion
+ */
+ struct completion dec_shdload_done;
+ /**
+ * @ed_cont_shdload_done:
+ *
+ * content stream extdst shadow load completion
+ */
+ struct completion ed_cont_shdload_done;
+ /**
+ * @ed_safe_shdload_done:
+ *
+ * safety stream extdst shadow load completion
+ */
+ struct completion ed_safe_shdload_done;
+ /** @event: cached pending vblank event */
+ struct drm_pending_vblank_event *event;
+ /** @irqs: interrupt list */
+ struct dc_crtc_irq irqs[DC_CRTC_IRQS];
+};
+
+/**
+ * struct dc_plane - DC specific drm_plane
+ *
+ * Build a plane on content stream with a fetchunit and a layerblend.
+ */
+struct dc_plane {
+ /** @base: base drm_plane structure */
+ struct drm_plane base;
+ /** @fu: fetchunit */
+ struct dc_fu *fu;
+ /** @cf: content stream constframe */
+ struct dc_cf *cf;
+ /** @lb: layerblend */
+ struct dc_lb *lb;
+ /** @ed: content stream extdst */
+ struct dc_ed *ed;
+};
+
+#endif /* __DC_KMS_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-lb.c b/drivers/gpu/drm/imx/dc/dc-lb.c
new file mode 100644
index 000000000000..38f966625d38
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-lb.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_blend.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0)
+#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x))
+#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK GENMASK(13, 8)
+#define PIXENGCFG_DYNAMIC_SEC_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_SEC_SEL_MASK, (x))
+
+#define STATICCONTROL 0x8
+#define SHDTOKSEL_MASK GENMASK(4, 3)
+#define SHDTOKSEL(x) FIELD_PREP(SHDTOKSEL_MASK, (x))
+#define SHDLDSEL_MASK GENMASK(2, 1)
+#define SHDLDSEL(x) FIELD_PREP(SHDLDSEL_MASK, (x))
+
+#define CONTROL 0xc
+#define CTRL_MODE_MASK BIT(0)
+#define CTRL_MODE(x) FIELD_PREP(CTRL_MODE_MASK, (x))
+
+#define BLENDCONTROL 0x10
+#define ALPHA_MASK GENMASK(23, 16)
+#define ALPHA(x) FIELD_PREP(ALPHA_MASK, (x))
+#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0)
+#define PRIM_C_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x))
+#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4)
+#define SEC_C_BLD_FUNC(x) \
+ FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x))
+#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8)
+#define PRIM_A_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x))
+#define SEC_A_BLD_FUNC_MASK GENMASK(14, 12)
+#define SEC_A_BLD_FUNC(x) \
+ FIELD_PREP(SEC_A_BLD_FUNC_MASK, (x))
+
+#define POSITION 0x14
+#define XPOS_MASK GENMASK(15, 0)
+#define XPOS(x) FIELD_PREP(XPOS_MASK, (x))
+#define YPOS_MASK GENMASK(31, 16)
+#define YPOS(x) FIELD_PREP(YPOS_MASK, (x))
+
+enum dc_lb_blend_func {
+ DC_LAYERBLEND_BLEND_ZERO,
+ DC_LAYERBLEND_BLEND_ONE,
+ DC_LAYERBLEND_BLEND_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_CONST_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_CONST_ALPHA,
+};
+
+enum dc_lb_shadow_sel {
+ BOTH = 0x2,
+};
+
+static const struct dc_subdev_info dc_lb_info[] = {
+ { .reg_start = 0x56180ba0, .id = 0, },
+ { .reg_start = 0x56180bc0, .id = 1, },
+ { .reg_start = 0x56180be0, .id = 2, },
+ { .reg_start = 0x56180c00, .id = 3, },
+};
+
+static const struct regmap_range dc_lb_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_lb_pec_regmap_access_table = {
+ .yes_ranges = dc_lb_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_lb_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_pec_regmap_access_table,
+ .rd_table = &dc_lb_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_lb_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, POSITION),
+};
+
+static const struct regmap_access_table dc_lb_regmap_access_table = {
+ .yes_ranges = dc_lb_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_regmap_ranges),
+};
+
+static const struct regmap_config dc_lb_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_regmap_access_table,
+ .rd_table = &dc_lb_regmap_access_table,
+ .max_register = POSITION,
+};
+
+static const enum dc_link_id prim_sels[] = {
+ /* common options */
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ /*
+ * special options:
+ * layerblend(n) has n special options,
+ * layerblend0 through layerblend(n - 1);
+ * e.g., layerblend3 has 3 special options:
+ * layerblend0/1/2.
+ */
+ LINK_ID_LAYERBLEND0,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND3,
+};
+
+static const enum dc_link_id sec_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_FETCHWARP2,
+ LINK_ID_FETCHLAYER0,
+};
+
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb)
+{
+ return lb->link;
+}
+
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim)
+{
+ int fixed_sels_num = ARRAY_SIZE(prim_sels) - 4;
+ int i;
+
+ for (i = 0; i < fixed_sels_num + lb->id; i++) {
+ if (prim_sels[i] == prim) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_PRIM_SEL_MASK,
+ PIXENGCFG_DYNAMIC_PRIM_SEL(prim));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid primary input selection:%d\n", prim);
+}
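+
+/*
+ * A worked example of the bound above (illustrative): prim_sels ends with
+ * four layerblend entries, so fixed_sels_num counts only the five common
+ * options. For layerblend2 (lb->id == 2) the loop scans prim_sels[0..6],
+ * i.e. the common options plus LAYERBLEND0/1, matching the "n special
+ * options" rule documented next to the array.
+ */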
+
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_sels); i++) {
+ if (sec_sels[i] == sec) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_SEC_SEL_MASK,
+ PIXENGCFG_DYNAMIC_SEC_SEL(sec));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid secondary input selection:%d\n", sec);
+}
+
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken)
+{
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC, CLKEN_MASK,
+ CLKEN(clken));
+}
+
+static inline void dc_lb_enable_shden(struct dc_lb *lb)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_lb_shdtoksel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDTOKSEL_MASK,
+ SHDTOKSEL(sel));
+}
+
+static inline void dc_lb_shdldsel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDLDSEL_MASK,
+ SHDLDSEL(sel));
+}
+
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode)
+{
+ regmap_write_bits(lb->reg_cfg, CONTROL, CTRL_MODE_MASK, mode);
+}
+
+static inline void dc_lb_blendcontrol(struct dc_lb *lb)
+{
+ u32 val = PRIM_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ PRIM_C_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_C_BLD_FUNC(DC_LAYERBLEND_BLEND_CONST_ALPHA) |
+ ALPHA(DRM_BLEND_ALPHA_OPAQUE >> 8);
+
+ regmap_write(lb->reg_cfg, BLENDCONTROL, val);
+}
+
+void dc_lb_position(struct dc_lb *lb, int x, int y)
+{
+ regmap_write(lb->reg_cfg, POSITION, XPOS(x) | YPOS(y));
+}
+
+int dc_lb_get_id(struct dc_lb *lb)
+{
+ return lb->id;
+}
+
+void dc_lb_init(struct dc_lb *lb)
+{
+ dc_lb_pec_dynamic_prim_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_dynamic_sec_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_clken(lb, CLKEN_DISABLE);
+ dc_lb_shdldsel(lb, BOTH);
+ dc_lb_shdtoksel(lb, BOTH);
+ dc_lb_blendcontrol(lb);
+ dc_lb_enable_shden(lb);
+}
+
+static int dc_lb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_lb *lb;
+
+ lb = devm_kzalloc(dev, sizeof(*lb), GFP_KERNEL);
+ if (!lb)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ lb->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_lb_pec_regmap_config);
+ if (IS_ERR(lb->reg_pec))
+ return PTR_ERR(lb->reg_pec);
+
+ lb->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_lb_cfg_regmap_config);
+ if (IS_ERR(lb->reg_cfg))
+ return PTR_ERR(lb->reg_cfg);
+
+ lb->id = dc_subdev_get_id(dc_lb_info, ARRAY_SIZE(dc_lb_info), res_pec);
+ if (lb->id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", lb->id);
+ return lb->id;
+ }
+
+ lb->dev = dev;
+ lb->link = LINK_ID_LAYERBLEND0 + lb->id;
+
+ dc_drm->lb[lb->id] = lb;
+
+ return 0;
+}
+
+static const struct component_ops dc_lb_ops = {
+ .bind = dc_lb_bind,
+};
+
+static int dc_lb_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_lb_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_lb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_lb_ops);
+}
+
+static const struct of_device_id dc_lb_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-layerblend" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_lb_dt_ids);
+
+struct platform_driver dc_lb_driver = {
+ .probe = dc_lb_probe,
+ .remove = dc_lb_remove,
+ .driver = {
+ .name = "imx8-dc-layerblend",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_lb_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.c b/drivers/gpu/drm/imx/dc/dc-pe.c
new file mode 100644
index 000000000000..6676c22f3f45
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+static int dc_pe_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dc_drm_device *dc_drm = data;
+ struct dc_pe *pe;
+ int ret;
+
+ pe = devm_kzalloc(dev, sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
+ pe->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(pe->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(pe->clk_axi),
+ "failed to get AXI clock\n");
+
+ pe->dev = dev;
+
+ dev_set_drvdata(dev, pe);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ dc_drm->pe = pe;
+
+ return 0;
+}
+
+/*
+ * The child device pointers could be collected in the child components'
+ * bind callbacks, but that would rely on the component helper binding the
+ * pixel engine component first. To avoid that ordering dependency, fetch
+ * the pointers from dc_drm in this post-bind step instead.
+ */
+void dc_pe_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_pe *pe = dc_drm->pe;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ pe->cf_safe[i] = dc_drm->cf_safe[i];
+ pe->cf_cont[i] = dc_drm->cf_cont[i];
+ pe->ed_safe[i] = dc_drm->ed_safe[i];
+ pe->ed_cont[i] = dc_drm->ed_cont[i];
+ }
+
+ for (i = 0; i < DC_DISP_FU_CNT; i++)
+ pe->fu_disp[i] = dc_drm->fu_disp[i];
+
+ for (i = 0; i < DC_LB_CNT; i++)
+ pe->lb[i] = dc_drm->lb[i];
+}
+
+static const struct component_ops dc_pe_ops = {
+ .bind = dc_pe_bind,
+};
+
+static int dc_pe_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_pe_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_pe_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_pe_ops);
+}
+
+static int dc_pe_runtime_suspend(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pe->clk_axi);
+
+ return 0;
+}
+
+static int dc_pe_runtime_resume(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = clk_prepare_enable(pe->clk_axi);
+ if (ret) {
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_safe); i++)
+ dc_cf_init(pe->cf_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_cont); i++)
+ dc_cf_init(pe->cf_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_safe); i++)
+ dc_ed_init(pe->ed_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_cont); i++)
+ dc_ed_init(pe->ed_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->fu_disp); i++)
+ pe->fu_disp[i]->ops.init(pe->fu_disp[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->lb); i++)
+ dc_lb_init(pe->lb[i]);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dc_pe_pm_ops = {
+ RUNTIME_PM_OPS(dc_pe_runtime_suspend, dc_pe_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_pe_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-pixel-engine", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_pe_dt_ids);
+
+struct platform_driver dc_pe_driver = {
+ .probe = dc_pe_probe,
+ .remove = dc_pe_remove,
+ .driver = {
+ .name = "imx8-dc-pixel-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_pe_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pe_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.h b/drivers/gpu/drm/imx/dc/dc-pe.h
new file mode 100644
index 000000000000..f5e01a6eb9e9
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_PIXEL_ENGINE_H__
+#define __DC_PIXEL_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+
+#define SHDEN BIT(0)
+
+#define CLKEN_MASK_SHIFT 24
+#define CLKEN_MASK (0x3 << CLKEN_MASK_SHIFT)
+#define CLKEN(n) ((n) << CLKEN_MASK_SHIFT)
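+
+/*
+ * For instance (illustrative arithmetic only): CLKEN(CLKEN_AUTOMATIC)
+ * evaluates to 0x1 << 24, so writing it under CLKEN_MASK toggles only the
+ * two-bit clock-enable field of a PIXENGCFG dynamic register, leaving the
+ * input selectors in the low bits untouched.
+ */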
+
+#define DC_DISP_FU_CNT 2
+#define DC_LB_CNT 4
+
+enum dc_link_id {
+ LINK_ID_NONE = 0x00,
+ LINK_ID_CONSTFRAME0 = 0x0c,
+ LINK_ID_CONSTFRAME4 = 0x0e,
+ LINK_ID_CONSTFRAME1 = 0x10,
+ LINK_ID_CONSTFRAME5 = 0x12,
+ LINK_ID_FETCHWARP2 = 0x14,
+ LINK_ID_FETCHLAYER0 = 0x1a,
+ LINK_ID_LAYERBLEND0 = 0x21,
+ LINK_ID_LAYERBLEND1 = 0x22,
+ LINK_ID_LAYERBLEND2 = 0x23,
+ LINK_ID_LAYERBLEND3 = 0x24,
+};
+
+enum dc_lb_mode {
+ LB_NEUTRAL, /* Output is the same as the primary input. */
+ LB_BLEND,
+};
+
+enum dc_pec_clken {
+ CLKEN_DISABLE,
+ CLKEN_AUTOMATIC,
+};
+
+struct dc_cf {
+ struct regmap *reg_cfg;
+ enum dc_link_id link;
+};
+
+struct dc_ed {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int irq_shdload;
+};
+
+struct dc_lb {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int id;
+ enum dc_link_id link;
+};
+
+struct dc_pe {
+ struct device *dev;
+ struct clk *clk_axi;
+ struct dc_cf *cf_safe[DC_DISPLAYS];
+ struct dc_cf *cf_cont[DC_DISPLAYS];
+ struct dc_ed *ed_safe[DC_DISPLAYS];
+ struct dc_ed *ed_cont[DC_DISPLAYS];
+ struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ struct dc_lb *lb[DC_LB_CNT];
+};
+
+/* Constant Frame Unit */
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf);
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w, unsigned int h);
+void dc_cf_constantcolor_black(struct dc_cf *cf);
+void dc_cf_constantcolor_blue(struct dc_cf *cf);
+void dc_cf_init(struct dc_cf *cf);
+
+/* External Destination Unit */
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src);
+void dc_ed_pec_sync_trigger(struct dc_ed *ed);
+void dc_ed_init(struct dc_ed *ed);
+
+/* Layer Blend Unit */
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb);
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim);
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec);
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken);
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode);
+void dc_lb_position(struct dc_lb *lb, int x, int y);
+int dc_lb_get_id(struct dc_lb *lb);
+void dc_lb_init(struct dc_lb *lb);
+
+#endif /* __DC_PIXEL_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-plane.c b/drivers/gpu/drm/imx/dc/dc-plane.c
new file mode 100644
index 000000000000..d8b946fb90de
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-plane.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/container_of.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-kms.h"
+
+#define DC_PLANE_MAX_PITCH 0x10000
+#define DC_PLANE_MAX_PIX_CNT 8192
+
+#define dc_plane_dbg(plane, fmt, ...) \
+do { \
+ struct drm_plane *_plane = (plane); \
+ drm_dbg_kms(_plane->dev, "[PLANE:%d:%s] " fmt, \
+ _plane->base.id, _plane->name, ##__VA_ARGS__); \
+} while (0)
+
+static const uint32_t dc_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const struct drm_plane_funcs dc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static inline struct dc_plane *to_dc_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct dc_plane, base);
+}
+
+static int dc_plane_check_max_source_resolution(struct drm_plane_state *state)
+{
+ int src_h = drm_rect_height(&state->src) >> 16;
+ int src_w = drm_rect_width(&state->src) >> 16;
+
+ if (src_w > DC_PLANE_MAX_PIX_CNT || src_h > DC_PLANE_MAX_PIX_CNT) {
+ dc_plane_dbg(state->plane, "invalid source resolution\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dc_plane_check_fb(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ dma_addr_t baseaddr = drm_fb_dma_get_gem_addr(fb, state, 0);
+
+ /* base address alignment */
+ if (baseaddr & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad baddr alignment\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] range */
+ if (fb->pitches[0] > DC_PLANE_MAX_PITCH) {
+ dc_plane_dbg(state->plane, "fb pitches[0] is out of range\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] alignment */
+ if (fb->pitches[0] & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad pitches[0] alignment\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
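+
+/*
+ * Illustrative rationale for the checks above: the only exposed format is
+ * XRGB8888 (4 bytes per pixel), so both the base address and pitches[0]
+ * are required to be 4-byte aligned, and a pitch may not exceed
+ * DC_PLANE_MAX_PITCH bytes.
+ */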
+
+static int
+dc_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ /* ok to disable */
+ if (!plane_state->fb)
+ return 0;
+
+ if (!plane_state->crtc) {
+ dc_plane_dbg(plane, "no CRTC in plane state\n");
+ return -EINVAL;
+ }
+
+ crtc_state =
+ drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, false);
+ if (ret) {
+ dc_plane_dbg(plane, "failed to check plane state: %d\n", ret);
+ return ret;
+ }
+
+ ret = dc_plane_check_max_source_resolution(plane_state);
+ if (ret)
+ return ret;
+
+ return dc_plane_check_fb(plane_state);
+}
+
+static void
+dc_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct dc_plane *dplane = to_dc_plane(plane);
+ struct drm_framebuffer *fb = new_state->fb;
+ const struct dc_fu_ops *fu_ops;
+ struct dc_lb *lb = dplane->lb;
+ struct dc_fu *fu = dplane->fu;
+ dma_addr_t baseaddr;
+ int src_w, src_h;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ src_w = drm_rect_width(&new_state->src) >> 16;
+ src_h = drm_rect_height(&new_state->src) >> 16;
+
+ baseaddr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
+
+ fu_ops = dc_fu_get_ops(dplane->fu);
+
+ fu_ops->set_layerblend(fu, lb);
+ fu_ops->set_burstlength(fu, baseaddr);
+ fu_ops->set_src_stride(fu, DC_FETCHUNIT_FRAC0, fb->pitches[0]);
+ fu_ops->set_src_buf_dimensions(fu, DC_FETCHUNIT_FRAC0, src_w, src_h);
+ fu_ops->set_fmt(fu, DC_FETCHUNIT_FRAC0, fb->format);
+ fu_ops->set_framedimensions(fu, src_w, src_h);
+ fu_ops->set_baseaddress(fu, DC_FETCHUNIT_FRAC0, baseaddr);
+ fu_ops->enable_src_buf(fu, DC_FETCHUNIT_FRAC0);
+
+ dc_plane_dbg(plane, "uses %s\n", fu_ops->get_name(fu));
+
+ dc_lb_pec_dynamic_prim_sel(lb, dc_cf_get_link_id(dplane->cf));
+ dc_lb_pec_dynamic_sec_sel(lb, fu_ops->get_link_id(fu));
+ dc_lb_mode(lb, LB_BLEND);
+ dc_lb_position(lb, new_state->dst.x1, new_state->dst.y1);
+ dc_lb_pec_clken(lb, CLKEN_AUTOMATIC);
+
+ dc_plane_dbg(plane, "uses LayerBlend%d\n", dc_lb_get_id(lb));
+
+ /* set ExtDst's source to LayerBlend */
+ dc_ed_pec_src_sel(dplane->ed, dc_lb_get_link_id(lb));
+
+ drm_dev_exit(idx);
+}
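+
+/*
+ * Data-flow summary (descriptive only): the fetchunit scans the
+ * framebuffer, the layerblend blends the constframe (primary input) with
+ * the fetchunit (secondary input), and the content stream extdst is then
+ * pointed at the layerblend. The disable path below reverses the last
+ * step by routing the extdst straight back to the constframe.
+ */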
+
+static void dc_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dc_plane *dplane = to_dc_plane(plane);
+ const struct dc_fu_ops *fu_ops;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ /* disable fetchunit in shadow */
+ fu_ops = dc_fu_get_ops(dplane->fu);
+ fu_ops->disable_src_buf(dplane->fu, DC_FETCHUNIT_FRAC0);
+
+ /* set ExtDst's source to ConstFrame */
+ dc_ed_pec_src_sel(dplane->ed, dc_cf_get_link_id(dplane->cf));
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs dc_plane_helper_funcs = {
+ .atomic_check = dc_plane_atomic_check,
+ .atomic_update = dc_plane_atomic_update,
+ .atomic_disable = dc_plane_atomic_disable,
+};
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane)
+{
+ struct drm_plane *plane = &dc_plane->base;
+ int ret;
+
+ ret = drm_universal_plane_init(&dc_drm->base, plane, 0, &dc_plane_funcs,
+ dc_plane_formats,
+ ARRAY_SIZE(dc_plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(plane, &dc_plane_helper_funcs);
+
+ dc_plane->fu = dc_drm->pe->fu_disp[plane->index];
+ dc_plane->cf = dc_drm->pe->cf_cont[plane->index];
+ dc_plane->lb = dc_drm->pe->lb[plane->index];
+ dc_plane->ed = dc_drm->pe->ed_cont[plane->index];
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-tc.c b/drivers/gpu/drm/imx/dc/dc-tc.c
new file mode 100644
index 000000000000..0bfd381b2cea
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-tc.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-de.h"
+
+#define TCON_CTRL 0x410
+#define CTRL_RST_VAL 0x01401408
+
+/* red: MAPBIT 29-20, green: MAPBIT 19-10, blue: MAPBIT 9-0 */
+#define MAPBIT3_0 0x418
+#define MAPBIT7_4 0x41c
+#define MAPBIT11_8 0x420
+#define MAPBIT15_12 0x424
+#define MAPBIT19_16 0x428
+#define MAPBIT23_20 0x42c
+#define MAPBIT27_24 0x430
+#define MAPBIT31_28 0x434
+
+static const struct dc_subdev_info dc_tc_info[] = {
+ { .reg_start = 0x5618c800, .id = 0, },
+ { .reg_start = 0x5618e400, .id = 1, },
+};
+
+static const struct regmap_range dc_tc_regmap_ranges[] = {
+ regmap_reg_range(TCON_CTRL, TCON_CTRL),
+ regmap_reg_range(MAPBIT3_0, MAPBIT31_28),
+};
+
+static const struct regmap_access_table dc_tc_regmap_access_table = {
+ .yes_ranges = dc_tc_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_tc_regmap_ranges),
+};
+
+static const struct regmap_config dc_tc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_tc_regmap_access_table,
+ .rd_table = &dc_tc_regmap_access_table,
+ .max_register = MAPBIT31_28,
+};
+
+/*
+ * The pixels reaching the TCON are always in 30-bit BGR format.
+ * The first bridge always receives pixels in 30-bit RGB format.
+ * So, map the format to MEDIA_BUS_FMT_RGB101010_1X30.
+ */
+static const u32 dc_tc_mapbit[] = {
+ 0x17161514, 0x1b1a1918, 0x0b0a1d1c, 0x0f0e0d0c,
+ 0x13121110, 0x03020100, 0x07060504, 0x00000908,
+};
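+
+/*
+ * A possible reading of the first entry (an inference from the register
+ * layout, not verified against the reference manual): each byte appears to
+ * select the input bus bit driving one MAPBIT output, so 0x17161514 routes
+ * input bits 20-23 (0x14-0x17) to MAPBIT0-3, part of the RGB-to-BGR
+ * swizzle described above.
+ */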
+
+void dc_tc_init(struct dc_tc *tc)
+{
+ /* reset TCON_CTRL to POR default so that TCON works in bypass mode */
+ regmap_write(tc->reg, TCON_CTRL, CTRL_RST_VAL);
+
+ /* set format */
+ regmap_bulk_write(tc->reg, MAPBIT3_0, dc_tc_mapbit,
+ ARRAY_SIZE(dc_tc_mapbit));
+}
+
+static int dc_tc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_tc *tc;
+ int id;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ tc->reg = devm_regmap_init_mmio(dev, base, &dc_tc_regmap_config);
+ if (IS_ERR(tc->reg))
+ return PTR_ERR(tc->reg);
+
+ id = dc_subdev_get_id(dc_tc_info, ARRAY_SIZE(dc_tc_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ tc->dev = dev;
+ dc_drm->tc[id] = tc;
+
+ return 0;
+}
+
+static const struct component_ops dc_tc_ops = {
+ .bind = dc_tc_bind,
+};
+
+static int dc_tc_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_tc_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_tc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_tc_ops);
+}
+
+static const struct of_device_id dc_tc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-tcon" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_tc_dt_ids);
+
+struct platform_driver dc_tc_driver = {
+ .probe = dc_tc_probe,
+ .remove = dc_tc_remove,
+ .driver = {
+ .name = "imx8-dc-tcon",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_tc_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 9e66eb77b1eb..6d8325c76697 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -162,11 +162,12 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
}
static int imx_pd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
- return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, imxpd->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 20b93fff0239..9db1ceaed518 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -791,11 +791,12 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
}
static int ingenic_drm_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
- struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder);
+ struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(encoder);
- return drm_bridge_attach(bridge->encoder, ib->next_bridge,
+ return drm_bridge_attach(encoder, ib->next_bridge,
&ib->bridge, flags);
}
@@ -900,14 +901,15 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
static struct drm_framebuffer *
ingenic_drm_gem_fb_create(struct drm_device *drm, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct ingenic_drm *priv = drm_device_get_priv(drm);
if (priv->soc_info->map_noncoherent)
- return drm_gem_fb_create_with_dirty(drm, file, mode_cmd);
+ return drm_gem_fb_create_with_dirty(drm, file, info, mode_cmd);
- return drm_gem_fb_create(drm, file, mode_cmd);
+ return drm_gem_fb_create(drm, file, info, mode_cmd);
}
static struct drm_gem_object *
diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c
index 31b5a3e21911..0e9dba1ef4af 100644
--- a/drivers/gpu/drm/lib/drm_random.c
+++ b/drivers/gpu/drm/lib/drm_random.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 11ace5cebf4c..65210ab081bb 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -362,8 +362,8 @@ static const struct bin_attribute lima_error_state_attr = {
.attr.name = "error",
.attr.mode = 0600,
.size = 0,
- .read_new = lima_error_state_read,
- .write_new = lima_error_state_write,
+ .read = lima_error_state_read,
+ .write = lima_error_state_write,
};
static int lima_pdev_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 9bb997dbb4b9..9722b847a539 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
mapping_set_unevictable(mapping);
}
@@ -195,7 +195,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_vmap(&bo->base, map);
+ return drm_gem_shmem_vmap_locked(&bo->base, map);
}
static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
@@ -341,7 +341,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
err = lima_sched_task_init(
submit->task, submit->ctx->context + submit->pipe,
- bos, submit->nr_bos, vm);
+ bos, submit->nr_bos, vm, file->client_id);
if (err)
goto err_out1;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 825135a26aa4..739e8c6c6d90 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -113,7 +113,8 @@ static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sch
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
- struct lima_vm *vm)
+ struct lima_vm *vm,
+ u64 drm_client_id)
{
int err, i;
@@ -124,7 +125,8 @@ int lima_sched_task_init(struct lima_sched_task *task,
for (i = 0; i < num_bos; i++)
drm_gem_object_get(&bos[i]->base.base);
- err = drm_sched_job_init(&task->base, &context->base, 1, vm);
+ err = drm_sched_job_init(&task->base, &context->base, 1, vm,
+ drm_client_id);
if (err) {
kfree(task->bos);
return err;
@@ -371,7 +373,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else {
buffer_chunk->size = lima_bo_size(bo);
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
kvfree(et);
goto out;
@@ -379,7 +381,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
}
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
@@ -410,7 +412,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
*/
if (dma_fence_is_signaled(task->fence)) {
DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
/*
@@ -427,7 +429,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
if (dma_fence_is_signaled(task->fence)) {
DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
/*
@@ -465,7 +467,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void lima_sched_free_job(struct drm_sched_job *job)
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 85b23ba901d5..1a08faf8a529 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -88,7 +88,8 @@ struct lima_sched_pipe {
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
- struct lima_vm *vm);
+ struct lima_vm *vm,
+ u64 drm_client_id);
void lima_sched_task_fini(struct lima_sched_task *task);
int lima_sched_context_init(struct lima_sched_pipe *pipe,
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
index 3a349d10304e..98a7fb2fa00e 100644
--- a/drivers/gpu/drm/lima/lima_trace.h
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -14,21 +14,19 @@ DECLARE_EVENT_CLASS(lima_task,
TP_PROTO(struct lima_sched_task *task),
TP_ARGS(task),
TP_STRUCT__entry(
- __field(uint64_t, task_id)
__field(unsigned int, context)
__field(unsigned int, seqno)
__string(pipe, task->base.sched->name)
),
TP_fast_assign(
- __entry->task_id = task->base.id;
__entry->context = task->base.s_fence->finished.context;
__entry->seqno = task->base.s_fence->finished.seqno;
__assign_str(pipe);
),
- TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
- __entry->task_id, __entry->context, __entry->seqno,
+ TP_printk("context=%u seqno=%u pipe=%s",
+ __entry->context, __entry->seqno,
__get_str(pipe))
);
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
index 552edfec7afb..d739d51cf54c 100644
--- a/drivers/gpu/drm/loongson/Kconfig
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -2,7 +2,7 @@
config DRM_LOONGSON
tristate "DRM support for Loongson Graphics"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on LOONGARCH || MIPS || COMPILE_TEST
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 395449a72f0a..a3423459dd7a 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1048,6 +1048,7 @@ void mcde_dsi_disable(struct drm_bridge *bridge)
}
static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
@@ -1059,7 +1060,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the DSI bridge to the output (panel etc) bridge */
- return drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
+ return drm_bridge_attach(encoder, d->bridge_out, bridge, flags);
}
static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
@@ -1137,7 +1138,6 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
d->bridge_out = bridge;
/* Create a bridge for this DSI channel */
- d->bridge.funcs = &mcde_dsi_bridge_funcs;
d->bridge.of_node = dev->of_node;
drm_bridge_add(&d->bridge);
@@ -1173,9 +1173,9 @@ static int mcde_dsi_probe(struct platform_device *pdev)
u32 dsi_id;
int ret;
- d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
+ d = devm_drm_bridge_alloc(dev, struct mcde_dsi, bridge, &mcde_dsi_bridge_funcs);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
d->dev = dev;
platform_set_drvdata(pdev, d);
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 32a2ed6c0cfe..43afd0a26d14 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -21,10 +21,8 @@ mediatek-drm-y := mtk_crtc.o \
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
-mediatek-drm-hdmi-objs := mtk_cec.o \
- mtk_hdmi.o \
- mtk_hdmi_ddc.o
-
-obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_cec.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi_ddc.o
obj-$(CONFIG_DRM_MEDIATEK_DP) += mtk_dp.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index b42c0d87eba3..c7be530ca041 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include "mtk_cec.h"
-#include "mtk_hdmi.h"
#include "mtk_drm_drv.h"
#define TR_CONFIG 0x00
@@ -102,6 +101,7 @@ void mtk_cec_set_hpd_event(struct device *dev,
cec->hpd_event = hpd_event;
spin_unlock_irqrestore(&cec->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(mtk_cec_set_hpd_event, "DRM_MTK_HDMI_V1");
bool mtk_cec_hpd_high(struct device *dev)
{
@@ -112,6 +112,7 @@ bool mtk_cec_hpd_high(struct device *dev)
return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
}
+EXPORT_SYMBOL_NS_GPL(mtk_cec_hpd_high, "DRM_MTK_HDMI_V1");
static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
{
@@ -247,3 +248,7 @@ struct platform_driver mtk_cec_driver = {
.of_match_table = mtk_cec_of_ids,
},
};
+module_platform_driver(mtk_cec_driver);
+
+MODULE_DESCRIPTION("MediaTek HDMI CEC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 8f6fba4217ec..bc7527542fdc 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -719,6 +719,39 @@ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
return 0;
}
+void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state);
+ int i;
+
+ /* no need to wait when the plane is disabled by the CPU */
+ if (!mtk_crtc->cmdq_client.chan)
+ return;
+
+ if (!mtk_crtc->enabled)
+ return;
+
+ /* set pending plane state to disabled */
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *mtk_plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state);
+
+ if (mtk_plane->index == plane->index) {
+ memcpy(mtk_plane_state, plane_state, sizeof(*plane_state));
+ break;
+ }
+ }
+ mtk_crtc_update_config(mtk_crtc, false);
+
+ /* wait for planes to be disabled by CMDQ */
+ wait_event_timeout(mtk_crtc->cb_blocking_queue,
+ mtk_crtc->cmdq_vblank_cnt == 0,
+ msecs_to_jiffies(500));
+#endif
+}
+
void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -930,7 +963,8 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_blend_modes(comp),
mtk_ddp_comp_get_formats(comp),
- mtk_ddp_comp_get_num_formats(comp), i);
+ mtk_ddp_comp_get_num_formats(comp),
+ mtk_ddp_comp_is_afbc_supported(comp), i);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h
index 388e900b6f4d..828f109b83e7 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.h
@@ -21,6 +21,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
unsigned int num_conn_routes);
int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state);
+void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane);
void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_atomic_state *plane_state);
struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
index edc6417639e6..ac6620e10262 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
@@ -366,6 +366,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
.get_blend_modes = mtk_ovl_get_blend_modes,
.get_formats = mtk_ovl_get_formats,
.get_num_formats = mtk_ovl_get_num_formats,
+ .is_afbc_supported = mtk_ovl_is_afbc_supported,
};
static const struct mtk_ddp_comp_funcs ddp_postmask = {
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
index 39720b27f4e9..7289b3dcf22f 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
@@ -83,6 +83,7 @@ struct mtk_ddp_comp_funcs {
u32 (*get_blend_modes)(struct device *dev);
const u32 *(*get_formats)(struct device *dev);
size_t (*get_num_formats)(struct device *dev);
+ bool (*is_afbc_supported)(struct device *dev);
void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
void (*add)(struct device *dev, struct mtk_mutex *mutex);
@@ -294,6 +295,14 @@ size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp)
return 0;
}
+static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->is_afbc_supported)
+ return comp->funcs->is_afbc_supported(comp->dev);
+
+ return false;
+}
+
static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex)
{
if (comp->funcs && comp->funcs->add) {
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 04217a36939c..679d413bf10b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -106,6 +106,7 @@ void mtk_ovl_disable_vblank(struct device *dev);
u32 mtk_ovl_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_get_formats(struct device *dev);
size_t mtk_ovl_get_num_formats(struct device *dev);
+bool mtk_ovl_is_afbc_supported(struct device *dev);
void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex);
void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index d0581c4e3c99..e0236353d499 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -236,6 +236,13 @@ size_t mtk_ovl_get_num_formats(struct device *dev)
return ovl->data->num_formats;
}
+bool mtk_ovl_is_afbc_supported(struct device *dev)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ return ovl->data->supports_afbc;
+}
+
int mtk_ovl_clk_enable(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index fa0e95dd29a0..fe97bb97e004 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -492,11 +492,6 @@ static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = {
{ /* sentinel */ }
};
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node,
enum mtk_ovl_adaptor_comp_type *ctype)
{
@@ -567,7 +562,7 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
priv->ovl_adaptor_comp[id] = &comp_pdev->dev;
- drm_of_component_match_add(dev, match, compare_of, node);
+ drm_of_component_match_add(dev, match, component_compare_of, node);
dev_dbg(dev, "Adding component match for %pOF\n", node);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index fed3307d3374..bef6eeb30d3e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2118,7 +2118,8 @@ static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp)
mutex_unlock(&mtk_dp->update_plugged_status_lock);
}
-static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+mtk_dp_bdg_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
enum drm_connector_status ret = connector_status_disconnected;
@@ -2287,6 +2288,7 @@ static void mtk_dp_poweroff(struct mtk_dp *mtk_dp)
}
static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
@@ -2310,7 +2312,7 @@ static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
goto err_aux_register;
if (mtk_dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge,
+ ret = drm_bridge_attach(encoder, mtk_dp->next_bridge,
&mtk_dp->bridge, flags);
if (ret) {
drm_warn(mtk_dp->drm_dev,
@@ -2568,7 +2570,7 @@ static const struct drm_bridge_funcs mtk_dp_bridge_funcs = {
static void mtk_dp_debounce_timer(struct timer_list *t)
{
- struct mtk_dp *mtk_dp = from_timer(mtk_dp, t, debounce_timer);
+ struct mtk_dp *mtk_dp = timer_container_of(mtk_dp, t, debounce_timer);
mtk_dp->need_debounce = true;
}
@@ -2724,9 +2726,10 @@ static int mtk_dp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
- mtk_dp = devm_kzalloc(dev, sizeof(*mtk_dp), GFP_KERNEL);
- if (!mtk_dp)
- return -ENOMEM;
+ mtk_dp = devm_drm_bridge_alloc(dev, struct mtk_dp, bridge,
+ &mtk_dp_bridge_funcs);
+ if (IS_ERR(mtk_dp))
+ return PTR_ERR(mtk_dp);
mtk_dp->dev = dev;
mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev);
@@ -2784,7 +2787,6 @@ static int mtk_dp_probe(struct platform_device *pdev)
if (ret)
return ret;
- mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
mtk_dp->bridge.of_node = dev->of_node;
mtk_dp->bridge.type = mtk_dp->data->bridge_type;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0fd13e6dd3f1..61cab32e213a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -59,7 +59,8 @@ enum mtk_dpi_out_channel_swap {
enum mtk_dpi_out_color_format {
MTK_DPI_COLOR_FORMAT_RGB,
- MTK_DPI_COLOR_FORMAT_YCBCR_422
+ MTK_DPI_COLOR_FORMAT_YCBCR_422,
+ MTK_DPI_COLOR_FORMAT_YCBCR_444
};
struct mtk_dpi {
@@ -450,9 +451,16 @@ static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
enum mtk_dpi_out_color_format format)
{
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+ mtk_dpi_config_channel_swap(dpi, dpi->channel_swap);
- if (format == MTK_DPI_COLOR_FORMAT_YCBCR_422) {
+ switch (format) {
+ case MTK_DPI_COLOR_FORMAT_YCBCR_444:
+ mtk_dpi_config_yuv422_enable(dpi, false);
+ mtk_dpi_config_csc_enable(dpi, true);
+ if (dpi->conf->swap_input_support)
+ mtk_dpi_config_swap_input(dpi, false);
+ break;
+ case MTK_DPI_COLOR_FORMAT_YCBCR_422:
mtk_dpi_config_yuv422_enable(dpi, true);
mtk_dpi_config_csc_enable(dpi, true);
@@ -463,11 +471,14 @@ static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
mtk_dpi_mask(dpi, DPI_MATRIX_SET, dpi->mode.hdisplay <= 720 ?
MATRIX_SEL_RGB_TO_BT601 : MATRIX_SEL_RGB_TO_JPEG,
INT_MATRIX_SEL_MASK);
- } else {
+ break;
+ default:
+ case MTK_DPI_COLOR_FORMAT_RGB:
mtk_dpi_config_yuv422_enable(dpi, false);
mtk_dpi_config_csc_enable(dpi, false);
if (dpi->conf->swap_input_support)
mtk_dpi_config_swap_input(dpi, false);
+ break;
}
}
@@ -734,6 +745,65 @@ static u32 *mtk_dpi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+static unsigned int mtk_dpi_bus_fmt_bit_num(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ return MTK_DPI_OUT_BIT_NUM_8BITS;
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_OUT_BIT_NUM_10BITS;
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_OUT_BIT_NUM_12BITS;
+ }
+}
+
+static unsigned int mtk_dpi_bus_fmt_channel_swap(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_OUT_CHANNEL_SWAP_BGR;
+ }
+}
+
+static unsigned int mtk_dpi_bus_fmt_color_format(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ return MTK_DPI_COLOR_FORMAT_RGB;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_COLOR_FORMAT_YCBCR_422;
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_COLOR_FORMAT_YCBCR_444;
+ }
+}
+
static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -753,18 +823,16 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
bridge_state->output_bus_cfg.format);
dpi->output_fmt = out_bus_format;
- dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
- dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ dpi->bit_num = mtk_dpi_bus_fmt_bit_num(out_bus_format);
+ dpi->channel_swap = mtk_dpi_bus_fmt_channel_swap(out_bus_format);
dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
- if (out_bus_format == MEDIA_BUS_FMT_YUYV8_1X16)
- dpi->color_format = MTK_DPI_COLOR_FORMAT_YCBCR_422;
- else
- dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
+ dpi->color_format = mtk_dpi_bus_fmt_color_format(out_bus_format);
return 0;
}
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
@@ -783,7 +851,7 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
"Failed to get bridge\n");
}
- return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
+ return drm_bridge_attach(encoder, dpi->next_bridge,
&dpi->bridge, flags);
}
@@ -1026,9 +1094,29 @@ static const u32 mt8183_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_2X12_BE,
};
-static const u32 mt8195_output_fmts[] = {
+static const u32 mt8195_dpi_output_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB101010_1X30,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUV10_1X30,
+};
+
+static const u32 mt8195_dp_intf_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB101010_1X30,
MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUV10_1X30,
};
static const struct mtk_dpi_factor dpi_factor_mt2701[] = {
@@ -1141,8 +1229,8 @@ static const struct mtk_dpi_conf mt8192_conf = {
static const struct mtk_dpi_conf mt8195_conf = {
.max_clock_khz = 594000,
- .output_fmts = mt8183_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
+ .output_fmts = mt8195_dpi_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8195_dpi_output_fmts),
.pixels_per_iter = 1,
.is_ck_de_pol = true,
.swap_input_support = true,
@@ -1161,8 +1249,8 @@ static const struct mtk_dpi_conf mt8195_dpintf_conf = {
.dpi_factor = dpi_factor_mt8195_dp_intf,
.num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8195_dp_intf),
.max_clock_khz = 600000,
- .output_fmts = mt8195_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
+ .output_fmts = mt8195_dp_intf_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8195_dp_intf_output_fmts),
.pixels_per_iter = 4,
.dimension_mask = DPINTF_HPW_MASK,
.hvsize_mask = DPINTF_HSIZE_MASK,
@@ -1178,9 +1266,10 @@ static int mtk_dpi_probe(struct platform_device *pdev)
struct mtk_dpi *dpi;
int ret;
- dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
- if (!dpi)
- return -ENOMEM;
+ dpi = devm_drm_bridge_alloc(dev, struct mtk_dpi, bridge,
+ &mtk_dpi_bridge_funcs);
+ if (IS_ERR(dpi))
+ return PTR_ERR(dpi);
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
@@ -1232,7 +1321,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dpi);
- dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
dpi->bridge.of_node = dev->of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
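
Note on the allocation change above: devm_drm_bridge_alloc() — repeated below in the mtk_dsi, mtk_hdmi and meson encoder conversions — replaces the devm_kzalloc() plus manual "bridge.funcs = ..." pair with a single allocator that zeroes the containing structure, initializes the embedded drm_bridge (funcs and refcounting included) and ties the allocation's lifetime to the device. It returns an ERR_PTR() rather than NULL on failure, which is why the error check changes from "!dpi" to IS_ERR(). A minimal sketch of the pattern, with illustrative names (my_encoder, my_bridge_funcs):

	struct my_encoder {
		struct drm_bridge bridge;	/* embedded, not a pointer */
		/* ...driver state... */
	};

	enc = devm_drm_bridge_alloc(dev, struct my_encoder, bridge,
				    &my_bridge_funcs);
	if (IS_ERR(enc))
		return PTR_ERR(enc);	/* never NULL on failure */
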
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 74158b9d6503..d5e6bab36414 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -43,14 +43,13 @@ static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = {
static struct drm_framebuffer *
mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev, cmd);
-
if (info->num_planes != 1)
return ERR_PTR(-EINVAL);
- return drm_gem_fb_create(dev, file, cmd);
+ return drm_gem_fb_create(dev, file, info, cmd);
}
static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
@@ -470,7 +469,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = drmm_mode_config_init(drm);
if (ret)
- goto put_mutex_dev;
+ return ret;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
@@ -488,8 +487,11 @@ static int mtk_drm_kms_init(struct drm_device *drm)
for (i = 0; i < private->data->mmsys_dev_num; i++) {
drm->dev_private = private->all_drm_private[i];
ret = component_bind_all(private->all_drm_private[i]->dev, drm);
- if (ret)
- goto put_mutex_dev;
+ if (ret) {
+ while (--i >= 0)
+ component_unbind_all(private->all_drm_private[i]->dev, drm);
+ return ret;
+ }
}
/*
@@ -582,9 +584,6 @@ static int mtk_drm_kms_init(struct drm_device *drm)
err_component_unbind:
for (i = 0; i < private->data->mmsys_dev_num; i++)
component_unbind_all(private->all_drm_private[i]->dev, drm);
-put_mutex_dev:
- for (i = 0; i < private->data->mmsys_dev_num; i++)
- put_device(private->all_drm_private[i]->mutex_dev);
return ret;
}
@@ -655,8 +654,10 @@ static int mtk_drm_bind(struct device *dev)
return 0;
drm = drm_dev_alloc(&mtk_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ if (IS_ERR(drm)) {
+ ret = PTR_ERR(drm);
+ goto err_put_dev;
+ }
private->drm_master = true;
drm->dev_private = private;
@@ -682,18 +683,31 @@ err_free:
drm_dev_put(drm);
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = NULL;
+err_put_dev:
+ for (i = 0; i < private->data->mmsys_dev_num; i++) {
+ /* For device_find_child in mtk_drm_get_all_priv() */
+ put_device(private->all_drm_private[i]->dev);
+ }
+ put_device(private->mutex_dev);
return ret;
}
static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
+ int i;
/* for multi mmsys dev, unregister drm dev in mmsys master */
if (private->drm_master) {
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
+
+ for (i = 0; i < private->data->mmsys_dev_num; i++) {
+ /* For device_find_child in mtk_drm_get_all_priv() */
+ put_device(private->all_drm_private[i]->dev);
+ }
+ put_device(private->mutex_dev);
}
private->mtk_drm_bound = false;
private->drm_master = false;
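
The put_device() calls added to mtk_drm_bind()/mtk_drm_unbind() balance references taken during probe: as the new comments note, device_find_child() in mtk_drm_get_all_priv() returns each child device with its refcount raised, as does the mutex-device lookup, and both the bind error path and unbind must drop them. A sketch of the general get/put pairing, using of_find_device_by_node() as the reference-taking lookup (do_bind() is a hypothetical placeholder):

	struct platform_device *pdev = of_find_device_by_node(np);

	if (!pdev)
		return -EPROBE_DEFER;

	ret = do_bind(&pdev->dev);
	if (ret) {
		put_device(&pdev->dev);	/* drop the lookup's reference on error */
		return ret;
	}
	/* on success, hold the reference until unbind, then put_device() */
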
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d1f407fb7eb1..d7726091819c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -807,12 +807,13 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge,
&dsi->bridge, flags);
}
@@ -1195,9 +1196,10 @@ static int mtk_dsi_probe(struct platform_device *pdev)
int irq_num;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct mtk_dsi, bridge,
+ &mtk_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->driver_data = of_device_get_match_data(dev);
@@ -1245,7 +1247,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dsi);
- dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 06e4fac152b7..845fd8aa43c3 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -31,7 +31,6 @@
#include <drm/drm_probe_helper.h>
#include "mtk_cec.h"
-#include "mtk_hdmi.h"
#include "mtk_hdmi_regs.h"
#define NCTS_BYTES 7
@@ -165,7 +164,7 @@ struct mtk_hdmi {
bool dvi_mode;
struct regmap *sys_regmap;
unsigned int sys_offset;
- void __iomem *regs;
+ struct regmap *regs;
struct platform_device *audio_pdev;
struct hdmi_audio_param aud_param;
bool audio_enable;
@@ -181,50 +180,10 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
return container_of(b, struct mtk_hdmi, bridge);
}
-static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
-{
- return readl(hdmi->regs + offset);
-}
-
-static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
-{
- writel(val, hdmi->regs + offset);
-}
-
-static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp &= ~bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp |= bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- writel(tmp, reg);
-}
-
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
- mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
- VIDEO_SOURCE_SEL);
+ regmap_update_bits(hdmi->regs, VIDEO_CFG_4, VIDEO_SOURCE_SEL,
+ black ? GEN_RGB : NORMAL_PATH);
}
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
@@ -259,12 +218,12 @@ static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+ regmap_set_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+ regmap_clear_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
@@ -273,25 +232,25 @@ static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
HDMI_RST, HDMI_RST);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
HDMI_RST, 0);
- mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
+ regmap_clear_bits(hdmi->regs, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
ANLG_ON, ANLG_ON);
}
static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
{
- mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
- CFG2_NOTICE_EN);
+ regmap_update_bits(hdmi->regs, GRL_CFG2, CFG2_NOTICE_EN,
+ enable_notice ? CFG2_NOTICE_EN : 0);
}
static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
{
- mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
+ regmap_write(hdmi->regs, GRL_INT_MASK, int_mask);
}
static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
+ regmap_update_bits(hdmi->regs, GRL_CFG1, CFG1_DVI, enable ? CFG1_DVI : 0);
}
static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
@@ -337,22 +296,22 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
dev_err(hdmi->dev, "Unknown infoframe type %d\n", frame_type);
return;
}
- mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
+ regmap_clear_bits(hdmi->regs, ctrl_reg, ctrl_frame_en);
+ regmap_write(hdmi->regs, GRL_INFOFRM_TYPE, frame_type);
+ regmap_write(hdmi->regs, GRL_INFOFRM_VER, frame_ver);
+ regmap_write(hdmi->regs, GRL_INFOFRM_LNG, frame_len);
- mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
+ regmap_write(hdmi->regs, GRL_IFM_PORT, checksum);
for (i = 0; i < frame_len; i++)
- mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
+ regmap_write(hdmi->regs, GRL_IFM_PORT, frame_data[i]);
- mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
+ regmap_set_bits(hdmi->regs, ctrl_reg, ctrl_frame_en);
}
static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
- AUDIO_PACKET_OFF);
+ regmap_update_bits(hdmi->regs, AUDIO_PACKET_OFF,
+ GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF);
}
static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
@@ -373,44 +332,44 @@ static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ regmap_clear_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE);
usleep_range(2000, 4000);
- mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ regmap_set_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE);
}
static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
- CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_EN);
usleep_range(2000, 4000);
- mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
- CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_SET);
}
static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
{
- mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
- CTS_CTRL_SOFT);
+ regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, CTS_CTRL_SOFT,
+ on ? 0 : CTS_CTRL_SOFT);
}
static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
- NCTS_WRI_ANYTIME);
+ regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, NCTS_WRI_ANYTIME,
+ enable ? NCTS_WRI_ANYTIME : 0);
}
static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
- mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
+ regmap_clear_bits(hdmi->regs, GRL_CFG4, CFG4_MHL_MODE);
if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
mode->clock == 74250 &&
mode->vdisplay == 1080)
- mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL);
else
- mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ regmap_set_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL);
}
static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
@@ -438,7 +397,7 @@ static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
swap_bit = LFE_CC_SWAP;
break;
}
- mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
+ regmap_update_bits(hdmi->regs, GRL_CH_SWAP, 0xff, swap_bit);
}
static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
@@ -459,7 +418,7 @@ static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
break;
}
- mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
+ regmap_update_bits(hdmi->regs, GRL_AOUT_CFG, AOUT_BNUM_SEL_MASK, val);
}
static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
@@ -467,7 +426,7 @@ static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG0);
+ regmap_read(hdmi->regs, GRL_CFG0, &val);
val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
switch (i2s_fmt) {
@@ -491,7 +450,7 @@ static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
break;
}
- mtk_hdmi_write(hdmi, GRL_CFG0, val);
+ regmap_write(hdmi->regs, GRL_CFG0, val);
}
static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
@@ -500,14 +459,14 @@ static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
u8 val;
/* Disable high bitrate, set DST packet normal/double */
- mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
+ regmap_clear_bits(hdmi->regs, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
if (dst)
val = DST_NORMAL_DOUBLE | SACD_DST;
else
val = 0;
- mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
+ regmap_update_bits(hdmi->regs, GRL_AUDIO_CFG, mask, val);
}
static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
@@ -548,10 +507,10 @@ static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
i2s_uv = I2S_UV_CH_EN(0);
}
- mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
- mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
- mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
- mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
+ regmap_write(hdmi->regs, GRL_CH_SW0, ch_switch & 0xff);
+ regmap_write(hdmi->regs, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
+ regmap_write(hdmi->regs, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
+ regmap_write(hdmi->regs, GRL_I2S_UV, i2s_uv);
}
static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
@@ -559,7 +518,7 @@ static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG1);
+ regmap_read(hdmi->regs, GRL_CFG1, &val);
if (input_type == HDMI_AUD_INPUT_I2S &&
(val & CFG1_SPDIF) == CFG1_SPDIF) {
val &= ~CFG1_SPDIF;
@@ -567,7 +526,7 @@ static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
(val & CFG1_SPDIF) == 0) {
val |= CFG1_SPDIF;
}
- mtk_hdmi_write(hdmi, GRL_CFG1, val);
+ regmap_write(hdmi->regs, GRL_CFG1, val);
}
static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
@@ -576,13 +535,13 @@ static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
int i;
for (i = 0; i < 5; i++) {
- mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
- mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
- mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, channel_status[i]);
}
for (; i < 24; i++) {
- mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
- mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
+ regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, 0);
+ regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, 0);
}
}
@@ -590,13 +549,13 @@ static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ regmap_read(hdmi->regs, GRL_MIX_CTRL, &val);
if (val & MIX_CTRL_SRC_EN) {
val &= ~MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
usleep_range(255, 512);
val |= MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
}
}
@@ -604,10 +563,10 @@ static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ regmap_read(hdmi->regs, GRL_MIX_CTRL, &val);
val &= ~MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
- mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_SHIFT_L1, 0x00);
}
static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
@@ -615,7 +574,7 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG5);
+ regmap_read(hdmi->regs, GRL_CFG5, &val);
val &= CFG5_CD_RATIO_MASK;
switch (mclk) {
@@ -638,7 +597,7 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
val |= CFG5_FS256;
break;
}
- mtk_hdmi_write(hdmi, GRL_CFG5, val);
+ regmap_write(hdmi->regs, GRL_CFG5, val);
}
struct hdmi_acr_n {
@@ -716,15 +675,22 @@ static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
128 * audio_sample_rate);
}
+static void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock,
+ unsigned int *n, unsigned int *cts)
+{
+ *n = hdmi_recommended_n(sample_rate, clock);
+ *cts = hdmi_expected_cts(sample_rate, clock, *n);
+}
+
static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
unsigned int cts)
{
unsigned char val[NCTS_BYTES];
int i;
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
memset(val, 0, sizeof(val));
val[0] = (cts >> 24) & 0xff;
@@ -737,7 +703,7 @@ static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
val[6] = n & 0xff;
for (i = 0; i < NCTS_BYTES; i++)
- mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
+ regmap_write(hdmi->regs, GRL_NCTS, val[i]);
}
static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
@@ -746,14 +712,12 @@ static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
{
unsigned int n, cts;
- n = hdmi_recommended_n(sample_rate, clock);
- cts = hdmi_expected_cts(sample_rate, clock, n);
+ mtk_hdmi_get_ncts(sample_rate, clock, &n, &cts);
dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n",
__func__, sample_rate, clock, n, cts);
- mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
- AUDIO_I2S_NCTS_SEL);
+ regmap_update_bits(hdmi->regs, DUMMY_304, AUDIO_I2S_NCTS_SEL, AUDIO_I2S_NCTS_SEL_64);
do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
}
@@ -873,7 +837,7 @@ static void mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
bool dst;
mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
- mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
+ regmap_set_bits(hdmi->regs, GRL_MIX_CTRL, MIX_CTRL_FLAT);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
@@ -905,7 +869,7 @@ static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
mtk_hdmi_hw_ncts_enable(hdmi, false);
mtk_hdmi_hw_aud_src_disable(hdmi);
- mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
+ regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_ACLK_INV);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
switch (sample_rate) {
@@ -1061,20 +1025,6 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
return 0;
}
-static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
-{
- struct hdmi_audio_param *aud_param = &hdmi->aud_param;
-
- aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
- aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
- aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
-
- return 0;
-}
-
static void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
{
mtk_hdmi_hw_send_aud_packet(hdmi, true);
@@ -1087,20 +1037,6 @@ static void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
hdmi->audio_enable = false;
}
-static int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
- struct hdmi_audio_param *param)
-{
- if (!hdmi->audio_enable) {
- dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
- return -EINVAL;
- }
- dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
- param->aud_codec, param->aud_input_type,
- param->aud_input_chan_type, param->codec_params.sample_rate);
- memcpy(&hdmi->aud_param, param, sizeof(*param));
- return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
-}
-
static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
@@ -1238,7 +1174,8 @@ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
* Bridge callbacks
*/
-static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+mtk_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1269,6 +1206,7 @@ static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridg
}
static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1281,7 +1219,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
}
if (hdmi->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ ret = drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
if (ret)
return ret;
@@ -1407,30 +1345,20 @@ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
.edid_read = mtk_hdmi_bridge_edid_read,
};
-static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
- struct platform_device *pdev)
+static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct device_node *cec_np, *remote, *i2c_np;
struct platform_device *cec_pdev;
- struct regmap *regmap;
+ struct device_node *cec_np;
int ret;
ret = mtk_hdmi_get_all_clk(hdmi, np);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clocks: %d\n", ret);
-
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
/* The CEC module handles HDMI hotplug detection */
cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
- if (!cec_np) {
- dev_err(dev, "Failed to find CEC node\n");
- return -EINVAL;
- }
+ if (!cec_np)
+ return dev_err_probe(dev, -EINVAL, "Failed to find CEC node\n");
cec_pdev = of_find_device_by_node(cec_np);
if (!cec_pdev) {
@@ -1440,82 +1368,77 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
return -EPROBE_DEFER;
}
of_node_put(cec_np);
- hdmi->cec_dev = &cec_pdev->dev;
/*
* The mediatek,syscon-hdmi property contains a phandle link to the
* MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
* registers it contains.
*/
- regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
- ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
- &hdmi->sys_offset);
- if (IS_ERR(regmap))
- ret = PTR_ERR(regmap);
- if (ret) {
- dev_err(dev,
- "Failed to get system configuration registers: %d\n",
- ret);
- goto put_device;
- }
- hdmi->sys_regmap = regmap;
+ hdmi->sys_regmap = syscon_regmap_lookup_by_phandle_args(np, "mediatek,syscon-hdmi",
+ 1, &hdmi->sys_offset);
+ if (IS_ERR(hdmi->sys_regmap))
+ return dev_err_probe(dev, PTR_ERR(hdmi->sys_regmap),
+ "Failed to get system configuration registers\n");
- hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hdmi->regs)) {
- ret = PTR_ERR(hdmi->regs);
- goto put_device;
- }
+ hdmi->cec_dev = &cec_pdev->dev;
+ return 0;
+}
+
+static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *remote, *i2c_np;
+ int ret;
+
+ ret = mtk_hdmi_get_all_clk(hdmi, np);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ hdmi->regs = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
remote = of_graph_get_remote_node(np, 1, 0);
- if (!remote) {
- ret = -EINVAL;
- goto put_device;
- }
+ if (!remote)
+ return -EINVAL;
if (!of_device_is_compatible(remote, "hdmi-connector")) {
hdmi->next_bridge = of_drm_find_bridge(remote);
if (!hdmi->next_bridge) {
dev_err(dev, "Waiting for external bridge\n");
of_node_put(remote);
- ret = -EPROBE_DEFER;
- goto put_device;
+ return -EPROBE_DEFER;
}
}
i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
- if (!i2c_np) {
- dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
- remote);
- of_node_put(remote);
- ret = -EINVAL;
- goto put_device;
- }
of_node_put(remote);
+ if (!i2c_np)
+ return dev_err_probe(dev, -EINVAL, "No ddc-i2c-bus in connector\n");
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
of_node_put(i2c_np);
- if (!hdmi->ddc_adpt) {
- dev_err(dev, "Failed to get ddc i2c adapter by node\n");
- ret = -EINVAL;
- goto put_device;
- }
+ if (!hdmi->ddc_adpt)
+ return dev_err_probe(dev, -EINVAL, "Failed to get ddc i2c adapter by node\n");
+
+ ret = mtk_hdmi_get_cec_dev(hdmi, dev, np);
+ if (ret)
+ return ret;
return 0;
-put_device:
- put_device(hdmi->cec_dev);
- return ret;
}
/*
* HDMI audio codec callbacks
*/
-static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+static int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- struct hdmi_audio_param hdmi_params;
+ struct hdmi_audio_param aud_params = { 0 };
unsigned int chan = params->cea.channels;
dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
@@ -1526,16 +1449,16 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (chan) {
case 2:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
break;
case 4:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
break;
case 6:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
break;
case 8:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
break;
default:
dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
@@ -1559,27 +1482,45 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (daifmt->fmt) {
case HDMI_I2S:
- hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
- hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_params.aud_mclk = HDMI_AUD_MCLK_128FS;
break;
case HDMI_SPDIF:
- hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
break;
default:
dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
daifmt->fmt);
return -EINVAL;
}
+ memcpy(&aud_params.codec_params, params, sizeof(aud_params.codec_params));
+ memcpy(&hdmi->aud_param, &aud_params, sizeof(aud_params));
+
+ dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
+ aud_params.aud_codec, aud_params.aud_input_type,
+ aud_params.aud_input_chan_type, aud_params.codec_params.sample_rate);
- memcpy(&hdmi_params.codec_params, params,
- sizeof(hdmi_params.codec_params));
+ return 0;
+}
+
+static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
+ if (!hdmi->audio_enable) {
+ dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
+ return -EINVAL;
+ }
+
+ mtk_hdmi_audio_params(hdmi, daifmt, params);
+ mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
return 0;
}
@@ -1625,17 +1566,22 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
return 0;
}
-static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
+static void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
- struct mtk_hdmi *hdmi = data;
-
mutex_lock(&hdmi->update_plugged_status_lock);
hdmi->plugged_cb = fn;
hdmi->codec_dev = codec_dev;
mutex_unlock(&hdmi->update_plugged_status_lock);
+}
+
+static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct mtk_hdmi *hdmi = data;
+ mtk_hdmi_audio_set_plugged_cb(hdmi, fn, codec_dev);
mtk_hdmi_update_plugged_status(hdmi);
return 0;
@@ -1658,6 +1604,7 @@ static void mtk_hdmi_unregister_audio_driver(void *data)
static int mtk_hdmi_register_audio_driver(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ struct hdmi_audio_param *aud_param = &hdmi->aud_param;
struct hdmi_codec_pdata codec_data = {
.ops = &mtk_hdmi_audio_codec_ops,
.max_i2s_channels = 2,
@@ -1667,6 +1614,13 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
};
int ret;
+ aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+
hdmi->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
@@ -1689,9 +1643,10 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(dev, struct mtk_hdmi, bridge,
+ &mtk_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->dev = dev;
hdmi->conf = of_device_get_match_data(dev);
@@ -1708,17 +1663,11 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
mutex_init(&hdmi->update_plugged_status_lock);
platform_set_drvdata(pdev, hdmi);
- ret = mtk_hdmi_output_init(hdmi);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to initialize hdmi output\n");
-
ret = mtk_hdmi_register_audio_driver(dev);
if (ret)
return dev_err_probe(dev, ret,
"Failed to register audio driver\n");
- hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
@@ -1789,28 +1738,9 @@ static struct platform_driver mtk_hdmi_driver = {
.pm = &mtk_hdmi_pm_ops,
},
};
-
-static struct platform_driver * const mtk_hdmi_drivers[] = {
- &mtk_hdmi_ddc_driver,
- &mtk_cec_driver,
- &mtk_hdmi_driver,
-};
-
-static int __init mtk_hdmitx_init(void)
-{
- return platform_register_drivers(mtk_hdmi_drivers,
- ARRAY_SIZE(mtk_hdmi_drivers));
-}
-
-static void __exit mtk_hdmitx_exit(void)
-{
- platform_unregister_drivers(mtk_hdmi_drivers,
- ARRAY_SIZE(mtk_hdmi_drivers));
-}
-
-module_init(mtk_hdmitx_init);
-module_exit(mtk_hdmitx_exit);
+module_platform_driver(mtk_hdmi_driver);
MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek HDMI Driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("DRM_MTK_HDMI_V1");
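
A note on the regmap conversion above: regmap_update_bits(map, reg, mask, val) orders its arguments register, mask, value, while the removed mtk_hdmi_mask(hdmi, offset, val, mask) took the value before the mask, so every converted call site swaps its last two arguments. Open-coded, the helper performs the same read-modify-write the deleted accessors did (regmap locking and caching aside):

	u32 tmp;

	regmap_read(map, reg, &tmp);		/* was: tmp = readl(base + reg); */
	tmp = (tmp & ~mask) | (val & mask);
	regmap_write(map, reg, tmp);		/* was: writel(tmp, base + reg); */
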
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
deleted file mode 100644
index 472bf141c92b..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014 MediaTek Inc.
- * Author: Jie Qiu <jie.qiu@mediatek.com>
- */
-#ifndef _MTK_HDMI_CTRL_H
-#define _MTK_HDMI_CTRL_H
-
-struct platform_driver;
-
-extern struct platform_driver mtk_cec_driver;
-extern struct platform_driver mtk_hdmi_ddc_driver;
-
-#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 07db68067844..6358e1af69b4 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -20,7 +20,6 @@
#include <linux/of_platform.h>
#include "mtk_drm_drv.h"
-#include "mtk_hdmi.h"
#define SIF1_CLOK (288)
#define DDC_DDCMCTL0 (0x0)
@@ -337,6 +336,7 @@ struct platform_driver mtk_hdmi_ddc_driver = {
.of_match_table = mtk_hdmi_ddc_match,
},
};
+module_platform_driver(mtk_hdmi_ddc_driver);
MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 655106bbb76d..cbc4f37da8ba 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -285,9 +285,14 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+
mtk_plane_state->pending.enable = false;
wmb(); /* Make sure the above parameter is set before update */
mtk_plane_state->pending.dirty = true;
+
+ mtk_crtc_plane_disable(old_state->crtc, plane);
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
@@ -321,7 +326,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 blend_modes,
- const u32 *formats, size_t num_formats, unsigned int plane_idx)
+ const u32 *formats, size_t num_formats,
+ bool supports_afbc, unsigned int plane_idx)
{
int err;
@@ -332,7 +338,9 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
err = drm_universal_plane_init(dev, plane, possible_crtcs,
&mtk_plane_funcs, formats,
- num_formats, modifiers, type, NULL);
+ num_formats,
+ supports_afbc ? modifiers : NULL,
+ type, NULL);
if (err) {
DRM_ERROR("failed to initialize plane\n");
return err;
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 3b13b89989c7..95c5fa5295d8 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -49,5 +49,6 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 blend_modes,
- const u32 *formats, size_t num_formats, unsigned int plane_idx);
+ const u32 *formats, size_t num_formats,
+ bool supports_afbc, unsigned int plane_idx);
#endif
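
The new supports_afbc flag gates the format-modifier table per plane: planes without AFBC support now pass a NULL modifier list to drm_universal_plane_init(), so they advertise no explicit modifiers and only implicit (linear) buffer layouts are negotiated for them. A hedged sketch of the kind of table being gated (the exact AFBC mode flags here are illustrative):

	static const u64 modifiers[] = {
		DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
					AFBC_FORMAT_MOD_SPLIT |
					AFBC_FORMAT_MOD_SPARSE),
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_INVALID		/* terminator */
	};
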
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index e79f7c3ce32e..dc374bfc5951 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -83,12 +83,13 @@ meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
}
static int meson_encoder_cvbs_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
- return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge,
+ return drm_bridge_attach(encoder, meson_encoder_cvbs->next_bridge,
&meson_encoder_cvbs->bridge, flags);
}
@@ -226,9 +227,12 @@ int meson_encoder_cvbs_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_encoder_cvbs), GFP_KERNEL);
- if (!meson_encoder_cvbs)
- return -ENOMEM;
+ meson_encoder_cvbs = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_cvbs,
+ bridge,
+ &meson_encoder_cvbs_bridge_funcs);
+ if (IS_ERR(meson_encoder_cvbs))
+ return PTR_ERR(meson_encoder_cvbs);
/* CVBS Connector Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
@@ -244,7 +248,6 @@ int meson_encoder_cvbs_probe(struct meson_drm *priv)
"Failed to find CVBS Connector bridge\n");
/* CVBS Encoder Bridge */
- meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
meson_encoder_cvbs->bridge.of_node = priv->dev->of_node;
meson_encoder_cvbs->bridge.type = DRM_MODE_CONNECTOR_Composite;
meson_encoder_cvbs->bridge.ops = DRM_BRIDGE_OP_MODES;
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index fe204437bd65..6c6624f9ba24 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -33,11 +33,12 @@ struct meson_encoder_dsi {
container_of(x, struct meson_encoder_dsi, bridge)
static int meson_encoder_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_dsi->next_bridge,
&encoder_dsi->bridge, flags);
}
@@ -105,9 +106,12 @@ int meson_encoder_dsi_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_dsi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_dsi), GFP_KERNEL);
- if (!meson_encoder_dsi)
- return -ENOMEM;
+ meson_encoder_dsi = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_dsi,
+ bridge,
+ &meson_encoder_dsi_bridge_funcs);
+ if (IS_ERR(meson_encoder_dsi))
+ return PTR_ERR(meson_encoder_dsi);
/* DSI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 2, 0);
@@ -122,7 +126,6 @@ int meson_encoder_dsi_probe(struct meson_drm *priv)
"Failed to find DSI transceiver bridge\n");
/* DSI Encoder Bridge */
- meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs;
meson_encoder_dsi->bridge.of_node = priv->dev->of_node;
meson_encoder_dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 7752d8ac85f0..8205ee56a691 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -49,11 +49,12 @@ struct meson_encoder_hdmi {
container_of(x, struct meson_encoder_hdmi, bridge)
static int meson_encoder_hdmi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_hdmi->next_bridge,
&encoder_hdmi->bridge, flags);
}
@@ -75,7 +76,7 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
unsigned long long venc_freq;
unsigned long long hdmi_freq;
- vclk_freq = mode->clock * 1000;
+ vclk_freq = mode->clock * 1000ULL;
/* For 420, pixel clock is half unlike venc clock */
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
@@ -108,7 +109,7 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
venc_freq /= 2;
dev_dbg(priv->dev,
- "vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
+ "phy:%lluHz vclk=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
@@ -123,7 +124,7 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
- unsigned long long clock = mode->clock * 1000;
+ unsigned long long clock = mode->clock * 1000ULL;
unsigned long long phy_freq;
unsigned long long vclk_freq;
unsigned long long venc_freq;
@@ -375,9 +376,12 @@ int meson_encoder_hdmi_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL);
- if (!meson_encoder_hdmi)
- return -ENOMEM;
+ meson_encoder_hdmi = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_hdmi,
+ bridge,
+ &meson_encoder_hdmi_bridge_funcs);
+ if (IS_ERR(meson_encoder_hdmi))
+ return PTR_ERR(meson_encoder_hdmi);
/* HDMI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0);
@@ -394,7 +398,6 @@ int meson_encoder_hdmi_probe(struct meson_drm *priv)
}
/* HDMI Encoder Bridge */
- meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs;
meson_encoder_hdmi->bridge.of_node = priv->dev->of_node;
meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
meson_encoder_hdmi->bridge.interlace_allowed = true;
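
The 1000ULL suffixes above are an integer-promotion fix: mode->clock is a 32-bit int holding kHz, and mode->clock * 1000 is evaluated entirely in 32-bit arithmetic before being widened for the assignment, so a large enough pixel clock overflows (and signed overflow is undefined behavior). Multiplying by 1000ULL promotes the whole expression to unsigned 64 bits first. Illustrative:

	int clock_khz = 2500000;			/* hypothetical ~2.5 GHz clock */
	unsigned long long bad  = clock_khz * 1000;	/* 32-bit multiply: overflows */
	unsigned long long good = clock_khz * 1000ULL;	/* 64-bit multiply: correct */
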
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 3325580d885d..dfe0c28a0f05 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -110,10 +110,7 @@
#define HDMI_PLL_LOCK BIT(31)
#define HDMI_PLL_LOCK_G12A (3 << 30)
-#define PIXEL_FREQ_1000_1001(_freq) \
- DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
-#define PHY_FREQ_1000_1001(_freq) \
- (PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10)
+#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
/* VID PLL Dividers */
enum {
@@ -772,6 +769,36 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
pll_freq);
}
+static bool meson_vclk_freqs_are_matching_param(unsigned int idx,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq)
+{
+ DRM_DEBUG_DRIVER("i = %d vclk_freq = %lluHz alt = %lluHz\n",
+ idx, params[idx].vclk_freq,
+ FREQ_1000_1001(params[idx].vclk_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
+ idx, params[idx].phy_freq,
+ FREQ_1000_1001(params[idx].phy_freq));
+
+ /* Match strict frequency */
+ if (phy_freq == params[idx].phy_freq &&
+ vclk_freq == params[idx].vclk_freq)
+ return true;
+
+ /* Match 1000/1001 variant: vclk deviation has to be less than 1kHz
+ * (drm EDID is defined in 1kHz steps, so everything smaller must be
+ * rounding error) and the PHY freq deviation has to be less than
+ * 10kHz (as the TMDS clock is 10 times the pixel clock, so anything
+ * smaller must be rounding error as well).
+ */
+ if (abs(vclk_freq - FREQ_1000_1001(params[idx].vclk_freq)) < 1000 &&
+ abs(phy_freq - FREQ_1000_1001(params[idx].phy_freq)) < 10000)
+ return true;
+
+ /* no match */
+ return false;
+}
+
enum drm_mode_status
meson_vclk_vic_supported_freq(struct meson_drm *priv,
unsigned long long phy_freq,
@@ -790,19 +817,7 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv,
}
for (i = 0 ; params[i].pixel_freq ; ++i) {
- DRM_DEBUG_DRIVER("i = %d pixel_freq = %lluHz alt = %lluHz\n",
- i, params[i].pixel_freq,
- PIXEL_FREQ_1000_1001(params[i].pixel_freq));
- DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
- i, params[i].phy_freq,
- PHY_FREQ_1000_1001(params[i].phy_freq));
- /* Match strict frequency */
- if (phy_freq == params[i].phy_freq &&
- vclk_freq == params[i].vclk_freq)
- return MODE_OK;
- /* Match 1000/1001 variant */
- if (phy_freq == PHY_FREQ_1000_1001(params[i].phy_freq) &&
- vclk_freq == PIXEL_FREQ_1000_1001(params[i].vclk_freq))
+ if (meson_vclk_freqs_are_matching_param(i, phy_freq, vclk_freq))
return MODE_OK;
}
@@ -1075,10 +1090,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
- if ((phy_freq == params[freq].phy_freq ||
- phy_freq == PHY_FREQ_1000_1001(params[freq].phy_freq)) &&
- (vclk_freq == params[freq].vclk_freq ||
- vclk_freq == PIXEL_FREQ_1000_1001(params[freq].vclk_freq))) {
+ if (meson_vclk_freqs_are_matching_param(freq, phy_freq,
+ vclk_freq)) {
if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
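
Besides deduplicating the two callers, meson_vclk_freqs_are_matching_param() loosens the 1000/1001 comparison from exact equality to the tolerance window its comment describes: DRM modes quantize pixel clocks to 1 kHz, so the fractional NTSC-family rates can never compare equal exactly. A worked example for 1080p59.94 (nominal 148.5 MHz; values in Hz, illustrative):

	unsigned long long nominal = 148500000ULL;
	unsigned long long alt = DIV_ROUND_CLOSEST_ULL(nominal * 1000ULL, 1001ULL);
	/* alt == 148351648 Hz. A mode listing 148352 kHz asks for
	 * 148352000 Hz, i.e. 352 Hz away -- inside the 1 kHz window,
	 * where the old exact comparison would have rejected it. */
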
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 412dcbea0e2d..a962ae564a75 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_MGAG200
tristate "Matrox G200"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/mgag200/mgag200_ddc.c b/drivers/gpu/drm/mgag200/mgag200_ddc.c
index 6d81ea8931e8..c31673eaa554 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ddc.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ddc.c
@@ -26,7 +26,6 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <linux/export.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 819a7e9381e3..f4bf40cd7c88 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -382,10 +382,10 @@ int mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
.destroy = drm_plane_cleanup, \
DRM_GEM_SHADOW_PLANE_FUNCS
-void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format);
-void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut);
+void mgag200_crtc_fill_gamma(struct mga_device *mdev, const struct drm_format_info *format);
+void mgag200_crtc_load_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut);
enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index c20ed0ab50ec..23debc70dc54 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -200,9 +200,9 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200er_reset_tagfifo(mdev);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index 78be964eb97c..f8796e2b7a0f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -201,9 +201,9 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200ev_set_hiprilvl(mdev);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index 7a32d3b1d226..e80da12ba1fe 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -332,9 +332,9 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 6067d08aeee3..951d715dea30 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
@@ -30,35 +31,37 @@
* This file contains setup code for the CRTC.
*/
-void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
- const struct drm_format_info *format)
+static void mgag200_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
{
- int i;
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ WREG8(DAC_INDEX + MGA1064_INDEX, i8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, r8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, g8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, b8);
+}
- WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+void mgag200_crtc_fill_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format)
+{
+ struct drm_crtc *crtc = &mdev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
- /* Use better interpolation, to take 32 values from 0 to 255 */
- for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- }
+ drm_crtc_fill_gamma_565(crtc, mgag200_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- }
+ drm_crtc_fill_gamma_888(crtc, mgag200_set_gamma_lut);
break;
default:
drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
@@ -67,36 +70,19 @@ void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
}
}
-void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+void mgag200_crtc_load_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- int i;
-
- WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+ struct drm_crtc *crtc = &mdev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
- /* Use better interpolation, to take 32 values from lut[0] to lut[255] */
- for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].red >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].blue >> 8);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- }
+ drm_crtc_load_gamma_565_from_888(crtc, lut, mgag200_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].red >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].blue >> 8);
- }
+ drm_crtc_load_gamma_888(crtc, lut, mgag200_set_gamma_lut);
break;
default:
drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
@@ -642,9 +628,9 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
const struct drm_format_info *format = mgag200_crtc_state->format;
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
}
}
@@ -665,9 +651,9 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
funcs->pixpllc_atomic_update(crtc, old_state);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
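
The mgag200 rework moves the LUT-walking loops into the shared drm_crtc_fill_gamma_*()/drm_crtc_load_gamma_*() helpers, which iterate the palette and call back into the driver per entry with 16-bit channel values (truncated to the 8-bit DAC above). The RGB565 variants still have to encode what the removed loops did: 5 bits of red/blue give 32 palette steps while 6 bits of green give 64, so red/blue are interpolated over 32 entries and entries 32..63 carry green only. Sketch of that index math, with set_entry() as an illustrative stand-in for the per-entry callback:

	int i;

	for (i = 0; i < 32; i++)	/* 5-bit R/B, 6-bit G, interpolated */
		set_entry(i, i * 8 + i / 4, i * 4 + i / 16, i * 8 + i / 4);
	for (; i < 64; i++)		/* green-only tail entries */
		set_entry(i, 0, i * 4 + i / 16, 0);
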
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 974bc7c0ea76..250246f81ea9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -15,21 +15,13 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
- select DRM_CLIENT_SELECTION
- select DRM_DISPLAY_DP_AUX_BUS
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_BRIDGE_CONNECTOR
select DRM_EXEC
- select DRM_KMS_HELPER
- select DRM_PANEL
- select DRM_BRIDGE
- select DRM_PANEL_BRIDGE
+ select DRM_GPUVM
select DRM_SCHED
- select FB_SYSMEM_HELPERS if DRM_FBDEV_EMULATION
select SHMEM
select TMPFS
select QCOM_SCM
+ select QCOM_UBWC_CONFIG
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
@@ -66,6 +58,22 @@ config DRM_MSM_VALIDATE_XML
Validate XML files with register definitions against rules-fd schema.
This option is mostly targeting DRM MSM developers. If unsure, say N.
+config DRM_MSM_KMS
+ def_bool n
+ depends on DRM_MSM
+ select DRM_BRIDGE
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+ select DRM_PANEL_BRIDGE
+
+config DRM_MSM_KMS_FBDEV
+ def_bool DRM_FBDEV_EMULATION
+ depends on DRM_MSM_KMS
+ select FB_SYSMEM_HELPERS
+
config DRM_MSM_MDSS
bool
depends on DRM_MSM
@@ -74,6 +82,7 @@ config DRM_MSM_MDSS
config DRM_MSM_MDP4
bool "Enable MDP4 support in MSM DRM driver"
depends on DRM_MSM
+ select DRM_MSM_KMS
default y
help
Compile in support for the Mobile Display Processor v4 (MDP4) in
@@ -84,6 +93,7 @@ config DRM_MSM_MDP5
bool "Enable MDP5 support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
+ select DRM_MSM_KMS
default y
help
Compile in support for the Mobile Display Processor v5 (MDP5) in
@@ -94,6 +104,7 @@ config DRM_MSM_DPU
bool "Enable DPU support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
+ select DRM_MSM_KMS
select DRM_DISPLAY_DSC_HELPER
default y
help
@@ -104,7 +115,11 @@ config DRM_MSM_DPU
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
depends on DRM_MSM
+ depends on DRM_MSM_KMS
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select RATIONAL
+ select DRM_DISPLAY_DP_AUX_BUS
+ select DRM_DISPLAY_DP_HELPER
default y
help
Compile in support for DP driver in MSM DRM driver. DP external
@@ -114,6 +129,7 @@ config DRM_MSM_DP
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
+ depends on DRM_MSM_KMS
select DRM_PANEL
select DRM_MIPI_DSI
select DRM_DISPLAY_DSC_HELPER
@@ -169,6 +185,7 @@ config DRM_MSM_DSI_7NM_PHY
config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
depends on DRM_MSM
+ depends on DRM_MSM_KMS
default y
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 5df20cbeafb8..0c0dfb25f01b 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -48,7 +48,6 @@ msm-display-$(CONFIG_DRM_MSM_MDP4) += \
disp/mdp4/mdp4_dsi_encoder.o \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
- disp/mdp4/mdp4_lvds_connector.o \
disp/mdp4/mdp4_lvds_pll.o \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
@@ -101,18 +100,15 @@ msm-display-$(CONFIG_DRM_MSM_DPU) += \
msm-display-$(CONFIG_DRM_MSM_MDSS) += \
msm_mdss.o \
-msm-display-y += \
+msm-display-$(CONFIG_DRM_MSM_KMS) += \
disp/mdp_format.o \
disp/mdp_kms.o \
disp/msm_disp_snapshot.o \
disp/msm_disp_snapshot_util.o \
msm-y += \
- msm_atomic.o \
- msm_atomic_tracepoints.o \
msm_debugfs.o \
msm_drv.o \
- msm_fb.o \
msm_fence.o \
msm_gem.o \
msm_gem_prime.o \
@@ -123,21 +119,24 @@ msm-y += \
msm_gpu_devfreq.o \
msm_io_utils.o \
msm_iommu.o \
- msm_kms.o \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
msm_submitqueue.o \
+ msm_syncobj.o \
msm_gpu_tracepoints.o \
-msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+msm-$(CONFIG_DRM_MSM_KMS) += \
+ msm_atomic.o \
+ msm_atomic_tracepoints.o \
+ msm_fb.o \
+ msm_kms.o \
-msm-display-$(CONFIG_DEBUG_FS) += \
- dp/dp_debug.o
+msm-$(CONFIG_DRM_MSM_KMS_FBDEV) += msm_fbdev.o
msm-display-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
- dp/dp_catalog.o \
dp/dp_ctrl.o \
+ dp/dp_debug.o \
dp/dp_display.o \
dp/dp_drm.o \
dp/dp_link.o \
@@ -160,7 +159,8 @@ msm-display-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
msm-display-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
msm-display-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
-msm-y += $(adreno-y) $(msm-display-y)
+msm-y += $(adreno-y)
+msm-$(CONFIG_DRM_MSM_KMS) += $(msm-display-y)
obj-$(CONFIG_DRM_MSM) += msm.o
@@ -196,6 +196,11 @@ ADRENO_HEADERS = \
generated/a4xx.xml.h \
generated/a5xx.xml.h \
generated/a6xx.xml.h \
+ generated/a6xx_descriptors.xml.h \
+ generated/a6xx_enums.xml.h \
+ generated/a6xx_perfcntrs.xml.h \
+ generated/a7xx_enums.xml.h \
+ generated/a7xx_perfcntrs.xml.h \
generated/a6xx_gmu.xml.h \
generated/adreno_common.xml.h \
generated/adreno_pm4.xml.h \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
index 9ddb7b31fd98..5ddd015f930d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
@@ -45,8 +45,3 @@ static const struct adreno_info a2xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a2xx);
-
-MODULE_FIRMWARE("qcom/leia_pfp_470.fw");
-MODULE_FIRMWARE("qcom/leia_pm4_470.fw");
-MODULE_FIRMWARE("qcom/yamato_pfp.fw");
-MODULE_FIRMWARE("qcom/yamato_pm4.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 379a3d346c30..ec38db45d8a3 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -113,7 +113,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- a2xx_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+ a2xx_gpummu_params(to_msm_vm(gpu->vm)->mmu, &pt_base, &tran_error);
DBG("%s", gpu->name);
@@ -466,19 +466,18 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
-static struct msm_gem_address_space *
-a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+static struct drm_gpuvm *
+a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
- aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- 0xfff * SZ_64K);
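+ /* the trailing 'true' requests a kernel-managed VM (the
+ * kernel_managed flag of msm_gem_vm_create())
+ */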
+ vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true);
- if (IS_ERR(aspace) && !IS_ERR(mmu))
+ if (IS_ERR(vm) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
- return aspace;
+ return vm;
}
static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -504,7 +503,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = a2xx_create_address_space,
+ .create_vm = a2xx_create_vm,
.get_rptr = a2xx_get_rptr,
},
};
@@ -551,14 +550,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->registers = a220_registers;
- if (!gpu->aspace) {
- dev_err(dev->dev, "No memory protection without MMU\n");
- if (!allow_vram_carveout) {
- ret = -ENXIO;
- goto fail;
- }
- }
-
return gpu;
fail:
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index 39641551eeb6..0407c9bc8c1b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -29,13 +29,16 @@ static void a2xx_gpummu_detach(struct msm_mmu *mmu)
}
static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
struct sg_dma_page_iter dma_iter;
unsigned prot_bits = 0;
+ WARN_ON(off != 0);
+
if (prot & IOMMU_WRITE)
prot_bits |= 1;
if (prot & IOMMU_READ)
@@ -71,10 +74,6 @@ static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
return 0;
}
-static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
-{
-}
-
static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
@@ -90,7 +89,6 @@ static const struct msm_mmu_funcs funcs = {
.map = a2xx_gpummu_map,
.unmap = a2xx_gpummu_unmap,
.destroy = a2xx_gpummu_destroy,
- .resume_translation = a2xx_gpummu_resume_translation,
};
struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
index 2eb6c3e93748..1498e6532f62 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
@@ -85,8 +85,3 @@ static const struct adreno_info a3xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a3xx);
-
-MODULE_FIRMWARE("qcom/a300_pm4.fw");
-MODULE_FIRMWARE("qcom/a300_pfp.fw");
-MODULE_FIRMWARE("qcom/a330_pm4.fw");
-MODULE_FIRMWARE("qcom/a330_pfp.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b6df115bb567..a956cd79195e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -526,7 +526,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a3xx_gpu_busy,
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a3xx_get_rptr,
},
};
@@ -581,21 +581,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}
- if (!gpu->aspace) {
- /* TODO we think it is possible to configure the GPU to
- * restrict access to VRAM carveout. But the required
- * registers are unknown. For now just bail out and
- * limp along with just modesetting. If it turns out
- * to not be possible to restrict access, then we must
- * implement a cmdstream validator.
- */
- DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- if (!allow_vram_carveout) {
- ret = -ENXIO;
- goto fail;
- }
- }
-
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
if (IS_ERR(icc_path)) {
ret = PTR_ERR(icc_path);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
index 93519f807f87..09f9f228b75e 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
@@ -45,6 +45,3 @@ static const struct adreno_info a4xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a4xx);
-
-MODULE_FIRMWARE("qcom/a420_pm4.fw");
-MODULE_FIRMWARE("qcom/a420_pfp.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index f1b18a6663f7..83f6329accba 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -645,7 +645,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a4xx_gpu_busy,
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a4xx_get_rptr,
},
.get_timestamp = a4xx_get_timestamp,
@@ -695,21 +695,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
- if (!gpu->aspace) {
- /* TODO we think it is possible to configure the GPU to
- * restrict access to VRAM carveout. But the required
- * registers are unknown. For now just bail out and
- * limp along with just modesetting. If it turns out
- * to not be possible to restrict access, then we must
- * implement a cmdstream validator.
- */
- DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- if (!allow_vram_carveout) {
- ret = -ENXIO;
- goto fail;
- }
- }
-
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
if (IS_ERR(icc_path)) {
ret = PTR_ERR(icc_path);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
index 633f31539162..b48a636d8237 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
@@ -150,12 +150,3 @@ static const struct adreno_info a5xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a5xx);
-
-MODULE_FIRMWARE("qcom/a530_pm4.fw");
-MODULE_FIRMWARE("qcom/a530_pfp.fw");
-MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
-MODULE_FIRMWARE("qcom/a530_zap.mdt");
-MODULE_FIRMWARE("qcom/a530_zap.b00");
-MODULE_FIRMWARE("qcom/a530_zap.b01");
-MODULE_FIRMWARE("qcom/a530_zap.b02");
-MODULE_FIRMWARE("qcom/a540_gpmu.fw2");
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 169b8fe688f8..625a4e787d8f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -116,13 +116,13 @@ reset_set(void *data, u64 val)
adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 650e5bac225f..4a04dc43a8e6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -131,6 +131,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
ring->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
@@ -620,7 +622,7 @@ static int a5xx_ucode_load(struct msm_gpu *gpu)
a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a5xx_gpu->shadow_bo,
+ gpu->vm, &a5xx_gpu->shadow_bo,
&a5xx_gpu->shadow_iova);
if (IS_ERR(a5xx_gpu->shadow))
@@ -833,8 +835,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
- BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
- hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+ BUG_ON(adreno_gpu->ubwc_config->highest_bank_bit < 13);
+ hbb = adreno_gpu->ubwc_config->highest_bank_bit - 13;
gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7);
gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1);
@@ -1040,22 +1042,22 @@ static void a5xx_destroy(struct msm_gpu *gpu)
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
- msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->gpmu_bo);
}
if (a5xx_gpu->shadow_bo) {
- msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->shadow_bo);
}
@@ -1455,7 +1457,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new(gpu->dev,
- SZ_1M, MSM_BO_WC, gpu->aspace,
+ SZ_1M, MSM_BO_WC, gpu->vm,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
@@ -1555,7 +1557,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
- msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ msm_gem_kernel_put(dumper.bo, gpu->vm);
return;
}
@@ -1563,7 +1565,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
- msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ msm_gem_kernel_put(dumper.bo, gpu->vm);
}
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1711,7 +1713,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
@@ -1754,6 +1756,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config = pdev->dev.platform_data;
+ const struct qcom_ubwc_cfg_data *common_cfg;
struct a5xx_gpu *a5xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
@@ -1784,21 +1787,20 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
- if (gpu->aspace)
- msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ a5xx_fault_handler);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
- /* Set the highest bank bit */
- if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
- adreno_gpu->ubwc_config.highest_bank_bit = 15;
- else
- adreno_gpu->ubwc_config.highest_bank_bit = 14;
+ /* Inherit the common config and make some necessary fixups */
+ common_cfg = qcom_ubwc_config_get_data();
+ if (IS_ERR(common_cfg))
+ return ERR_CAST(common_cfg);
- /* a5xx only supports UBWC 1.0, these are not configurable */
- adreno_gpu->ubwc_config.macrotile_mode = 0;
- adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+ /* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+ adreno_gpu->_ubwc_config = *common_cfg;
+ adreno_gpu->ubwc_config = &adreno_gpu->_ubwc_config;
adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 6b91e0bd1514..d6da7351cfbb 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -363,7 +363,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
ptr = msm_gem_kernel_new(drm, bosize,
- MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
return;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 36f72c43eae8..e4924b5e1c48 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -79,7 +79,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
static void a5xx_preempt_timer(struct timer_list *t)
{
- struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
+ struct a5xx_gpu *a5xx_gpu = timer_container_of(a5xx_gpu, t,
+ preempt_timer);
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
@@ -254,7 +255,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -262,9 +263,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
/* The buffer to store counters needs to be unprivileged */
counters = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
+ MSM_BO_WC, gpu->vm, &counters_bo, &counters_iova);
if (IS_ERR(counters)) {
- msm_gem_kernel_put(bo, gpu->aspace);
+ msm_gem_kernel_put(bo, gpu->vm);
return PTR_ERR(counters);
}
@@ -295,8 +296,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++) {
- msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
- msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->vm);
+ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->vm);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 53e2ff4406d8..00e1afd46b81 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -681,6 +681,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
},
.gmem = (SZ_128K + SZ_4K),
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a610_zap.mdt",
@@ -713,6 +714,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -743,7 +745,8 @@ static const struct adreno_info a6xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mbn",
.a6xx = &(const struct a6xx_info) {
@@ -769,7 +772,8 @@ static const struct adreno_info a6xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
@@ -791,6 +795,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -815,6 +820,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -838,8 +844,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -874,7 +881,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00010000,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 137, 1 },
@@ -907,7 +913,6 @@ static const struct adreno_info a6xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
@@ -920,8 +925,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -939,8 +945,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_1M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -973,7 +980,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 1, 1 },
@@ -1000,7 +1006,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020000,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06060300),
.family = ADRENO_6XX_GEN4,
@@ -1019,7 +1024,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
@@ -1039,7 +1043,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00200200,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
@@ -1056,8 +1059,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_2M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -1085,22 +1089,10 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00800200,
},
- .address_space_size = SZ_16G,
}
};
DECLARE_ADRENO_GPULIST(a6xx);
-MODULE_FIRMWARE("qcom/a615_zap.mbn");
-MODULE_FIRMWARE("qcom/a619_gmu.bin");
-MODULE_FIRMWARE("qcom/a630_sqe.fw");
-MODULE_FIRMWARE("qcom/a630_gmu.bin");
-MODULE_FIRMWARE("qcom/a630_zap.mbn");
-MODULE_FIRMWARE("qcom/a640_gmu.bin");
-MODULE_FIRMWARE("qcom/a650_gmu.bin");
-MODULE_FIRMWARE("qcom/a650_sqe.fw");
-MODULE_FIRMWARE("qcom/a660_gmu.bin");
-MODULE_FIRMWARE("qcom/a660_sqe.fw");
-
static const struct adreno_reglist a702_hwcg[] = {
{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220 },
@@ -1343,7 +1335,7 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = {
REG_A6XX_RB_NC_MODE_CNTL,
REG_A6XX_RB_CMP_DBG_ECO_CNTL,
REG_A7XX_GRAS_NC_MODE_CNTL,
- REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
+ REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE,
REG_A6XX_UCHE_GBIF_GX_CONFIG,
REG_A6XX_UCHE_CLIENT_PF,
REG_A6XX_TPL1_DBG_ECO_CNTL1,
@@ -1395,7 +1387,6 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_cgc_mode = 0x00020000,
},
- .address_space_size = SZ_16G,
.preempt_record_size = 2860 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
@@ -1429,7 +1420,6 @@ static const struct adreno_info a7xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
.preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */
@@ -1451,8 +1441,14 @@ static const struct adreno_info a7xx_gpus[] = {
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
},
- .address_space_size = SZ_256G,
.preempt_record_size = 4192 * SZ_1K,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 59, 1 },
+ { 7, 2 },
+ { 232, 3 },
+ { 146, 4 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
.family = ADRENO_7XX_GEN3,
@@ -1484,8 +1480,46 @@ static const struct adreno_info a7xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
.preempt_record_size = 3572 * SZ_1K,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x43030c00),
+ .family = ADRENO_7XX_GEN2,
+ .fw = {
+ [ADRENO_FW_SQE] = "gen71500_sqe.fw",
+ [ADRENO_FW_GMU] = "gen71500_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
+ .init = a6xx_gpu_init,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a740_hwcg,
+ .protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
+ .gmu_chipid = 0x70f0000,
+ .gmu_cgc_mode = 0x00020222,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
+ },
+ .preempt_record_size = 4192 * SZ_1K,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 294, 1 },
+ { 263, 2 },
+ { 233, 3 },
+ { 141, 4 },
+ ),
}
};
DECLARE_ADRENO_GPULIST(a7xx);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index c8711938a5f4..28e6705c6da6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1064,14 +1064,6 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu->hung = false;
- /* Notify AOSS about the ACD state (unimplemented for now => disable it) */
- if (!IS_ERR(gmu->qmp)) {
- ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
- 0 /* Hardcode ACD to be disabled for now */);
- if (ret)
- dev_err(gmu->dev, "failed to send GPU ACD state\n");
- }
-
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
@@ -1267,15 +1259,17 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
- msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
-
- gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
- msm_gem_address_space_put(gmu->aspace);
+ struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu;
+
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->log.obj, gmu->vm);
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gmu->vm);
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
@@ -1304,7 +1298,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
if (IS_ERR(bo->obj))
return PTR_ERR(bo->obj);
- ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+ ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova,
range_start, range_end);
if (ret) {
drm_gem_object_put(bo->obj);
@@ -1319,7 +1313,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
return 0;
}
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
{
struct msm_mmu *mmu;
@@ -1329,9 +1323,9 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
if (IS_ERR(mmu))
return PTR_ERR(mmu);
- gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
- if (IS_ERR(gmu->aspace))
- return PTR_ERR(gmu->aspace);
+ gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true);
+ if (IS_ERR(gmu->vm))
+ return PTR_ERR(gmu->vm);
return 0;
}
@@ -1671,6 +1665,75 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
return a6xx_gmu_rpmh_votes_init(gmu);
}
+static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct a6xx_hfi_acd_table *cmd = &gmu->acd_table;
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret, i, cmd_idx = 0;
+ extern bool disable_acd;
+
+ /* Skip ACD probe if requested via module param */
+ if (disable_acd) {
+ DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n");
+ return 0;
+ }
+
+ cmd->version = 1;
+ cmd->stride = 1;
+ cmd->enable_by_level = 0;
+
+ /* Skip freq = 0 and parse the acd-level property for the rest of the OPPs */
+ for (i = 1; i < gmu->nr_gpu_freqs; i++) {
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ unsigned long freq;
+ u32 val;
+
+ freq = gmu->gpu_freqs[i];
+ opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
+ np = dev_pm_opp_get_of_node(opp);
+
+ ret = of_property_read_u32(np, "qcom,opp-acd-level", &val);
+ of_node_put(np);
+ dev_pm_opp_put(opp);
+ if (ret == -EINVAL)
+ continue;
+ else if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq);
+ return ret;
+ }
+
+ cmd->enable_by_level |= BIT(i);
+ cmd->data[cmd_idx++] = val;
+ }
+
+ cmd->num_levels = cmd_idx;
+
+ /* It is a problem if the qmp node is unavailable when ACD is required */
+ if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n");
+ return -EINVAL;
+ }
+
+ /* Otherwise, nothing to do if qmp is unavailable */
+ if (IS_ERR_OR_NULL(gmu->qmp))
+ return 0;
+
+ /*
+ * Notify AOSS about the ACD state. AOSS is supposed to assume that ACD is
+ * disabled after a system reset, so failing to report the 'OFF' state is harmless.
+ */
+ ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level);
+ if (ret && cmd->enable_by_level) {
+ DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
@@ -1879,7 +1942,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
- ret = a6xx_gmu_memory_probe(gmu);
+ ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu);
if (ret)
goto err_put_device;
@@ -1989,10 +2052,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto detach_cxpd;
}
+ /* Other errors are handled during GPU ACD probe */
gmu->qmp = qmp_get(gmu->dev);
- if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
- ret = PTR_ERR(gmu->qmp);
- goto remove_device_link;
+ if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto detach_gxpd;
}
init_completion(&gmu->pd_gate);
@@ -2008,6 +2072,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
+ ret = a6xx_gmu_acd_probe(gmu);
+ if (ret)
+ goto detach_gxpd;
+
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
@@ -2018,7 +2086,13 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
return 0;
-remove_device_link:
+detach_gxpd:
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ dev_pm_domain_detach(gmu->gxpd, false);
+
+ if (!IS_ERR_OR_NULL(gmu->qmp))
+ qmp_put(gmu->qmp);
+
device_link_del(link);
detach_cxpd:
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 0c888b326cfb..d1ce11131ba6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -62,7 +62,7 @@ struct a6xx_gmu {
/* For serializing communication with the GMU: */
struct mutex lock;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
void __iomem *mmio;
void __iomem *rscc;
@@ -93,6 +93,7 @@ struct a6xx_gmu {
int nr_gpu_freqs;
unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
u32 gx_arc_votes[GMU_MAX_GX_FREQS];
+ struct a6xx_hfi_acd_table acd_table;
int nr_gpu_bws;
unsigned long gpu_bw_table[GMU_MAX_GX_FREQS];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 242d02d48c0c..45dd5fd1c2bf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -111,7 +111,8 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
struct msm_ringbuffer *ring, struct msm_gem_submit *submit)
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
- struct msm_file_private *ctx = submit->queue->ctx;
+ struct msm_context *ctx = submit->queue->ctx;
+ struct drm_gpuvm *vm = msm_context_vm(submit->dev, ctx);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
phys_addr_t ttbr;
u32 asid;
@@ -120,7 +121,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
if (ctx->seqno == ring->cur_ctx_seqno)
return;
- if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+ if (msm_iommu_pagetable_params(to_msm_vm(vm)->mmu, &ttbr, &asid))
return;
if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
@@ -130,6 +131,20 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno - 1);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+ /* Reset state used to synchronize BR and BV */
+ OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1);
+ OUT_RING(ring,
+ CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS |
+ CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE |
+ CP_RESET_CONTEXT_STATE_0_CLEAR_BV_BR_COUNTER |
+ CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BR);
}
if (!sysprof) {
@@ -212,6 +227,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
a6xx_set_pagetable(a6xx_gpu, ring, submit);
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
@@ -335,6 +352,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
/*
* Toggle concurrent binning for pagetable switch and set the thread to
* BR since only it can execute the pagetable switch packets.
@@ -585,118 +604,118 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_CP_PROTECT(protect->count_max - 1), protect->regs[i]);
}
-static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
+static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
- gpu->ubwc_config.rgb565_predicator = 0;
- gpu->ubwc_config.uavflagprd_inv = 0;
- gpu->ubwc_config.min_acc_len = 0;
- gpu->ubwc_config.ubwc_swizzle = 0x6;
- gpu->ubwc_config.macrotile_mode = 0;
- gpu->ubwc_config.highest_bank_bit = 15;
+ const struct qcom_ubwc_cfg_data *common_cfg;
+ struct qcom_ubwc_cfg_data *cfg = &gpu->_ubwc_config;
+
+ /* Inherit the common config and make some necessary fixups */
+ common_cfg = qcom_ubwc_config_get_data();
+ if (IS_ERR(common_cfg))
+ return PTR_ERR(common_cfg);
+
+ /* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+ *cfg = *common_cfg;
+
+ cfg->ubwc_swizzle = 0x6;
+ cfg->highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 13;
- gpu->ubwc_config.min_acc_len = 1;
- gpu->ubwc_config.ubwc_swizzle = 0x7;
+ cfg->highest_bank_bit = 13;
+ cfg->ubwc_swizzle = 0x7;
}
if (adreno_is_a618(gpu))
- gpu->ubwc_config.highest_bank_bit = 14;
+ cfg->highest_bank_bit = 14;
if (adreno_is_a619(gpu))
/* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
- gpu->ubwc_config.highest_bank_bit = 13;
+ cfg->highest_bank_bit = 13;
if (adreno_is_a619_holi(gpu))
- gpu->ubwc_config.highest_bank_bit = 13;
-
- if (adreno_is_a621(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 13;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- }
-
- if (adreno_is_a623(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 16;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
- }
+ cfg->highest_bank_bit = 13;
- if (adreno_is_a640_family(gpu))
- gpu->ubwc_config.amsbc = 1;
+ if (adreno_is_a621(gpu))
+ cfg->highest_bank_bit = 13;
- if (adreno_is_a680(gpu))
- gpu->ubwc_config.macrotile_mode = 1;
+ if (adreno_is_a623(gpu))
+ cfg->highest_bank_bit = 16;
if (adreno_is_a650(gpu) ||
adreno_is_a660(gpu) ||
adreno_is_a690(gpu) ||
adreno_is_a730(gpu) ||
adreno_is_a740_family(gpu)) {
- /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
- gpu->ubwc_config.highest_bank_bit = 16;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
+ /* TODO: get ddr type from bootloader and use 15 for LPDDR4 */
+ cfg->highest_bank_bit = 16;
}
if (adreno_is_a663(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 13;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
- gpu->ubwc_config.ubwc_swizzle = 0x4;
+ cfg->highest_bank_bit = 13;
+ cfg->ubwc_swizzle = 0x4;
}
- if (adreno_is_7c3(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 14;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
- }
+ if (adreno_is_7c3(gpu))
+ cfg->highest_bank_bit = 14;
- if (adreno_is_a702(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 14;
- gpu->ubwc_config.min_acc_len = 1;
- }
+ if (adreno_is_a702(gpu))
+ cfg->highest_bank_bit = 14;
+
+ if (cfg->highest_bank_bit != common_cfg->highest_bank_bit)
+ DRM_WARN_ONCE("Inconclusive highest_bank_bit value: %u (GPU) vs %u (UBWC_CFG)\n",
+ cfg->highest_bank_bit, common_cfg->highest_bank_bit);
+
+ if (cfg->ubwc_swizzle != common_cfg->ubwc_swizzle)
+ DRM_WARN_ONCE("Inconclusive ubwc_swizzle value: %u (GPU) vs %u (UBWC_CFG)\n",
+ cfg->ubwc_swizzle, common_cfg->ubwc_swizzle);
+
+ gpu->ubwc_config = &gpu->_ubwc_config;
+
+ return 0;
}
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct qcom_ubwc_cfg_data *cfg = adreno_gpu->ubwc_config;
/*
* We subtract 13 from the highest bank bit (13 is the minimum value
* allowed by hw) and write the lowest two bits of the remaining value
* as hbb_lo and the one above it as hbb_hi to the hardware.
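* For example, highest_bank_bit = 15 gives hbb = 2, which is split into
* hbb_hi = 0 (hbb >> 2) and hbb_lo = 2 (hbb & 3); the minimum value of
* 13 yields 0 for both fields.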
*/
- BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
- u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+ BUG_ON(cfg->highest_bank_bit < 13);
+ u32 hbb = cfg->highest_bank_bit - 13;
+ bool rgb565_predicator = cfg->ubwc_enc_version >= UBWC_4_0;
+ u32 level2_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2);
+ bool ubwc_mode = qcom_ubwc_get_ubwc_mode(cfg);
+ bool amsbc = cfg->ubwc_enc_version >= UBWC_3_0;
+ bool min_acc_len_64b = false;
+ u8 uavflagprd_inv = 0;
u32 hbb_hi = hbb >> 2;
u32 hbb_lo = hbb & 3;
- u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
- u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);
+
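+ /* uavflagprd_inv and min_acc_len_64b replace the old per-GPU
+ * ubwc_config fields, while amsbc and rgb565_predicator above are
+ * now derived from the UBWC encoder version:
+ */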
+ if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
+ uavflagprd_inv = 2;
+
+ if (adreno_is_a610(adreno_gpu) || adreno_is_a702(adreno_gpu))
+ min_acc_len_64b = true;
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
level2_swizzling_dis << 12 |
- adreno_gpu->ubwc_config.rgb565_predicator << 11 |
- hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
- adreno_gpu->ubwc_config.min_acc_len << 3 |
+ rgb565_predicator << 11 |
+ hbb_hi << 10 | amsbc << 4 |
+ min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
level2_swizzling_dis << 6 | hbb_hi << 4 |
- adreno_gpu->ubwc_config.min_acc_len << 3 |
+ min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
level2_swizzling_dis << 12 | hbb_hi << 10 |
- adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
- adreno_gpu->ubwc_config.min_acc_len << 3 |
+ uavflagprd_inv << 4 |
+ min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
@@ -704,10 +723,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
FIELD_PREP(GENMASK(8, 5), hbb_lo));
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
- adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
+ min_acc_len_64b << 23 | hbb_lo << 21);
gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
- adreno_gpu->ubwc_config.macrotile_mode);
+ cfg->macrotile_mode);
}
static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
@@ -953,7 +972,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
- msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->sqe_bo);
a6xx_gpu->sqe_bo = NULL;
@@ -970,7 +989,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a6xx_gpu->shadow_bo,
+ gpu->vm, &a6xx_gpu->shadow_bo,
&a6xx_gpu->shadow_iova);
if (IS_ERR(a6xx_gpu->shadow))
@@ -981,7 +1000,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a6xx_gpu->pwrup_reglist_bo,
+ gpu->vm, &a6xx_gpu->pwrup_reglist_bo,
&a6xx_gpu->pwrup_reglist_iova);
if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr))
@@ -2194,12 +2213,12 @@ static void a6xx_destroy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
- msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->sqe_bo);
}
if (a6xx_gpu->shadow_bo) {
- msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->shadow_bo);
}
@@ -2239,8 +2258,8 @@ static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
mutex_unlock(&a6xx_gpu->gmu.lock);
}
-static struct msm_gem_address_space *
-a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+static struct drm_gpuvm *
+a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -2254,22 +2273,21 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
!device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- return adreno_iommu_create_address_space(gpu, pdev, quirks);
+ return adreno_iommu_create_vm(gpu, pdev, quirks);
}
-static struct msm_gem_address_space *
-a6xx_create_private_address_space(struct msm_gpu *gpu)
+static struct drm_gpuvm *
+a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
{
struct msm_mmu *mmu;
- mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
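+ /* create a per-context pagetable backed by the GPU's global MMU */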
+ mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
- return msm_gem_address_space_create(mmu,
- "gpu", 0x100000000ULL,
- adreno_private_address_space_size(gpu));
+ return msm_gem_vm_create(gpu->dev, mmu, "gpu", ADRENO_VM_START,
+ adreno_private_vm_size(gpu), kernel_managed);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -2386,8 +2404,8 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = a6xx_create_address_space,
- .create_private_address_space = a6xx_create_private_address_space,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
@@ -2415,8 +2433,8 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = a6xx_create_address_space,
- .create_private_address_space = a6xx_create_private_address_space,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
@@ -2446,8 +2464,8 @@ static const struct adreno_gpu_funcs funcs_a7xx = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = a6xx_create_address_space,
- .create_private_address_space = a6xx_create_private_address_space,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
@@ -2543,11 +2561,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
- if (gpu->aspace)
- msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
- a6xx_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ a6xx_fault_handler);
+
+ ret = a6xx_calc_ubwc_config(adreno_gpu);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
- a6xx_calc_ubwc_config(adreno_gpu);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a6xx_preempt_init(gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 9201a53dd341..6e71f617fc3d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -6,6 +6,10 @@
#include "adreno_gpu.h"
+#include "a6xx_enums.xml.h"
+#include "a7xx_enums.xml.h"
+#include "a6xx_perfcntrs.xml.h"
+#include "a7xx_perfcntrs.xml.h"
#include "a6xx.xml.h"
#include "a6xx_gmu.h"
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 341a72a67401..faca2a0243ab 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -132,7 +132,7 @@ static int a6xx_crashdumper_init(struct msm_gpu *gpu,
struct a6xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new(gpu->dev,
- SZ_1M, MSM_BO_WC, gpu->aspace,
+ SZ_1M, MSM_BO_WC, gpu->vm,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
@@ -158,7 +158,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
/* Make sure all pending memory writes are posted */
wmb();
- gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova);
+ gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);
gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
@@ -1619,7 +1619,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a7xx_get_clusters(gpu, a6xx_state, dumper);
a7xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
- msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ msm_gem_kernel_put(dumper->bo, gpu->vm);
}
a7xx_get_post_crashdumper_registers(gpu, a6xx_state);
@@ -1631,7 +1631,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a6xx_get_clusters(gpu, a6xx_state, dumper);
a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
- msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ msm_gem_kernel_put(dumper->bo, gpu->vm);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e545106c70be..95d93ac6812a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -212,7 +212,7 @@ static const struct a6xx_shader_block {
SHADER(A6XX_SP_LB_5_DATA, 0x200),
SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
- SHADER(A6XX_SP_UAV_DATA, 0x80),
+ SHADER(A6XX_SP_GFX_UAV_BASE_DATA, 0x80),
SHADER(A6XX_SP_INST_TAG, 0x80),
SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 0989aee3dd2c..8e69b1e84657 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -100,16 +100,14 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return 0;
}
-static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
- u32 *payload, u32 payload_size)
+static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seqnum)
{
- struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
- u32 val;
int ret;
+ u32 val;
/* Wait for a response */
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
- val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000);
if (ret) {
DRM_DEV_ERROR(gmu->dev,
@@ -122,6 +120,19 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
+ return 0;
+}
+
+static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
+ u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ int ret;
+
+ ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
+ if (ret)
+ return ret;
+
for (;;) {
struct a6xx_hfi_msg_response resp;
@@ -129,12 +140,18 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
sizeof(resp) >> 2);
- /* If the queue is empty our response never made it */
+ /* If the queue is empty, we may simply have missed earlier
+ * responses that precede the one for our packet. Wait further
+ * before giving up.
+ */
if (!ret) {
- DRM_DEV_ERROR(gmu->dev,
- "The HFI response queue is unexpectedly empty\n");
-
- return -ENOENT;
+ ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "The HFI response queue is unexpectedly empty\n");
+ return ret;
+ }
+ continue;
}
if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
@@ -748,6 +765,38 @@ send:
NULL, 0);
}
+#define HFI_FEATURE_ACD 12
+
+static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
+ struct a6xx_hfi_msg_feature_ctrl msg = {
+ .feature = HFI_FEATURE_ACD,
+ .enable = 1,
+ .data = 0,
+ };
+ int ret;
+
+ if (!acd_table->enable_by_level)
+ return 0;
+
+ /* Enable ACD feature at GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
+ return ret;
+ }
+
+ /* Send ACD table to GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_ACD, acd_table, sizeof(*acd_table), NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to ACD table (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_test msg = { 0 };
@@ -845,6 +894,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
if (ret)
return ret;
+ ret = a6xx_hfi_enable_acd(gmu);
+ if (ret)
+ return ret;
+
ret = a6xx_hfi_send_core_fw_start(gmu);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 52ba4a07d7b9..653ef720e2da 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -151,12 +151,33 @@ struct a6xx_hfi_msg_test {
u32 header;
};
+#define HFI_H2F_MSG_ACD 7
+#define MAX_ACD_STRIDE 2
+
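+/*
+ * ACD table sent to the GMU via HFI_H2F_MSG_ACD: enable_by_level is a
+ * bitmask of GPU frequency levels with ACD enabled (the freq = 0 level
+ * is always skipped), and data holds up to MAX_ACD_STRIDE words per
+ * enabled level; a6xx_gmu_acd_probe() currently uses a stride of 1.
+ */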
+struct a6xx_hfi_acd_table {
+ u32 header;
+ u32 version;
+ u32 enable_by_level;
+ u32 stride;
+ u32 num_levels;
+ u32 data[16 * MAX_ACD_STRIDE];
+};
+
#define HFI_H2F_MSG_START 10
struct a6xx_hfi_msg_start {
u32 header;
};
+#define HFI_H2F_FEATURE_CTRL 11
+
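+/*
+ * Generic feature toggle; a6xx_hfi_enable_acd() sends it with
+ * feature = HFI_FEATURE_ACD before transferring the ACD table itself.
+ */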
+struct a6xx_hfi_msg_feature_ctrl {
+ u32 header;
+ u32 feature;
+ u32 enable;
+ u32 data;
+};
+
#define HFI_H2F_MSG_CORE_FW_START 14
struct a6xx_hfi_msg_core_fw_start {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 9b5e27d2373c..6a12a35dabff 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -87,7 +87,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
static void a6xx_preempt_timer(struct timer_list *t)
{
- struct a6xx_gpu *a6xx_gpu = from_timer(a6xx_gpu, t, preempt_timer);
+ struct a6xx_gpu *a6xx_gpu = timer_container_of(a6xx_gpu, t,
+ preempt_timer);
struct msm_gpu *gpu = &a6xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
@@ -209,7 +210,7 @@ void a6xx_preempt_hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0);
/* Enable the GMEM save/restore feature for preemption */
- gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1);
+ gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 0x1);
/* Reset the preemption state */
set_preempt_state(a6xx_gpu, PREEMPT_NONE);
@@ -343,7 +344,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
PREEMPT_RECORD_SIZE(adreno_gpu),
- MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -361,7 +362,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
PREEMPT_SMMU_INFO_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
- gpu->aspace, &bo, &iova);
+ gpu->vm, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -376,7 +377,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
struct a7xx_cp_smmu_info *smmu_info_ptr = ptr;
- msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid);
+ msm_iommu_pagetable_params(to_msm_vm(gpu->vm)->mmu, &ttbr, &asid);
smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC;
smmu_info_ptr->ttbr0 = ttbr;
@@ -404,7 +405,7 @@ void a6xx_preempt_fini(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++)
- msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace);
+ msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->vm);
}
void a6xx_preempt_init(struct msm_gpu *gpu)
@@ -430,7 +431,7 @@ void a6xx_preempt_init(struct msm_gpu *gpu)
a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev,
PAGE_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
- gpu->aspace, &a6xx_gpu->preempt_postamble_bo,
+ gpu->vm, &a6xx_gpu->preempt_postamble_bo,
&a6xx_gpu->preempt_postamble_iova);
preempt_prepare_postamble(a6xx_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 236b25c094cd..50945bfe9b49 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -16,14 +16,14 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
-bool allow_vram_carveout = false;
-MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
-module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
-
int enable_preemption = -1;
MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on, 0=disable, -1=auto (default))");
module_param(enable_preemption, int, 0600);
+bool disable_acd;
+MODULE_PARM_DESC(disable_acd, "Forcibly disable GPU ACD");
+module_param_unsafe(disable_acd, bool, 0400);
+
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
@@ -133,9 +133,8 @@ err_disable_rpm:
return NULL;
}
-static int find_chipid(struct device *dev, uint32_t *chipid)
+static int find_chipid(struct device_node *node, uint32_t *chipid)
{
- struct device_node *node = dev->of_node;
const char *compat;
int ret;
@@ -169,15 +168,36 @@ static int find_chipid(struct device *dev, uint32_t *chipid)
/* and if that fails, fall back to legacy "qcom,chipid" property: */
ret = of_property_read_u32(node, "qcom,chipid", chipid);
if (ret) {
- DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
+ DRM_ERROR("%pOF: could not parse qcom,chipid: %d\n",
+ node, ret);
return ret;
}
- dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+ pr_warn("%pOF: Using legacy qcom,chipid binding!\n", node);
return 0;
}
+bool adreno_has_gpu(struct device_node *node)
+{
+ const struct adreno_info *info;
+ uint32_t chip_id;
+ int ret;
+
+ ret = find_chipid(node, &chip_id);
+ if (ret)
+ return false;
+
+ info = adreno_info(chip_id);
+ if (!info) {
+ pr_warn("%pOF: Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
+ node, ADRENO_CHIPID_ARGS(chip_id));
+ return false;
+ }
+
+ return true;
+}
+
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
@@ -187,19 +207,18 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
struct msm_gpu *gpu;
int ret;
- ret = find_chipid(dev, &config.chip_id);
- if (ret)
+ ret = find_chipid(dev->of_node, &config.chip_id);
+ /* We shouldn't have gotten this far if we can't parse the chip_id */
+ if (WARN_ON(ret))
return ret;
dev->platform_data = &config;
priv->gpu_pdev = to_platform_device(dev);
info = adreno_info(config.chip_id);
- if (!info) {
- dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
- ADRENO_CHIPID_ARGS(config.chip_id));
+ /* We shouldn't have gotten this far if we don't recognize the GPU: */
+ if (WARN_ON(!info))
return -ENXIO;
- }
config.info = info;
@@ -241,42 +260,23 @@ static const struct component_ops a3xx_ops = {
.unbind = adreno_unbind,
};
-static void adreno_device_register_headless(void)
-{
- /* on imx5, we don't have a top-level mdp/dpu node
- * this creates a dummy node for the driver for that case
- */
- struct platform_device_info dummy_info = {
- .parent = NULL,
- .name = "msm",
- .id = -1,
- .res = NULL,
- .num_res = 0,
- .data = NULL,
- .size_data = 0,
- .dma_mask = ~0,
- };
- platform_device_register_full(&dummy_info);
-}
-
static int adreno_probe(struct platform_device *pdev)
{
+ if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon") ||
+ msm_gpu_no_components())
+ return msm_gpu_probe(pdev, &a3xx_ops);
- int ret;
-
- ret = component_add(&pdev->dev, &a3xx_ops);
- if (ret)
- return ret;
-
- if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
- adreno_device_register_headless();
-
- return 0;
+ return component_add(&pdev->dev, &a3xx_ops);
}
static void adreno_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &a3xx_ops);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+
+ if (priv->kms_init)
+ component_del(&pdev->dev, &a3xx_ops);
+ else
+ msm_gpu_remove(pdev, &a3xx_ops);
}
static void adreno_shutdown(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index 9a327d543f27..e02cabb39f19 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -1311,8 +1311,8 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
{ "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
- { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR,
- REG_A7XX_CP_RESOURCE_TBL_DBG_DATA, 0x04100},
+ { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
+ REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100},
{ "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
{ "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 26db1f4b5fb9..f1230465bf0d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,25 +191,27 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
-struct msm_gem_address_space *
-adreno_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev)
+struct drm_gpuvm *
+adreno_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev)
{
- return adreno_iommu_create_address_space(gpu, pdev, 0);
+ return adreno_iommu_create_vm(gpu, pdev, 0);
}
-struct msm_gem_address_space *
-adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev,
- unsigned long quirks)
+struct drm_gpuvm *
+adreno_iommu_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev,
+ unsigned long quirks)
{
struct iommu_domain_geometry *geometry;
struct msm_mmu *mmu;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
u64 start, size;
mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
- if (IS_ERR_OR_NULL(mmu))
+ if (!mmu)
+ return ERR_PTR(-ENODEV);
+ else if (IS_ERR_OR_NULL(mmu))
return ERR_CAST(mmu);
geometry = msm_iommu_get_geometry(mmu);
@@ -224,47 +226,94 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
start = max_t(u64, SZ_16M, geometry->aperture_start);
size = geometry->aperture_end - start + 1;
- aspace = msm_gem_address_space_create(mmu, "gpu",
- start & GENMASK_ULL(48, 0), size);
+ vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", start & GENMASK_ULL(48, 0),
+ size, true);
- if (IS_ERR(aspace) && !IS_ERR(mmu))
+ if (IS_ERR(vm) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
- return aspace;
+ return vm;
}
-u64 adreno_private_address_space_size(struct msm_gpu *gpu)
+u64 adreno_private_vm_size(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+ const struct io_pgtable_cfg *ttbr1_cfg;
if (address_space_size)
return address_space_size;
- if (adreno_gpu->info->address_space_size)
- return adreno_gpu->info->address_space_size;
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_4GB_VA)
+ return SZ_4G;
- return SZ_4G;
+ if (!adreno_smmu || !adreno_smmu->get_ttbr1_cfg)
+ return SZ_4G;
+
+ ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+
+ /*
+ * Userspace VM is actually using TTBR0, but both are the same size,
+ * with b48 (sign bit) selecting which TTBRn to use. So if IAS is
+ * 48, the total (kernel+user) address space size is effectively
+ * 49 bits. But what userspace is in control of is the lower 48.
+ */
+ return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
+}
+
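As a worked example of the size computation above (an illustrative standalone check, not driver code, assuming a typical TTBR1 ias of 48):

	#include <stdint.h>
	#include <stdio.h>

	#define ADRENO_VM_START	0x100000000ULL	/* same value as in adreno_gpu.h */

	int main(void)
	{
		unsigned int ias = 48;	/* assumption: common SMMU input address size */
		uint64_t size = (1ULL << ias) - ADRENO_VM_START;

		/* Prints 0xffff00000000: 256 TiB minus the 4 GiB below ADRENO_VM_START */
		printf("private VM size: 0x%llx\n", (unsigned long long)size);
		return 0;
	}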
+void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ unsigned long flags;
+
+ /*
+ * Re-enable stall-on-fault only once the cooldown period has passed
+ * and we would actually collect a crashdump for a new fault.
+ */
+ spin_lock_irqsave(&priv->fault_stall_lock, flags);
+ if (!priv->stall_enabled &&
+ ktime_after(ktime_get(), priv->stall_reenable_time) &&
+ !READ_ONCE(gpu->crashstate)) {
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+
+ priv->stall_enabled = true;
+
+ mmu->funcs->set_stall(mmu, true);
+ }
+ spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
}
#define ARM_SMMU_FSR_TF BIT(1)
#define ARM_SMMU_FSR_PF BIT(3)
#define ARM_SMMU_FSR_EF BIT(4)
+#define ARM_SMMU_FSR_SS BIT(30)
int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
u32 scratch[4])
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
const char *type = "UNKNOWN";
- bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+ bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
+ !READ_ONCE(gpu->crashstate);
+ unsigned long irq_flags;
/*
- * If we aren't going to be resuming later from fault_worker, then do
- * it now.
+ * In case there is a subsequent storm of pagefaults, disable
+ * stall-on-fault for at least half a second.
*/
- if (!do_devcoredump) {
- gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+ spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+ if (priv->stall_enabled) {
+ priv->stall_enabled = false;
+
+ mmu->funcs->set_stall(mmu, false);
}
+ priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
+ spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
+
/*
* Print a default message if we couldn't get the data from the
* adreno-smmu-priv
@@ -291,26 +340,37 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
scratch[0], scratch[1], scratch[2], scratch[3]);
if (do_devcoredump) {
+ struct msm_gpu_fault_info fault_info = {};
+
/* Turn off the hangcheck timer to keep it from bothering us */
timer_delete(&gpu->hangcheck_timer);
- gpu->fault_info.ttbr0 = info->ttbr0;
- gpu->fault_info.iova = iova;
- gpu->fault_info.flags = flags;
- gpu->fault_info.type = type;
- gpu->fault_info.block = block;
+ fault_info.ttbr0 = info->ttbr0;
+ fault_info.iova = iova;
+ fault_info.flags = flags;
+ fault_info.type = type;
+ fault_info.block = block;
- kthread_queue_work(gpu->worker, &gpu->fault_work);
+ msm_gpu_fault_crashstate_capture(gpu, &fault_info);
}
return 0;
}
-int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+static bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+ return adreno_smmu && adreno_smmu->set_prr_addr;
+}
+
+int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *drm = gpu->dev;
+ /* Note ctx can be NULL when called from rd_open(): */
+ struct drm_gpuvm *vm = ctx ? msm_context_vm(drm, ctx) : NULL;
/* No pointer params yet */
if (*len != 0)
@@ -356,8 +416,8 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = 0;
return 0;
case MSM_PARAM_FAULTS:
- if (ctx->aspace)
- *value = gpu->global_faults + ctx->aspace->faults;
+ if (vm)
+ *value = gpu->global_faults + to_msm_vm(vm)->faults;
else
*value = gpu->global_faults;
return 0;
@@ -365,36 +425,39 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = gpu->suspend_count;
return 0;
case MSM_PARAM_VA_START:
- if (ctx->aspace == gpu->aspace)
+ if (vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->aspace->va_start;
+ *value = vm->mm_start;
return 0;
case MSM_PARAM_VA_SIZE:
- if (ctx->aspace == gpu->aspace)
+ if (vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->aspace->va_size;
+ *value = vm->mm_range;
return 0;
case MSM_PARAM_HIGHEST_BANK_BIT:
- *value = adreno_gpu->ubwc_config.highest_bank_bit;
+ *value = adreno_gpu->ubwc_config->highest_bank_bit;
return 0;
case MSM_PARAM_RAYTRACING:
*value = adreno_gpu->has_ray_tracing;
return 0;
case MSM_PARAM_UBWC_SWIZZLE:
- *value = adreno_gpu->ubwc_config.ubwc_swizzle;
+ *value = adreno_gpu->ubwc_config->ubwc_swizzle;
return 0;
case MSM_PARAM_MACROTILE_MODE:
- *value = adreno_gpu->ubwc_config.macrotile_mode;
+ *value = adreno_gpu->ubwc_config->macrotile_mode;
return 0;
case MSM_PARAM_UCHE_TRAP_BASE:
*value = adreno_gpu->uche_trap_base;
return 0;
+ case MSM_PARAM_HAS_PRR:
+ *value = adreno_smmu_has_prr(gpu);
+ return 0;
default:
return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
-int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t value, uint32_t len)
{
struct drm_device *drm = gpu->dev;
@@ -440,7 +503,22 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
return UERR(EPERM, drm, "invalid permissions");
- return msm_file_private_set_sysprof(ctx, gpu, value);
+ return msm_context_set_sysprof(ctx, gpu, value);
+ case MSM_PARAM_EN_VM_BIND:
+ /* We can only support VM_BIND with per-process pgtables: */
+ if (ctx->vm == gpu->vm)
+ return UERR(EINVAL, drm, "requires per-process pgtables");
+
+ /*
+ * We can only switch to VM_BIND mode if the VM has not yet
+ * been created:
+ */
+ if (ctx->vm)
+ return UERR(EBUSY, drm, "VM already created");
+
+ ctx->userspace_managed_vm = value;
+
+ return 0;
default:
return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
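From userspace, the new MSM_PARAM_EN_VM_BIND knob would be set through the generic SET_PARAM ioctl before the context's VM is first instantiated; a hedged sketch (assumes the matching <drm/msm_drm.h> uapi definitions are available):

	#include <sys/ioctl.h>
	#include <drm/msm_drm.h>

	/* Opt a fresh context into VM_BIND mode; fails with -EBUSY once
	 * the context's VM has already been created.
	 */
	static int msm_enable_vm_bind(int fd)
	{
		struct drm_msm_param req = {
			.pipe = MSM_PIPE_3D0,
			.param = MSM_PARAM_EN_VM_BIND,
			.value = 1,
		};

		return ioctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
	}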
@@ -562,7 +640,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
void *ptr;
ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
- MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm, &bo, iova);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
@@ -755,6 +833,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
+ kfree(state->vm_logs);
kfree(state->bos);
kfree(state->comm);
kfree(state->cmd);
@@ -895,6 +974,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
}
+ if (state->vm_logs) {
+ drm_puts(p, "vm-log:\n");
+ for (i = 0; i < state->nr_vm_logs; i++) {
+ struct msm_gem_vm_log_entry *e = &state->vm_logs[i];
+ drm_printf(p, " - %s:%d: 0x%016llx-0x%016llx\n",
+ e->op, e->queue_id, e->iova,
+ e->iova + e->range);
+ }
+ }
+
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
drm_puts(p, "ringbuffer:\n");
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 92caba3584da..9dc93c247196 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -12,13 +12,14 @@
#include <linux/firmware.h>
#include <linux/iopoll.h>
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_gpu.h"
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
-extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -57,6 +58,7 @@ enum adreno_family {
#define ADRENO_QUIRK_HAS_HW_APRIV BIT(3)
#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
#define ADRENO_QUIRK_PREEMPTION BIT(5)
+#define ADRENO_QUIRK_4GB_VA BIT(6)
/* Helper for formatting the chip_id in the way that userspace tools like
* crashdec expect.
@@ -104,7 +106,6 @@ struct adreno_info {
union {
const struct a6xx_info *a6xx;
};
- u64 address_space_size;
/**
* @speedbins: Optional table of fuse to speedbin mappings
*
@@ -205,44 +206,12 @@ struct adreno_gpu {
/* firmware: */
const struct firmware *fw[ADRENO_FW_MAX];
- struct {
- /**
- * @rgb565_predicator: Unknown, introduced with A650 family,
- * related to UBWC mode/ver 4
- */
- u32 rgb565_predicator;
- /** @uavflagprd_inv: Unknown, introduced with A650 family */
- u32 uavflagprd_inv;
- /** @min_acc_len: Whether the minimum access length is 64 bits */
- u32 min_acc_len;
- /**
- * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
- *
- * UBWC 1.0 always enables all three levels.
- * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
- * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
- *
- * This is a bitmask where BIT(0) enables level 1, BIT(1)
- * controls level 2, and BIT(2) enables level 3.
- */
- u32 ubwc_swizzle;
- /**
- * @highest_bank_bit: Highest Bank Bit
- *
- * The Highest Bank Bit value represents the bit of the highest
- * DDR bank. This should ideally use DRAM type detection.
- */
- u32 highest_bank_bit;
- u32 amsbc;
- /**
- * @macrotile_mode: Macrotile Mode
- *
- * Whether to use 4-channel macrotiling mode or the newer
- * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
- * 4-channel and 1 is 8-channel.
- */
- u32 macrotile_mode;
- } ubwc_config;
+ /*
+ * The migration to the central UBWC config db is still in flight - keep
+ * a copy containing some local fixups until that's done.
+ */
+ const struct qcom_ubwc_cfg_data *ubwc_config;
+ struct qcom_ubwc_cfg_data _ubwc_config;
/*
* Register offsets are different between some GPUs.
@@ -578,10 +547,12 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
adreno_is_a740_family(gpu);
}
-u64 adreno_private_address_space_size(struct msm_gpu *gpu);
-int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+/* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */
+#define ADRENO_VM_START 0x100000000ULL
+u64 adreno_private_vm_size(struct msm_gpu *gpu);
+int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
-int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -621,19 +592,21 @@ void adreno_show_object(struct drm_printer *p, void **ptr, int len,
* Common helper function to initialize the default address space for arm-smmu
* attached targets
*/
-struct msm_gem_address_space *
-adreno_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev);
+struct drm_gpuvm *
+adreno_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev);
-struct msm_gem_address_space *
-adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev,
- unsigned long quirks);
+struct drm_gpuvm *
+adreno_iommu_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev,
+ unsigned long quirks);
int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
u32 scratch[4]);
+void adreno_check_and_reenable_stall(struct adreno_gpu *gpu);
+
int adreno_read_speedbin(struct device *dev, u32 *speedbin);
/*
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index 6ac97c378056..56d3c38c8778 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -21,43 +21,35 @@ static const struct dpu_caps sm8650_dpu_caps = {
static const struct dpu_mdp_cfg sm8650_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8650_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1000,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1000,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -140,7 +132,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -148,7 +140,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -156,7 +148,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -164,7 +156,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -172,14 +164,14 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -190,22 +182,18 @@ static const struct dpu_dspp_cfg sm8650_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -214,67 +202,57 @@ static const struct dpu_pingpong_cfg sm8650_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
.base = 0x7e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
}, {
.name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
.base = 0x7e400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
},
@@ -308,32 +286,30 @@ static const struct dpu_dsc_cfg sm8650_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_2_0", .id = DSC_4,
.base = 0x82000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_2_1", .id = DSC_5,
.base = 0x82000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
},
};
@@ -342,7 +318,7 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -375,7 +351,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -384,7 +359,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -394,7 +368,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -404,7 +377,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h
new file mode 100644
index 000000000000..db8cc2d0112c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h
@@ -0,0 +1,494 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Linaro Limited
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_12_0_SM8750_H
+#define _DPU_12_0_SM8750_H
+
+static const struct dpu_caps sm8750_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 8192,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8750_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm8750_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8750_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sm8750_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ }, {
+ .name = "lm_6", .id = LM_6,
+ .base = 0x4a000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_7,
+ .pingpong = PINGPONG_6,
+ }, {
+ .name = "lm_7", .id = LM_7,
+ .base = 0x4b000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_6,
+ .pingpong = PINGPONG_7,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8750_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8750_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x6f000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 20),
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x70000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 21),
+ }, {
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
+ .base = 0x66000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
+ .base = 0x66400, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
+ .base = 0x7e000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_5,
+ }, {
+ .name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
+ .base = 0x7e400, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_5,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8750_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x51000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_4", .id = MERGE_3D_4,
+ .base = 0x66700, .len = 0x1c,
+ }, {
+ .name = "merge_3d_5", .id = MERGE_3D_5,
+ .base = 0x7e700, .len = 0x1c,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains two hard-slice
+ * DSC encoders, so both share the same base address but have their
+ * own sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sm8750_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_3_0", .id = DSC_6,
+ .base = 0x83000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_3_1", .id = DSC_7,
+ .base = 0x83000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sm8750_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_cwb_cfg sm8750_cwb[] = {
+ {
+ .name = "cwb_0", .id = CWB_0,
+ .base = 0x66200, .len = 0x20,
+ },
+ {
+ .name = "cwb_1", .id = CWB_1,
+ .base = 0x66600, .len = 0x20,
+ },
+ {
+ .name = "cwb_2", .id = CWB_2,
+ .base = 0x7e200, .len = 0x20,
+ },
+ {
+ .name = "cwb_3", .id = CWB_3,
+ .base = 0x7e600, .len = 0x20,
+ },
+};
+
+static const struct dpu_intf_cfg sm8750_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x4bc,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x4bc,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x4bc,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x4bc,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sm8750_perf_data = {
+ .max_bw_low = 18900000,
+ .max_bw_high = 28500000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8750_mdss_ver = {
+ .core_major_ver = 12,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sm8750_cfg = {
+ .mdss_ver = &sm8750_mdss_ver,
+ .caps = &sm8750_dpu_caps,
+ .mdp = &sm8750_mdp,
+ .cdm = &dpu_cdm_5_x,
+ .ctl_count = ARRAY_SIZE(sm8750_ctl),
+ .ctl = sm8750_ctl,
+ .sspp_count = ARRAY_SIZE(sm8750_sspp),
+ .sspp = sm8750_sspp,
+ .mixer_count = ARRAY_SIZE(sm8750_lm),
+ .mixer = sm8750_lm,
+ .dspp_count = ARRAY_SIZE(sm8750_dspp),
+ .dspp = sm8750_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8750_pp),
+ .pingpong = sm8750_pp,
+ .dsc_count = ARRAY_SIZE(sm8750_dsc),
+ .dsc = sm8750_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8750_merge_3d),
+ .merge_3d = sm8750_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8750_wb),
+ .wb = sm8750_wb,
+ .cwb_count = ARRAY_SIZE(sm8750_cwb),
+ .cwb = sm8750_cwb,
+ .intf_count = ARRAY_SIZE(sm8750_intf),
+ .intf = sm8750_intf,
+ .vbif_count = ARRAY_SIZE(sm8650_vbif),
+ .vbif = sm8650_vbif,
+ .perf = &sm8750_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index ad60089f18ea..29e0eba91930 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -19,7 +19,6 @@ static const struct dpu_mdp_cfg msm8937_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
@@ -100,14 +99,12 @@ static const struct dpu_pingpong_cfg msm8937_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
@@ -118,7 +115,6 @@ static const struct dpu_dspp_cfg msm8937_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index a1cf89a0a42d..cb1ee4b63f9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -19,7 +19,6 @@ static const struct dpu_mdp_cfg msm8917_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
@@ -93,7 +92,6 @@ static const struct dpu_pingpong_cfg msm8917_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
@@ -104,7 +102,6 @@ static const struct dpu_dspp_cfg msm8917_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index eea9b80e2287..b44d02b48418 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -19,7 +19,6 @@ static const struct dpu_mdp_cfg msm8953_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
@@ -100,14 +99,12 @@ static const struct dpu_pingpong_cfg msm8953_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
@@ -118,7 +115,6 @@ static const struct dpu_dspp_cfg msm8953_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index ae18a354e5d2..8af63db315b4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -22,7 +22,6 @@ static const struct dpu_mdp_cfg msm8996_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -181,28 +180,24 @@ static const struct dpu_pingpong_cfg msm8996_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_TE2_MASK,
- .sblk = &msm8996_pp_sblk_te,
+ .sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_TE2_MASK,
- .sblk = &msm8996_pp_sblk_te,
+ .sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
@@ -223,12 +218,10 @@ static const struct dpu_dspp_cfg msm8996_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 746474679ef5..f91220496082 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -23,7 +23,6 @@ static const struct dpu_caps msm8998_dpu_caps = {
static const struct dpu_mdp_cfg msm8998_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x458,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -170,28 +169,24 @@ static const struct dpu_pingpong_cfg msm8998_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
@@ -212,12 +207,10 @@ static const struct dpu_dspp_cfg msm8998_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index bb89da0a481d..8f9a097147c0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -22,7 +22,6 @@ static const struct dpu_caps sdm660_dpu_caps = {
static const struct dpu_mdp_cfg sdm660_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x458,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -141,28 +140,24 @@ static const struct dpu_pingpong_cfg sdm660_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
@@ -183,12 +178,10 @@ static const struct dpu_dspp_cfg sdm660_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index 7caf876ca3e3..0ad18bd273ff 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -22,7 +22,6 @@ static const struct dpu_caps sdm630_dpu_caps = {
static const struct dpu_mdp_cfg sdm630_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x458,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
@@ -115,14 +114,12 @@ static const struct dpu_pingpong_cfg sdm630_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
@@ -133,7 +130,6 @@ static const struct dpu_dspp_cfg sdm630_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index ab7b4822ca63..5cc9f55d542b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sdm845_dpu_caps = {
static const struct dpu_mdp_cfg sdm845_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT) | BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -134,7 +133,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -142,7 +141,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -150,7 +149,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_2,
@@ -158,7 +157,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -170,22 +169,18 @@ static const struct dpu_dspp_cfg sdm845_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -194,28 +189,24 @@ static const struct dpu_pingpong_cfg sdm845_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index c2fde980fb52..0f5e9babdeea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -11,7 +11,6 @@
static const struct dpu_mdp_cfg sdm670_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -69,7 +68,7 @@ static const struct dpu_lm_cfg sdm670_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -77,7 +76,7 @@ static const struct dpu_lm_cfg sdm670_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -85,14 +84,14 @@ static const struct dpu_lm_cfg sdm670_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_2,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -103,12 +102,10 @@ static const struct dpu_dspp_cfg sdm670_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 979527d98fbc..ae1b2ed96e9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sm8150_dpu_caps = {
static const struct dpu_mdp_cfg sm8150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -37,37 +36,30 @@ static const struct dpu_mdp_cfg sm8150_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -76,7 +68,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -84,7 +76,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -92,7 +84,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -100,7 +92,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -108,7 +100,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -116,7 +108,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -124,7 +116,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -132,7 +124,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
@@ -144,7 +136,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -152,7 +144,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -160,7 +152,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -168,7 +160,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -176,14 +168,14 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -194,22 +186,18 @@ static const struct dpu_dspp_cfg sm8150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -218,42 +206,36 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -277,19 +259,15 @@ static const struct dpu_dsc_cfg sm8150_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_2", .id = DSC_2,
.base = 0x80800, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_3", .id = DSC_3,
.base = 0x80c00, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -297,7 +275,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -312,7 +290,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -321,7 +298,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -331,7 +307,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -341,7 +316,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index d76b8992a6c1..b572cfa7ed35 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sc8180x_dpu_caps = {
static const struct dpu_mdp_cfg sc8180x_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -41,32 +40,26 @@ static const struct dpu_ctl_cfg sc8180x_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -75,7 +68,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -83,7 +76,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -91,7 +84,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -99,7 +92,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -107,7 +100,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -115,7 +108,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -123,7 +116,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -131,7 +124,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
@@ -143,7 +136,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -151,7 +144,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -159,7 +152,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -167,7 +160,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -175,14 +168,14 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -193,22 +186,18 @@ static const struct dpu_dspp_cfg sc8180x_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -217,42 +206,36 @@ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -276,27 +259,21 @@ static const struct dpu_dsc_cfg sc8180x_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_2", .id = DSC_2,
.base = 0x80800, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_3", .id = DSC_3,
.base = 0x80c00, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_4", .id = DSC_4,
.base = 0x81000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_5", .id = DSC_5,
.base = 0x81400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -304,7 +281,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -319,7 +296,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -328,7 +304,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -338,7 +313,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -350,7 +324,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = 999,
.prog_fetch_lines_worst_case = 24,
@@ -359,7 +332,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x6c000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -368,7 +340,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x6c800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -383,6 +354,7 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
+ .min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 83db11339b29..a56c288ac10c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sm7150_dpu_caps = {
static const struct dpu_mdp_cfg sm7150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -38,32 +37,26 @@ static const struct dpu_ctl_cfg sm7150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -72,7 +65,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -80,7 +73,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -88,7 +81,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -96,7 +89,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -104,7 +97,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -116,7 +109,7 @@ static const struct dpu_lm_cfg sm7150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -124,7 +117,7 @@ static const struct dpu_lm_cfg sm7150_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -132,14 +125,14 @@ static const struct dpu_lm_cfg sm7150_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -150,12 +143,10 @@ static const struct dpu_dspp_cfg sm7150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -164,28 +155,24 @@ static const struct dpu_pingpong_cfg sm7150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
@@ -206,11 +193,9 @@ static const struct dpu_dsc_cfg sm7150_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -218,7 +203,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -227,7 +211,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -237,7 +220,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -247,7 +229,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -260,7 +241,7 @@ static const struct dpu_wb_cfg sm7150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
index da11830d4407..26883f6b66b3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -20,7 +20,6 @@ static const struct dpu_caps sm6150_dpu_caps = {
static const struct dpu_mdp_cfg sm6150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = 0,
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
@@ -35,32 +34,26 @@ static const struct dpu_ctl_cfg sm6150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -113,20 +106,17 @@ static const struct dpu_lm_cfg sm6150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_2,
},
@@ -136,7 +126,6 @@ static const struct dpu_dspp_cfg sm6150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -145,19 +134,16 @@ static const struct dpu_pingpong_cfg sm6150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
},
@@ -167,7 +153,7 @@ static const struct dpu_wb_cfg sm6150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -182,7 +168,6 @@ static const struct dpu_intf_cfg sm6150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -191,7 +176,6 @@ static const struct dpu_intf_cfg sm6150_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -201,7 +185,6 @@ static const struct dpu_intf_cfg sm6150_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index d3d3a34d0b45..fbf50f279e66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -22,7 +22,6 @@ static const struct dpu_caps sm6125_dpu_caps = {
static const struct dpu_mdp_cfg sm6125_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = 0,
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
@@ -35,32 +34,26 @@ static const struct dpu_ctl_cfg sm6125_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -69,7 +62,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -77,7 +70,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -85,7 +78,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -97,7 +90,6 @@ static const struct dpu_lm_cfg sm6125_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
@@ -105,7 +97,6 @@ static const struct dpu_lm_cfg sm6125_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
.dspp = 0,
@@ -117,7 +108,6 @@ static const struct dpu_dspp_cfg sm6125_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -126,14 +116,12 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.merge_3d = 0,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.merge_3d = 0,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
@@ -144,7 +132,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -159,7 +147,6 @@ static const struct dpu_intf_cfg sm6125_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -168,7 +155,6 @@ static const struct dpu_intf_cfg sm6125_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = 0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index 47e01c3c242f..7b8b7a1c2d76 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -35,37 +35,30 @@ static const struct dpu_mdp_cfg sm8250_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8250_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -142,7 +135,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -150,7 +143,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -158,7 +151,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -166,7 +159,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -174,14 +167,14 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -192,22 +185,18 @@ static const struct dpu_dspp_cfg sm8250_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -216,42 +205,36 @@ static const struct dpu_pingpong_cfg sm8250_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -275,19 +258,15 @@ static const struct dpu_dsc_cfg sm8250_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_2", .id = DSC_2,
.base = 0x80800, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_3", .id = DSC_3,
.base = 0x80c00, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -295,7 +274,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -304,7 +282,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -314,7 +291,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -324,7 +300,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -337,7 +312,7 @@ static const struct dpu_wb_cfg sm8250_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 040c94c0bb66..c990ba3b5db0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -32,17 +32,14 @@ static const struct dpu_ctl_cfg sc7180_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
},
};
@@ -51,7 +48,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -59,7 +56,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -67,7 +64,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -75,7 +72,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -87,7 +84,7 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -95,7 +92,7 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -106,7 +103,6 @@ static const struct dpu_dspp_cfg sc7180_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -115,14 +111,12 @@ static const struct dpu_pingpong_cfg sc7180_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
@@ -133,7 +127,6 @@ static const struct dpu_intf_cfg sc7180_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -142,7 +135,6 @@ static const struct dpu_intf_cfg sc7180_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -156,7 +148,7 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index 43f64a005f5a..343ff5482382 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -29,7 +29,6 @@ static const struct dpu_ctl_cfg sm6115_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
@@ -38,7 +37,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -46,7 +45,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -58,7 +57,6 @@ static const struct dpu_lm_cfg sm6115_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &qcm2290_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
@@ -69,7 +67,6 @@ static const struct dpu_dspp_cfg sm6115_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -78,7 +75,6 @@ static const struct dpu_pingpong_cfg sm6115_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -89,7 +85,6 @@ static const struct dpu_intf_cfg sm6115_intf[] = {
{
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 397278ba999b..093d16bdc450 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -35,22 +35,18 @@ static const struct dpu_ctl_cfg sm6350_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
};
@@ -59,7 +55,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -67,7 +63,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -75,7 +71,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -83,7 +79,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -95,7 +91,7 @@ static const struct dpu_lm_cfg sm6350_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -103,7 +99,7 @@ static const struct dpu_lm_cfg sm6350_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -115,7 +111,6 @@ static const struct dpu_dspp_cfg sm6350_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -124,14 +119,12 @@ static struct dpu_pingpong_cfg sm6350_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
@@ -142,7 +135,6 @@ static const struct dpu_dsc_cfg sm6350_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -150,7 +142,7 @@ static const struct dpu_wb_cfg sm6350_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -165,7 +157,6 @@ static const struct dpu_intf_cfg sm6350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 35,
@@ -174,7 +165,6 @@ static const struct dpu_intf_cfg sm6350_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 35,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index 3cbb2fe8aba2..47053bf9b0a2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -29,7 +29,6 @@ static const struct dpu_ctl_cfg qcm2290_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
@@ -46,7 +45,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -58,7 +57,6 @@ static const struct dpu_lm_cfg qcm2290_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &qcm2290_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
@@ -69,7 +67,6 @@ static const struct dpu_dspp_cfg qcm2290_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -78,7 +75,6 @@ static const struct dpu_pingpong_cfg qcm2290_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -89,7 +85,6 @@ static const struct dpu_intf_cfg qcm2290_intf[] = {
{
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index a06c8634d2d7..98190ee7ec7a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -30,7 +30,6 @@ static const struct dpu_ctl_cfg sm6375_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
@@ -39,7 +38,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -47,7 +46,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -59,7 +58,6 @@ static const struct dpu_lm_cfg sm6375_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &qcm2290_lm_sblk,
.lm_pair = 0,
.pingpong = PINGPONG_0,
@@ -71,7 +69,6 @@ static const struct dpu_dspp_cfg sm6375_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -80,7 +77,6 @@ static const struct dpu_pingpong_cfg sm6375_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -91,7 +87,6 @@ static const struct dpu_dsc_cfg sm6375_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -99,7 +94,6 @@ static const struct dpu_intf_cfg sm6375_intf[] = {
{
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 0c860e804cab..85aae40c210f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -35,37 +35,30 @@ static const struct dpu_mdp_cfg sm8350_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8350_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -142,7 +135,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -150,7 +143,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -158,7 +151,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -166,7 +159,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -174,14 +167,14 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -192,22 +185,18 @@ static const struct dpu_dspp_cfg sm8350_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -216,42 +205,36 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -280,22 +263,20 @@ static const struct dpu_dsc_cfg sm8350_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -304,7 +285,7 @@ static const struct dpu_wb_cfg sm8350_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -319,7 +300,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -328,7 +308,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x2c4,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -338,7 +317,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x2c4,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -348,7 +326,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index e9625c48c567..8f978b9c3452 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -32,22 +32,18 @@ static const struct dpu_ctl_cfg sc7280_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
};
@@ -92,21 +88,21 @@ static const struct dpu_lm_cfg sc7280_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -117,7 +113,6 @@ static const struct dpu_dspp_cfg sc7280_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -126,28 +121,24 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
@@ -159,7 +150,7 @@ static const struct dpu_dsc_cfg sc7280_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
},
};
@@ -168,7 +159,7 @@ static const struct dpu_wb_cfg sc7280_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -183,7 +174,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -192,7 +182,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x2c4,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -202,7 +191,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index fcee1c3665f8..303d33dc7783 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sc8280xp_dpu_caps = {
static const struct dpu_mdp_cfg sc8280xp_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -35,37 +34,30 @@ static const struct dpu_mdp_cfg sc8280xp_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -74,7 +66,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -82,7 +74,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -90,7 +82,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -98,7 +90,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -106,7 +98,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x2ac,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -114,7 +106,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x2ac,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -122,7 +114,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x2ac,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -130,7 +122,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x2ac,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
@@ -142,7 +134,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -150,7 +142,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -158,7 +150,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -166,7 +158,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -174,14 +166,14 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -192,22 +184,18 @@ static const struct dpu_dspp_cfg sc8280xp_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -216,42 +204,36 @@ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -280,32 +262,28 @@ static const struct dpu_dsc_cfg sc8280xp_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_2_0", .id = DSC_4,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_2_1", .id = DSC_5,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
},
};
@@ -315,7 +293,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -324,7 +301,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -334,7 +310,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -344,7 +319,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -353,7 +327,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -362,7 +335,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_3,
.prog_fetch_lines_worst_case = 24,
@@ -371,7 +343,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3a000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -380,7 +351,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -389,7 +359,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 19b2ee8bbd5f..b09a6af4c474 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sm8450_dpu_caps = {
static const struct dpu_mdp_cfg sm8450_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -36,37 +35,30 @@ static const struct dpu_mdp_cfg sm8450_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8450_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -143,7 +135,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -151,7 +143,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -159,7 +151,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -167,7 +159,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -175,14 +167,14 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -193,22 +185,18 @@ static const struct dpu_dspp_cfg sm8450_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -217,55 +205,47 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -296,22 +276,20 @@ static const struct dpu_dsc_cfg sm8450_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -320,7 +298,7 @@ static const struct dpu_wb_cfg sm8450_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -335,7 +313,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -344,7 +321,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -354,7 +330,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -364,7 +339,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 4d96ce71746f..0f7b4a224e4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -20,7 +20,6 @@ static const struct dpu_caps sa8775p_dpu_caps = {
static const struct dpu_mdp_cfg sa8775p_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -35,37 +34,30 @@ static const struct dpu_mdp_cfg sa8775p_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sa8775p_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -142,7 +134,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -150,7 +142,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -158,7 +150,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -166,7 +158,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -174,14 +166,14 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -192,22 +184,18 @@ static const struct dpu_dspp_cfg sa8775p_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -216,55 +204,47 @@ static const struct dpu_pingpong_cfg sa8775p_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_6", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_7", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -295,32 +275,28 @@ static const struct dpu_dsc_cfg sa8775p_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_2_0", .id = DSC_4,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_2_1", .id = DSC_5,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
},
};
@@ -329,7 +305,7 @@ static const struct dpu_wb_cfg sa8775p_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -345,7 +321,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -354,7 +329,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -364,7 +338,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -374,7 +347,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -383,7 +355,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -392,7 +363,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3A000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -401,7 +371,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -410,7 +379,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 24f988465bf6..465b6460f875 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -21,43 +21,35 @@ static const struct dpu_caps sm8550_dpu_caps = {
static const struct dpu_mdp_cfg sm8550_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8550_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -66,70 +58,70 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_12", .id = SSPP_DMA4,
.base = 0x2c000, .len = 0x344,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 14,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_13", .id = SSPP_DMA5,
.base = 0x2e000, .len = 0x344,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 15,
.type = SSPP_TYPE_DMA,
@@ -140,7 +132,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -148,7 +140,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -156,7 +148,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -164,7 +156,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -172,14 +164,14 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -190,22 +182,18 @@ static const struct dpu_dspp_cfg sm8550_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -213,55 +201,47 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -292,22 +272,20 @@ static const struct dpu_dsc_cfg sm8550_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -316,7 +294,7 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -330,7 +308,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -339,7 +316,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -349,7 +325,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -359,7 +334,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
new file mode 100644
index 000000000000..6caa7d40f368
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_9_1_SAR2130P_H
+#define _DPU_9_1_SAR2130P_H
+
+static const struct dpu_caps sar2130p_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sar2130p_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+static const struct dpu_ctl_cfg sar2130p_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x290,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x290,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x290,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x290,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x290,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x290,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sar2130p_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sar2130p_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sar2130p_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sar2130p_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
+ .base = 0x66000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
+ .base = 0x66400, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sar2130p_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard
+ * slice DSC encoders, so both share the same base address but
+ * each has its own sub block address.
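+ * For example, dce_0_0 (DSC_0) and dce_0_1 (DSC_1) below both sit
+ * at base 0x80000 and are distinguished only by their sub blocks,
+ * dsc_sblk_0 and dsc_sblk_1.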
+ */
+static const struct dpu_dsc_cfg sar2130p_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sar2130p_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg sar2130p_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sar2130p_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 0, .wr_enable = 0},
+ {.rd_enable = 0, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
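/*
 * Editor's sketch (illustrative, not part of the patch): the two
 * inefficiency factors are percentages that the dpu_core_perf code
 * applies as safety margins to computed loads, roughly as below.
 */
static u64 dpu_apply_margin(u64 load, u32 factor_pct)
{
	/* 105 => +5% on clock rates, 120 => +20% on bandwidth */
	return mult_frac(load, factor_pct, 100);
}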
+
+static const struct dpu_mdss_version sar2130p_mdss_ver = {
+ .core_major_ver = 9,
+ .core_minor_ver = 1,
+};
+
+const struct dpu_mdss_cfg dpu_sar2130p_cfg = {
+ .mdss_ver = &sar2130p_mdss_ver,
+ .caps = &sar2130p_dpu_caps,
+ .mdp = &sar2130p_mdp,
+ .cdm = &dpu_cdm_5_x,
+ .ctl_count = ARRAY_SIZE(sar2130p_ctl),
+ .ctl = sar2130p_ctl,
+ .sspp_count = ARRAY_SIZE(sar2130p_sspp),
+ .sspp = sar2130p_sspp,
+ .mixer_count = ARRAY_SIZE(sar2130p_lm),
+ .mixer = sar2130p_lm,
+ .dspp_count = ARRAY_SIZE(sar2130p_dspp),
+ .dspp = sar2130p_dspp,
+ .pingpong_count = ARRAY_SIZE(sar2130p_pp),
+ .pingpong = sar2130p_pp,
+ .dsc_count = ARRAY_SIZE(sar2130p_dsc),
+ .dsc = sar2130p_dsc,
+ .merge_3d_count = ARRAY_SIZE(sar2130p_merge_3d),
+ .merge_3d = sar2130p_merge_3d,
+ .wb_count = ARRAY_SIZE(sar2130p_wb),
+ .wb = sar2130p_wb,
+ .intf_count = ARRAY_SIZE(sar2130p_intf),
+ .intf = sar2130p_intf,
+ .vbif_count = ARRAY_SIZE(sm8550_vbif),
+ .vbif = sm8550_vbif,
+ .perf = &sar2130p_perf_data,
+};
+
+#endif
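/*
 * Editor's sketch (hypothetical, not part of the patch): a catalog
 * header added this way is typically wired into the DPU device table
 * in dpu_kms.c, along the lines of
 *
 *	{ .compatible = "qcom,sar2130p-dpu", .data = &dpu_sar2130p_cfg, },
 *
 * with dpu_sar2130p_cfg declared extern in dpu_hw_catalog.h (see the
 * dpu_hw_catalog.h hunk later in this patch).
 */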
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 6417baa84f82..7243eebb85f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -20,43 +20,35 @@ static const struct dpu_caps x1e80100_dpu_caps = {
static const struct dpu_mdp_cfg x1e80100_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg x1e80100_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -139,7 +131,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -147,7 +139,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -155,7 +147,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -163,7 +155,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -171,14 +163,14 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -189,22 +181,18 @@ static const struct dpu_dspp_cfg x1e80100_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -213,55 +201,47 @@ static const struct dpu_pingpong_cfg x1e80100_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -292,22 +272,20 @@ static const struct dpu_dsc_cfg x1e80100_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -316,7 +294,7 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -331,7 +309,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -340,7 +317,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -350,7 +326,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -360,7 +335,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -369,7 +343,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -378,7 +351,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_3,
.prog_fetch_lines_worst_case = 24,
@@ -387,7 +359,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3A000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -396,7 +367,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -405,7 +375,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 0714936d8835..d4b545448d74 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -320,14 +320,22 @@ static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
}
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
- struct dpu_plane_state *pstate, const struct msm_format *format)
+ struct dpu_plane_state *pstate,
+ const struct msm_format *format,
+ const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
- uint32_t blend_op;
- uint32_t fg_alpha, bg_alpha;
+ u32 blend_op;
+ u32 fg_alpha, bg_alpha, max_alpha;
- fg_alpha = pstate->base.alpha >> 8;
- bg_alpha = 0xff - fg_alpha;
+ if (mdss_ver->core_major_ver < 12) {
+ max_alpha = 0xff;
+ fg_alpha = pstate->base.alpha >> 8;
+ } else {
+ max_alpha = 0x3ff;
+ fg_alpha = pstate->base.alpha >> 6;
+ }
+ bg_alpha = max_alpha - fg_alpha;
/* default to opaque blending */
if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
@@ -337,7 +345,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
DPU_BLEND_BG_ALPHA_FG_PIXEL;
- if (fg_alpha != 0xff) {
+ if (fg_alpha != max_alpha) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_BG_MOD_ALPHA |
DPU_BLEND_BG_INV_MOD_ALPHA;
@@ -348,7 +356,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
/* coverage blending */
blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
DPU_BLEND_BG_ALPHA_FG_PIXEL;
- if (fg_alpha != 0xff) {
+ if (fg_alpha != max_alpha) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_FG_MOD_ALPHA |
DPU_BLEND_FG_INV_MOD_ALPHA |
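/*
 * Editor's illustration (not part of the patch): DRM plane alpha is
 * 16 bits wide (0x0000..0xffff). Pre-12.0 DPU consumes an 8-bit
 * constant alpha while DPU >= 12.0 consumes a 10-bit one, hence the
 * two shifts above: 0x8000 >> 8 == 0x80 (of max 0xff) versus
 * 0x8000 >> 6 == 0x200 (of max 0x3ff), both roughly 50% opacity.
 */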
@@ -402,7 +410,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
struct dpu_hw_stage_cfg *stage_cfg
)
{
- uint32_t lm_idx;
+ u32 lm_idx;
enum dpu_sspp sspp_idx;
struct drm_plane_state *state;
@@ -442,12 +450,13 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_plane_state *pstate = NULL;
const struct msm_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
-
- uint32_t lm_idx;
+ u32 lm_idx;
bool bg_alpha_enable = false;
- DECLARE_BITMAP(fetch_active, SSPP_MAX);
+ DECLARE_BITMAP(active_fetch, SSPP_MAX);
+ DECLARE_BITMAP(active_pipes, SSPP_MAX);
- memset(fetch_active, 0, sizeof(fetch_active));
+ memset(active_fetch, 0, sizeof(active_fetch));
+ memset(active_pipes, 0, sizeof(active_pipes));
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
@@ -464,7 +473,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
- set_bit(pstate->pipe.sspp->idx, fetch_active);
+ set_bit(pstate->pipe.sspp->idx, active_fetch);
+ set_bit(pstate->pipe.sspp->idx, active_pipes);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -472,7 +482,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
&pstate->pipe, 0, stage_cfg);
if (pstate->r_pipe.sspp) {
- set_bit(pstate->r_pipe.sspp->idx, fetch_active);
+ set_bit(pstate->r_pipe.sspp->idx, active_fetch);
+ set_bit(pstate->r_pipe.sspp->idx, active_pipes);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -482,7 +493,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
/* blend config update */
for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
- _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format);
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format,
+ ctl->mdss_ver);
if (bg_alpha_enable && !format->alpha_enable)
mixer[lm_idx].mixer_op_mode = 0;
@@ -492,8 +504,11 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
}
}
+ if (ctl->ops.set_active_fetch_pipes)
+ ctl->ops.set_active_fetch_pipes(ctl, active_fetch);
+
if (ctl->ops.set_active_pipes)
- ctl->ops.set_active_pipes(ctl, fetch_active);
+ ctl->ops.set_active_pipes(ctl, active_pipes);
_dpu_crtc_program_lm_output_roi(crtc);
}
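/*
 * Editor's sketch (illustrative, not part of the patch): the two
 * bitmaps now feed two different CTL registers -- active_fetch ends up
 * in CTL_FETCH_PIPE_ACTIVE via set_active_fetch_pipes(), active_pipes
 * in the new CTL_PIPE_ACTIVE (0x12c) via set_active_pipes() on DPU 12+.
 * A caller builds both the same way; names below are hypothetical.
 */
DECLARE_BITMAP(pipes, SSPP_MAX);

bitmap_zero(pipes, SSPP_MAX);
set_bit(SSPP_VIG0, pipes);		/* mark one pipe as in use */
if (ctl->ops.set_active_fetch_pipes)
	ctl->ops.set_active_fetch_pipes(ctl, pipes);
if (ctl->ops.set_active_pipes)
	ctl->ops.set_active_pipes(ctl, pipes);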
@@ -510,6 +525,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
struct dpu_hw_stage_cfg stage_cfg;
+ DECLARE_BITMAP(active_lms, LM_MAX);
int i;
DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
@@ -519,10 +535,18 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl);
+ if (mixer[i].lm_ctl->ops.set_active_fetch_pipes)
+ mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL);
+ if (mixer[i].lm_ctl->ops.set_active_pipes)
+ mixer[i].lm_ctl->ops.set_active_pipes(mixer[i].lm_ctl, NULL);
+
+ if (mixer[i].hw_lm->ops.clear_all_blendstages)
+ mixer[i].hw_lm->ops.clear_all_blendstages(mixer[i].hw_lm);
}
/* initialize stage cfg */
memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+ memset(active_lms, 0, sizeof(active_lms));
_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
@@ -536,13 +560,22 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
ctl->ops.update_pending_flush_mixer(ctl,
mixer[i].hw_lm->idx);
+ set_bit(lm->idx, active_lms);
+ if (ctl->ops.set_active_lms)
+ ctl->ops.set_active_lms(ctl, active_lms);
+
DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
ctl->idx - CTL_0);
- ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
- &stage_cfg);
+ if (ctl->ops.setup_blendstage)
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &stage_cfg);
+
+ if (lm->ops.setup_blendstage)
+ lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx,
+ &stage_cfg);
}
}
@@ -709,7 +742,7 @@ void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
fevent->event = event;
fevent->crtc = crtc;
fevent->ts = ktime_get();
- kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
+ kthread_queue_work(priv->kms->event_thread[crtc_id].worker, &fevent->work);
}
/**
@@ -878,7 +911,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
dev = crtc->dev;
priv = dev->dev_private;
- if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ if (crtc->index >= ARRAY_SIZE(priv->kms->event_thread)) {
DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 862e9e6bf0a5..05e5f3463e30 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -264,7 +264,7 @@ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
mode = &phys_enc->cached_mode;
return phys_enc->hw_intf->cap->type == INTF_DP &&
- msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
+ msm_dp_needs_periph_flush(priv->kms->dp[disp_info->h_tile_instance[0]], mode);
}
/**
@@ -283,9 +283,9 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
index = disp_info->h_tile_instance[0];
if (disp_info->intf_type == INTF_DP)
- return msm_dp_wide_bus_available(priv->dp[index]);
+ return msm_dp_wide_bus_available(priv->kms->dp[index]);
else if (disp_info->intf_type == INTF_DSI)
- return msm_dsi_wide_bus_enabled(priv->dsi[index]);
+ return msm_dsi_wide_bus_enabled(priv->kms->dsi[index]);
return false;
}
@@ -647,7 +647,7 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
int index = dpu_enc->disp_info.h_tile_instance[0];
if (dpu_enc->disp_info.intf_type == INTF_DSI)
- return msm_dsi_get_dsc_config(priv->dsi[index]);
+ return msm_dsi_get_dsc_config(priv->kms->dsi[index]);
return NULL;
}
@@ -709,7 +709,8 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
topology->num_cdm++;
} else if (disp_info->intf_type == INTF_DP) {
- if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
+ if (msm_dp_is_yuv_420_enabled(priv->kms->dp[disp_info->h_tile_instance[0]],
+ adj_mode))
topology->num_cdm++;
}
}
@@ -980,7 +981,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
- queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+ queue_delayed_work(priv->kms->wq, &dpu_enc->delayed_off_work,
msecs_to_jiffies(dpu_enc->idle_timeout));
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
@@ -1246,7 +1247,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
return;
}
- phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
+ /* Use first (and only) CTL if active CTLs are supported */
+ if (num_ctl == 1)
+ phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[0]);
+ else
+ phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
if (!phys->hw_ctl) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
@@ -2190,6 +2195,18 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
/* clear all blendstages */
if (ctl->ops.setup_blendstage)
ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+
+ if (hw_mixer[i]->ops.clear_all_blendstages)
+ hw_mixer[i]->ops.clear_all_blendstages(hw_mixer[i]);
+
+ if (ctl->ops.set_active_lms)
+ ctl->ops.set_active_lms(ctl, NULL);
+
+ if (ctl->ops.set_active_fetch_pipes)
+ ctl->ops.set_active_fetch_pipes(ctl, NULL);
+
+ if (ctl->ops.set_active_pipes)
+ ctl->ops.set_active_pipes(ctl, NULL);
}
}
@@ -2686,8 +2703,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
- struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
- frame_done_timer);
+ struct dpu_encoder_virt *dpu_enc = timer_container_of(dpu_enc, t,
+ frame_done_timer);
struct drm_encoder *drm_enc = &dpu_enc->base;
u32 event;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index da9994a79ca2..0ec6d67c7c70 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -60,6 +60,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
return;
intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->split_role == ENC_ROLE_MASTER)
+ intf_cfg.intf_master = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
@@ -67,7 +69,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
- if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5 &&
+ phys_enc->hw_intf->ops.bind_pingpong_blk)
phys_enc->hw_intf->ops.bind_pingpong_blk(
phys_enc->hw_intf,
phys_enc->hw_pp->idx);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index abd6600046cb..0ba777bda253 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -94,17 +94,21 @@ static void drm_mode_to_intf_timing_params(
timing->vsync_polarity = 0;
}
- /* for DP/EDP, Shift timings to align it to bottom right */
- if (phys_enc->hw_intf->cap->type == INTF_DP) {
+ timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+ timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
+
+ /*
+ * For DP/EDP, shift timings to align them to the bottom right.
+ * wide_bus_en is set for everything excluding SDM845, where
+ * porch changes cause DisplayPort failures and HDMI tearing.
+ */
+ if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
timing->h_back_porch += timing->h_front_porch;
timing->h_front_porch = 0;
timing->v_back_porch += timing->v_front_porch;
timing->v_front_porch = 0;
}
- timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
- timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
-
/*
* for DP, divide the horizontal parameters by 2 when
* widebus is enabled
@@ -298,6 +302,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
if (phys_enc->hw_cdm)
intf_cfg.cdm = phys_enc->hw_cdm->idx;
intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->split_role == ENC_ROLE_MASTER)
+ intf_cfg.intf_master = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
@@ -307,8 +313,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
- &timing_params, fmt,
- phys_enc->dpu_kms->catalog->mdss_ver);
+ &timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
@@ -372,7 +377,8 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg)
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return phys_enc->split_role != ENC_ROLE_SOLO;
+ return !(phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5) &&
+ phys_enc->split_role != ENC_ROLE_SOLO;
}
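/*
 * Editor's note (illustrative, not part of the patch): on DPU >= 5.0
 * each interface owns an active CTL, so slave encoders no longer
 * piggyback on the master's flush. The helper now reads roughly as:
 *
 *   core >= 5                -> false (per-INTF CTL, flush independently)
 *   core < 5, ENC_ROLE_SOLO  -> false
 *   core < 5, MASTER/SLAVE   -> true  (legacy split display, single flush)
 */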
static void dpu_encoder_phys_vid_atomic_mode_set(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 849fea580a4c..56a5b596554d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -218,7 +218,6 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
- struct dpu_hw_ctl *ctl;
struct dpu_hw_cdm *hw_cdm;
if (!phys_enc) {
@@ -227,10 +226,9 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
}
hw_wb = phys_enc->hw_wb;
- ctl = phys_enc->hw_ctl;
hw_cdm = phys_enc->hw_cdm;
- if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5 &&
(phys_enc->hw_ctl &&
phys_enc->hw_ctl->ops.setup_intf_cfg)) {
struct dpu_hw_intf_cfg intf_cfg = {0};
@@ -534,7 +532,6 @@ static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
- struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
@@ -556,7 +553,7 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
* WB support is added to those targets will need to add
* the legacy teardown sequence as well.
*/
- if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5)
dpu_encoder_helper_phys_cleanup(phys_enc);
phys_enc->enable_state = DPU_ENC_DISABLED;
@@ -566,7 +563,6 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
struct drm_writeback_job *job)
{
const struct msm_format *format;
- struct msm_gem_address_space *aspace;
struct dpu_hw_wb_cfg *wb_cfg;
int ret;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
@@ -576,13 +572,12 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
wb_enc->wb_job = job;
wb_enc->wb_conn = job->connector;
- aspace = phys_enc->dpu_kms->base.aspace;
wb_cfg = &wb_enc->wb_cfg;
memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg));
- ret = msm_framebuffer_prepare(job->fb, aspace, false);
+ ret = msm_framebuffer_prepare(job->fb, false);
if (ret) {
DPU_ERROR("prep fb failed, %d\n", ret);
return;
@@ -596,7 +591,7 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
return;
}
- dpu_format_populate_addrs(aspace, job->fb, &wb_cfg->dest);
+ dpu_format_populate_addrs(job->fb, &wb_cfg->dest);
wb_cfg->dest.width = job->fb->width;
wb_cfg->dest.height = job->fb->height;
@@ -619,14 +614,11 @@ static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc
struct drm_writeback_job *job)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
- struct msm_gem_address_space *aspace;
if (!job->fb)
return;
- aspace = phys_enc->dpu_kms->base.aspace;
-
- msm_framebuffer_cleanup(job->fb, aspace, false);
+ msm_framebuffer_cleanup(job->fb, false);
wb_enc->wb_job = NULL;
wb_enc->wb_conn = NULL;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 59c9427da7dd..b0d585c5315c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -274,15 +274,14 @@ int dpu_format_populate_plane_sizes(
return _dpu_format_populate_plane_sizes_linear(fmt, fb, layout);
}
-static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+static void _dpu_format_populate_addrs_ubwc(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
const struct msm_format *fmt;
uint32_t base_addr = 0;
bool meta;
- base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ base_addr = msm_framebuffer_iova(fb, 0);
fmt = msm_framebuffer_format(fb);
meta = MSM_FORMAT_IS_UBWC(fmt);
@@ -355,26 +354,23 @@ static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace
}
}
-static void _dpu_format_populate_addrs_linear(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+static void _dpu_format_populate_addrs_linear(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
unsigned int i;
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i)
- layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
-}
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, i);
+}
/**
* dpu_format_populate_addrs - populate buffer addresses based on
* mmu, fb, and format found in the fb
- * @aspace: address space pointer
* @fb: framebuffer pointer
* @layout: format layout structure to populate
*/
-void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+void dpu_format_populate_addrs(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
const struct msm_format *fmt;
@@ -384,7 +380,7 @@ void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
/* Populate the addresses given the fb */
if (MSM_FORMAT_IS_UBWC(fmt) ||
MSM_FORMAT_IS_TILE(fmt))
- _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ _dpu_format_populate_addrs_ubwc(fb, layout);
else
- _dpu_format_populate_addrs_linear(aspace, fb, layout);
+ _dpu_format_populate_addrs_linear(fb, layout);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index c6145d43aa3f..dc03f522e616 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -31,8 +31,7 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
return false;
}
-void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+void dpu_format_populate_addrs(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout);
int dpu_format_populate_plane_sizes(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 64265ca4656a..e824cd64fd3f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -34,13 +34,13 @@
#define VIG_MSM8998_MASK \
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
-#define VIG_SDM845_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
+#define VIG_SDM845_MASK_NO_SDMA \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_SDMA \
- (VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
-#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+#define VIG_QCM2290_MASK (VIG_BASE_MASK)
#define DMA_MSM8953_MASK \
(BIT(DPU_SSPP_QOS))
@@ -54,24 +54,24 @@
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SC7280_MASK \
- (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+ (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_INLINE_ROTATION))
#define VIG_SC7280_MASK_SDMA \
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-#define DMA_SDM845_MASK \
- (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+#define DMA_SDM845_MASK_NO_SDMA \
+ (BIT(DPU_SSPP_QOS) | \
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
-#define DMA_CURSOR_SDM845_MASK \
- (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+#define DMA_CURSOR_SDM845_MASK_NO_SDMA \
+ (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_CURSOR))
#define DMA_SDM845_MASK_SDMA \
- (DMA_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_SDM845_MASK_SDMA \
- (DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (DMA_CURSOR_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_MSM8996_MASK \
(DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR))
@@ -89,45 +89,6 @@
#define MIXER_MSM8998_MASK \
(BIT(DPU_MIXER_SOURCESPLIT))
-#define MIXER_SDM845_MASK \
- (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
-
-#define MIXER_QCM2290_MASK \
- (BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
-
-#define PINGPONG_MSM8996_MASK \
- (BIT(DPU_PINGPONG_DSC))
-
-#define PINGPONG_MSM8996_TE2_MASK \
- (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2))
-
-#define PINGPONG_SDM845_MASK \
- (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
-
-#define PINGPONG_SDM845_TE2_MASK \
- (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
-
-#define PINGPONG_SM8150_MASK \
- (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
-
-#define CTL_SC7280_MASK \
- (BIT(DPU_CTL_ACTIVE_CFG) | \
- BIT(DPU_CTL_FETCH_ACTIVE) | \
- BIT(DPU_CTL_VM_CFG) | \
- BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
-
-#define CTL_SM8550_MASK \
- (CTL_SC7280_MASK | BIT(DPU_CTL_HAS_LAYER_EXT4))
-
-#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
-
-#define INTF_SC7180_MASK \
- (BIT(DPU_INTF_INPUT_CTRL) | \
- BIT(DPU_INTF_STATUS_SUPPORTED) | \
- BIT(DPU_DATA_HCTL_EN))
-
-#define INTF_SC7280_MASK (INTF_SC7180_MASK)
-
#define WB_SDM845_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
@@ -137,9 +98,6 @@
BIT(DPU_WB_QOS_8LVL) | \
BIT(DPU_WB_CDP))
-#define WB_SM8250_MASK (WB_SDM845_MASK | \
- BIT(DPU_WB_INPUT_CTRL))
-
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -368,6 +326,9 @@ static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 =
static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 =
_VIG_SBLK(SSPP_SCALER_VER(3, 3));
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_4 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 4));
+
static const struct dpu_sspp_sub_blks dpu_rgb_sblk = _RGB_SBLK();
static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
@@ -376,8 +337,6 @@ static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
* MIXER sub blocks config
*************************************************************/
-/* MSM8998 */
-
static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
@@ -387,8 +346,6 @@ static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
},
};
-/* SDM845 */
-
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
@@ -398,8 +355,6 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-/* SC7180 */
-
static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
@@ -408,7 +363,15 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
},
};
-/* QCM2290 */
+static const struct dpu_lm_sub_blks sm8750_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ /* 0x40 + n*0x30 */
+ 0x40, 0x70, 0xa0, 0xd0, 0x100, 0x130, 0x160, 0x190, 0x1c0,
+ 0x1f0, 0x220
+ },
+};
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
.maxwidth = DEFAULT_DPU_LINE_WIDTH,
@@ -431,25 +394,19 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
.len = 0x90, .version = 0x40000},
};
+static const struct dpu_dspp_sub_blks sm8750_dspp_sblk = {
+ .pcc = {.name = "pcc", .base = 0x1700,
+ .len = 0x90, .version = 0x60000},
+};
+
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
-static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = {
- .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
- .version = 0x1},
-};
static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = {
/* No dither block */
};
-static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
- .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
- .version = 0x1},
- .dither = {.name = "dither", .base = 0x30e0,
- .len = 0x20, .version = 0x10000},
-};
-
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
@@ -473,6 +430,16 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
.ctl = {.name = "ctl", .base = 0xF80, .len = 0x10},
};
+static const struct dpu_dsc_sub_blks sm8750_dsc_sblk_0 = {
+ .enc = {.name = "enc", .base = 0x100, .len = 0x100},
+ .ctl = {.name = "ctl", .base = 0xF00, .len = 0x24},
+};
+
+static const struct dpu_dsc_sub_blks sm8750_dsc_sblk_1 = {
+ .enc = {.name = "enc", .base = 0x200, .len = 0x100},
+ .ctl = {.name = "ctl", .base = 0xF80, .len = 0x24},
+};
+
/*************************************************************
* CDM block config
*************************************************************/
@@ -759,7 +726,8 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_8_4_sa8775p.h"
#include "catalog/dpu_9_0_sm8550.h"
-
+#include "catalog/dpu_9_1_sar2130p.h"
#include "catalog/dpu_9_2_x1e80100.h"
#include "catalog/dpu_10_0_sm8650.h"
+#include "catalog/dpu_12_0_sm8750.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 4cea19e1a203..a78bb2c334e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -29,26 +29,6 @@
#define MAX_XIN_COUNT 16
/**
- * MDP TOP BLOCK features
- * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
- * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
- * @DPU_MDP_PERIPH_0_REMOVED Indicates that access to periph top0 block results
- * in a failure
- * @DPU_MDP_VSYNC_SEL Enables vsync source selection via MDP_VSYNC_SEL register
- * (moved into INTF block since DPU 5.0.0)
- * @DPU_MDP_MAX Maximum value
-
- */
-enum {
- DPU_MDP_PANIC_PER_PIPE = 0x1,
- DPU_MDP_10BIT_SUPPORT,
- DPU_MDP_AUDIO_SELECT,
- DPU_MDP_PERIPH_0_REMOVED,
- DPU_MDP_VSYNC_SEL,
- DPU_MDP_MAX
-};
-
-/**
* SSPP sub-blocks/features
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
* @DPU_SSPP_SCALER_QSEED3_COMPATIBLE, QSEED3-compatible algorithm support (includes QSEED3, QSEED3LITE and QSEED4)
@@ -57,7 +37,6 @@ enum {
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
* @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
* @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
- * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
* @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
* @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
* @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
@@ -75,7 +54,6 @@ enum {
DPU_SSPP_CSC_10BIT,
DPU_SSPP_CURSOR,
DPU_SSPP_QOS,
- DPU_SSPP_QOS_8LVL,
DPU_SSPP_EXCL_RECT,
DPU_SSPP_SMART_DMA_V1,
DPU_SSPP_SMART_DMA_V2,
@@ -88,20 +66,12 @@ enum {
/*
* MIXER sub-blocks/features
- * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
* @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
- * @DPU_MIXER_GC Gamma correction block
- * @DPU_DIM_LAYER Layer mixer supports dim layer
- * @DPU_MIXER_COMBINED_ALPHA Layer mixer has combined alpha register
* @DPU_MIXER_MAX maximum value
*/
enum {
- DPU_MIXER_LAYER = 0x1,
- DPU_MIXER_SOURCESPLIT,
- DPU_MIXER_GC,
- DPU_DIM_LAYER,
- DPU_MIXER_COMBINED_ALPHA,
- DPU_MIXER_MAX
+ DPU_MIXER_SOURCESPLIT = 0x1,
+ DPU_MIXER_MAX,
};
/**
@@ -114,59 +84,16 @@ enum {
};
/**
- * PINGPONG sub-blocks
- * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
- * @DPU_PINGPONG_SPLIT PP block supports split fifo
- * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
- * @DPU_PINGPONG_DITHER Dither blocks
- * @DPU_PINGPONG_DSC PP block supports DSC
- * @DPU_PINGPONG_MAX
- */
-enum {
- DPU_PINGPONG_TE2 = 0x1,
- DPU_PINGPONG_SPLIT,
- DPU_PINGPONG_SLAVE,
- DPU_PINGPONG_DITHER,
- DPU_PINGPONG_DSC,
- DPU_PINGPONG_MAX
-};
-
-/**
* CTL sub-blocks
* @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display
- * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs)
- * @DPU_CTL_VM_CFG: CTL config to support multiple VMs
- * @DPU_CTL_HAS_LAYER_EXT4: CTL has the CTL_LAYER_EXT4 register
- * @DPU_CTL_DSPP_BLOCK_FLUSH: CTL config to support dspp sub-block flush
* @DPU_CTL_MAX
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
- DPU_CTL_ACTIVE_CFG,
- DPU_CTL_FETCH_ACTIVE,
- DPU_CTL_VM_CFG,
- DPU_CTL_HAS_LAYER_EXT4,
- DPU_CTL_DSPP_SUB_BLOCK_FLUSH,
DPU_CTL_MAX
};
/**
- * INTF sub-blocks
- * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
- * pixel data arrives to this INTF
- * @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
- * than video timing
- * @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
- * @DPU_INTF_MAX
- */
-enum {
- DPU_INTF_INPUT_CTRL = 0x1,
- DPU_DATA_HCTL_EN,
- DPU_INTF_STATUS_SUPPORTED,
- DPU_INTF_MAX
-};
-
-/**
* WB sub-blocks and features
* @DPU_WB_LINE_MODE Writeback module supports line/linear mode
* @DPU_WB_BLOCK_MODE Writeback module supports block mode read
@@ -182,8 +109,6 @@ enum {
* @DPU_WB_QOS, Writeback supports QoS control, danger/safe/creq
* @DPU_WB_QOS_8LVL, Writeback supports 8-level QoS control
* @DPU_WB_CDP Writeback supports client driven prefetch
- * @DPU_WB_INPUT_CTRL Writeback supports from which pp block input pixel
- * data arrives.
* @DPU_WB_CROP CWB supports cropping
* @DPU_WB_MAX maximum value
*/
@@ -197,7 +122,6 @@ enum {
DPU_WB_QOS,
DPU_WB_QOS_8LVL,
DPU_WB_CDP,
- DPU_WB_INPUT_CTRL,
DPU_WB_CROP,
DPU_WB_MAX
};
@@ -216,16 +140,11 @@ enum {
/**
* DSC sub-blocks/features
- * @DPU_DSC_OUTPUT_CTRL Configure which PINGPONG block gets
- * the pixel output from this DSC.
- * @DPU_DSC_HW_REV_1_2 DSC block supports DSC 1.1 and 1.2
* @DPU_DSC_NATIVE_42x_EN Supports NATIVE_422_EN and NATIVE_420_EN encoding
* @DPU_DSC_MAX
*/
enum {
- DPU_DSC_OUTPUT_CTRL = 0x1,
- DPU_DSC_HW_REV_1_2,
- DPU_DSC_NATIVE_42x_EN,
+ DPU_DSC_NATIVE_42x_EN = 0x1,
DPU_DSC_MAX
};
@@ -235,14 +154,12 @@ enum {
* @id: enum identifying this block
* @base: register base offset to mdss
* @len: length of hardware block
- * @features bit mask identifying sub-blocks/features
*/
#define DPU_HW_BLK_INFO \
char name[DPU_HW_BLK_NAME_LEN]; \
u32 id; \
u32 base; \
- u32 len; \
- unsigned long features
+ u32 len
/**
* struct dpu_scaler_blk: Scaler information
@@ -404,8 +321,6 @@ struct dpu_dspp_sub_blks {
};
struct dpu_pingpong_sub_blks {
- struct dpu_pp_blk te;
- struct dpu_pp_blk te2;
struct dpu_pp_blk dither;
};
@@ -459,7 +374,6 @@ struct dpu_clk_ctrl_reg {
/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
* @id: index identifying this block
* @base: register base offset to mdss
- * @features bit mask identifying sub-blocks/features
* @clk_ctrls clock control register definition
*/
struct dpu_mdp_cfg {
@@ -475,6 +389,7 @@ struct dpu_mdp_cfg {
*/
struct dpu_ctl_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
unsigned int intr_start;
};
@@ -490,6 +405,7 @@ struct dpu_ctl_cfg {
*/
struct dpu_sspp_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
const struct dpu_sspp_sub_blks *sblk;
u32 xin_id;
enum dpu_clk_ctrl_type clk_ctrl;
@@ -507,6 +423,7 @@ struct dpu_sspp_cfg {
*/
struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
u32 dspp;
@@ -517,7 +434,6 @@ struct dpu_lm_cfg {
* struct dpu_dspp_cfg - information of DSPP blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
* supported by this block
* @sblk sub-blocks information
*/
@@ -530,7 +446,6 @@ struct dpu_dspp_cfg {
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
* @intr_done: index for PINGPONG done interrupt
* @intr_rdptr: index for PINGPONG readpointer done interrupt
* @sblk sub-blocks information
@@ -547,8 +462,6 @@ struct dpu_pingpong_cfg {
* struct dpu_merge_3d_cfg - information of MERGE_3D blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
- * supported by this block
* @sblk sub-blocks information
*/
struct dpu_merge_3d_cfg {
@@ -566,6 +479,7 @@ struct dpu_merge_3d_cfg {
*/
struct dpu_dsc_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
const struct dpu_dsc_sub_blks *sblk;
};
@@ -573,7 +487,6 @@ struct dpu_dsc_cfg {
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
* @type: Interface type (DSI, DP, HDMI)
* @controller_id: Controller Instance ID in case of multiple of intf type
* @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
@@ -604,6 +517,7 @@ struct dpu_intf_cfg {
*/
struct dpu_wb_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
u8 vbif_idx;
u32 maxlinewidth;
u32 xin_id;
@@ -672,6 +586,7 @@ struct dpu_vbif_qos_tbl {
*/
struct dpu_vbif_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
u32 default_ot_rd_limit;
u32 default_ot_wr_limit;
u32 xin_halt_timeout;
@@ -689,7 +604,6 @@ struct dpu_vbif_cfg {
* @name string name for debug purposes
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
*/
struct dpu_cdm_cfg {
DPU_HW_BLK_INFO;
@@ -841,6 +755,7 @@ extern const struct dpu_mdss_cfg dpu_msm8937_cfg;
extern const struct dpu_mdss_cfg dpu_msm8953_cfg;
extern const struct dpu_mdss_cfg dpu_msm8996_cfg;
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
+extern const struct dpu_mdss_cfg dpu_sar2130p_cfg;
extern const struct dpu_mdss_cfg dpu_sdm630_cfg;
extern const struct dpu_mdss_cfg dpu_sdm660_cfg;
extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
@@ -863,6 +778,7 @@ extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
extern const struct dpu_mdss_cfg dpu_sa8775p_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8750_cfg;
extern const struct dpu_mdss_cfg dpu_x1e80100_cfg;
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 411a7cf088eb..ac834db2e4c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -42,6 +42,8 @@
#define CTL_INTF_FLUSH 0x110
#define CTL_CDM_FLUSH 0x114
#define CTL_PERIPH_FLUSH 0x128
+#define CTL_PIPE_ACTIVE 0x12c
+#define CTL_LAYER_ACTIVE 0x130
#define CTL_INTF_MASTER 0x134
#define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4))
@@ -64,6 +66,8 @@ static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
1, 2, 3, 4, 5};
+static const u32 lm_tbl[LM_MAX] = {CTL_INVALID_BIT, 0, 1, 2, 3, 4, 5, 6, 7};
+
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
enum dpu_lm lm)
{
@@ -261,6 +265,12 @@ static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
case LM_5:
ctx->pending_flush_mask |= BIT(20);
break;
+ case LM_6:
+ ctx->pending_flush_mask |= BIT(21);
+ break;
+ case LM_7:
+ ctx->pending_flush_mask |= BIT(27);
+ break;
default:
break;
}
@@ -549,7 +559,7 @@ exit:
DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
- if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
+ if (ctx->mdss_ver->core_major_ver >= 9)
DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}
@@ -563,12 +573,13 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 wb_active = 0;
u32 cwb_active = 0;
u32 mode_sel = 0;
+ u32 merge_3d_active = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
* per VM. Explicitly disable it until VM support is
* added in SW. Power on reset value is not disable.
*/
- if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
+ if (ctx->mdss_ver->core_major_ver >= 7)
mode_sel = CTL_DEFAULT_GROUP_ID << 28;
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
@@ -578,6 +589,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
+ merge_3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
@@ -591,15 +603,18 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->dsc)
dsc_active |= cfg->dsc;
+ if (cfg->merge_3d)
+ merge_3d_active |= BIT(cfg->merge_3d - MERGE_3D_0);
+
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
- if (cfg->merge_3d)
- DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
- BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->intf_master)
+ DPU_REG_WRITE(c, CTL_INTF_MASTER, BIT(cfg->intf_master - INTF_0));
if (cfg->cdm)
DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
@@ -643,6 +658,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 intf_master = 0;
u32 wb_active = 0;
u32 cwb_active = 0;
u32 merge3d_active = 0;
@@ -664,12 +680,30 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
merge3d_active);
}
- dpu_hw_ctl_clear_all_blendstages(ctx);
+ if (ctx->ops.clear_all_blendstages)
+ ctx->ops.clear_all_blendstages(ctx);
+
+ if (ctx->ops.set_active_lms)
+ ctx->ops.set_active_lms(ctx, NULL);
+
+ if (ctx->ops.set_active_fetch_pipes)
+ ctx->ops.set_active_fetch_pipes(ctx, NULL);
+
+ if (ctx->ops.set_active_pipes)
+ ctx->ops.set_active_pipes(ctx, NULL);
if (cfg->intf) {
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active &= ~BIT(cfg->intf - INTF_0);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+
+ intf_master = DPU_REG_READ(c, CTL_INTF_MASTER);
+
+ /* Unset this intf as master, if it is the current master */
+ if (intf_master == BIT(cfg->intf - INTF_0)) {
+ DPU_DEBUG_DRIVER("Unsetting INTF_%d master\n", cfg->intf - INTF_0);
+ DPU_REG_WRITE(c, CTL_INTF_MASTER, 0);
+ }
}
if (cfg->cwb) {
@@ -697,8 +731,8 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
}
}
-static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
- unsigned long *fetch_active)
+static void dpu_hw_ctl_set_active_fetch_pipes(struct dpu_hw_ctl *ctx,
+ unsigned long *fetch_active)
{
int i;
u32 val = 0;
@@ -714,55 +748,39 @@ static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}
-static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
- unsigned long cap)
+static void dpu_hw_ctl_set_active_pipes(struct dpu_hw_ctl *ctx,
+ unsigned long *active_pipes)
{
- if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
- ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
- ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
- ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
- ops->update_pending_flush_intf =
- dpu_hw_ctl_update_pending_flush_intf_v1;
+ int i;
+ u32 val = 0;
- ops->update_pending_flush_periph =
- dpu_hw_ctl_update_pending_flush_periph_v1;
+ if (active_pipes) {
+ for (i = 0; i < SSPP_MAX; i++) {
+ if (test_bit(i, active_pipes) &&
+ fetch_tbl[i] != CTL_INVALID_BIT)
+ val |= BIT(fetch_tbl[i]);
+ }
+ }
- ops->update_pending_flush_merge_3d =
- dpu_hw_ctl_update_pending_flush_merge_3d_v1;
- ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
- ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
- ops->update_pending_flush_dsc =
- dpu_hw_ctl_update_pending_flush_dsc_v1;
- ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
- } else {
- ops->trigger_flush = dpu_hw_ctl_trigger_flush;
- ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
- ops->update_pending_flush_intf =
- dpu_hw_ctl_update_pending_flush_intf;
- ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
- ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
+ DPU_REG_WRITE(&ctx->hw, CTL_PIPE_ACTIVE, val);
+}
+
+static void dpu_hw_ctl_set_active_lms(struct dpu_hw_ctl *ctx,
+ unsigned long *active_lms)
+{
+ int i;
+ u32 val = 0;
+
+ if (active_lms) {
+ for (i = LM_0; i < LM_MAX; i++) {
+ if (test_bit(i, active_lms) &&
+ lm_tbl[i] != CTL_INVALID_BIT)
+ val |= BIT(lm_tbl[i]);
+ }
}
- ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
- ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
- ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
- ops->get_flush_register = dpu_hw_ctl_get_flush_register;
- ops->trigger_start = dpu_hw_ctl_trigger_start;
- ops->is_started = dpu_hw_ctl_is_started;
- ops->trigger_pending = dpu_hw_ctl_trigger_pending;
- ops->reset = dpu_hw_ctl_reset_control;
- ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
- ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
- ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
- ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
- ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
- if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
- ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
- else
- ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
- if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
- ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
-};
+ DPU_REG_WRITE(&ctx->hw, CTL_LAYER_ACTIVE, val);
+}
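/*
 * Editor's example (illustrative, not part of the patch): with lm_tbl
 * above (and LM_0 == 1 in enum dpu_lm, index 0 meaning "none"),
 * marking LM_0 and LM_1 yields BIT(0) | BIT(1) == 0x3 written to
 * CTL_LAYER_ACTIVE (0x130). The ctl variable is hypothetical.
 */
DECLARE_BITMAP(lms, LM_MAX);

bitmap_zero(lms, LM_MAX);
set_bit(LM_0, lms);
set_bit(LM_1, lms);
ctl->ops.set_active_lms(ctl, lms);	/* writes 0x3 */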
/**
* dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
@@ -770,12 +788,14 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
* @dev: Corresponding device for devres management
* @cfg: ctl_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
+ * @mdss_ver: dpu core's major and minor versions
* @mixer_count: Number of mixers in @mixer
* @mixer: Pointer to an array of Layer Mixers defined in the catalog
*/
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver,
u32 mixer_count,
const struct dpu_lm_cfg *mixer)
{
@@ -789,7 +809,59 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
c->hw.log_mask = DPU_DBG_MASK_CTL;
c->caps = cfg;
- _setup_ctl_ops(&c->ops, c->caps->features);
+ c->mdss_ver = mdss_ver;
+
+ if (mdss_ver->core_major_ver >= 5) {
+ c->ops.trigger_flush = dpu_hw_ctl_trigger_flush_v1;
+ c->ops.setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ c->ops.reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
+ c->ops.update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf_v1;
+
+ c->ops.update_pending_flush_periph =
+ dpu_hw_ctl_update_pending_flush_periph_v1;
+
+ c->ops.update_pending_flush_merge_3d =
+ dpu_hw_ctl_update_pending_flush_merge_3d_v1;
+ c->ops.update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
+ c->ops.update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
+ c->ops.update_pending_flush_dsc =
+ dpu_hw_ctl_update_pending_flush_dsc_v1;
+ c->ops.update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
+ } else {
+ c->ops.trigger_flush = dpu_hw_ctl_trigger_flush;
+ c->ops.setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ c->ops.update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf;
+ c->ops.update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
+ c->ops.update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
+ }
+ c->ops.clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ c->ops.update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ c->ops.get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ c->ops.get_flush_register = dpu_hw_ctl_get_flush_register;
+ c->ops.trigger_start = dpu_hw_ctl_trigger_start;
+ c->ops.is_started = dpu_hw_ctl_is_started;
+ c->ops.trigger_pending = dpu_hw_ctl_trigger_pending;
+ c->ops.reset = dpu_hw_ctl_reset_control;
+ c->ops.wait_reset_status = dpu_hw_ctl_wait_reset_status;
+ if (mdss_ver->core_major_ver < 12) {
+ c->ops.clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ c->ops.setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ } else {
+ c->ops.set_active_pipes = dpu_hw_ctl_set_active_pipes;
+ c->ops.set_active_lms = dpu_hw_ctl_set_active_lms;
+ }
+ c->ops.update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
+ c->ops.update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
+ if (mdss_ver->core_major_ver >= 7)
+ c->ops.update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
+ else
+ c->ops.update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
+
+ if (mdss_ver->core_major_ver >= 7)
+ c->ops.set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;
+
c->idx = cfg->id;
c->mixer_count = mixer_count;
c->mixer_hw_caps = mixer;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 080a9550a0cc..15931b22ec94 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -36,6 +36,7 @@ struct dpu_hw_stage_cfg {
/**
* struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface
* @intf : Interface id
+ * @intf_master: Master interface id in the dual pipe topology
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
@@ -46,6 +47,7 @@ struct dpu_hw_stage_cfg {
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
+ enum dpu_intf intf_master;
enum dpu_wb wb;
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
@@ -254,8 +256,25 @@ struct dpu_hw_ctl_ops {
void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
- void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+ void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active);
+
+ /**
+ * Set active pipes attached to this CTL
+ * @ctx: ctl path ctx pointer
+ * @active_pipes: bitmap of enum dpu_sspp
+ */
+ void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+ unsigned long *active_pipes);
+
+ /**
+ * Set active layer mixers attached to this CTL
+ * @ctx: ctl path ctx pointer
+ * @active_lms: bitmap of enum dpu_lm
+ */
+ void (*set_active_lms)(struct dpu_hw_ctl *ctx,
+ unsigned long *active_lms);
+
};
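A hedged usage sketch for the two new hooks; the bitmap helpers are the standard kernel API, and the surrounding encoder-side code is assumed:

	/* sketch: the masks are kernel bitmaps indexed by the block enums */
	DECLARE_BITMAP(active_pipes, SSPP_MAX);
	DECLARE_BITMAP(active_lms, LM_MAX);

	bitmap_zero(active_pipes, SSPP_MAX);
	bitmap_zero(active_lms, LM_MAX);
	set_bit(SSPP_VIG0, active_pipes);
	set_bit(LM_0, active_lms);

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, active_pipes);
	if (ctl->ops.set_active_lms)
		ctl->ops.set_active_lms(ctl, active_lms);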
/**
@@ -272,6 +291,7 @@ struct dpu_hw_ctl_ops {
* @pending_cwb_flush_mask: pending CWB flush
* @pending_dsc_flush_mask: pending DSC flush
* @pending_cdm_flush_mask: pending CDM flush
+ * @mdss_ver: MDSS revision information
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -293,6 +313,8 @@ struct dpu_hw_ctl {
u32 pending_dsc_flush_mask;
u32 pending_cdm_flush_mask;
+ const struct dpu_mdss_version *mdss_ver;
+
/* ops */
struct dpu_hw_ctl_ops ops;
};
@@ -310,6 +332,7 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver,
u32 mixer_count,
const struct dpu_lm_cfg *mixer);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index cec6d4e8baec..3a149caa7ff4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -181,26 +181,18 @@ static void dpu_hw_dsc_bind_pingpong_blk(
DPU_REG_WRITE(c, dsc_ctl_offset, mux_cfg);
}
-static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
- unsigned long cap)
-{
- ops->dsc_disable = dpu_hw_dsc_disable;
- ops->dsc_config = dpu_hw_dsc_config;
- ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
- if (cap & BIT(DPU_DSC_OUTPUT_CTRL))
- ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
-};
-
/**
* dpu_hw_dsc_init() - Initializes the DSC hw driver object.
* @dev: Corresponding device for devres management
* @cfg: DSC catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
+ * @mdss_ver: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_dsc context
*/
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
- void __iomem *addr)
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_dsc *c;
@@ -213,7 +205,12 @@ struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
c->idx = cfg->id;
c->caps = cfg;
- _setup_dsc_ops(&c->ops, c->caps->features);
+
+ c->ops.dsc_disable = dpu_hw_dsc_disable;
+ c->ops.dsc_config = dpu_hw_dsc_config;
+ c->ops.dsc_config_thresh = dpu_hw_dsc_config_thresh;
+ if (mdss_ver->core_major_ver >= 5)
+ c->ops.dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index fc171bdeca48..b7013c9822d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -64,7 +64,8 @@ struct dpu_hw_dsc {
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
- void __iomem *addr);
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver);
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
index b9c433567262..b3395e9c34a1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
@@ -360,8 +360,7 @@ static void dpu_hw_dsc_bind_pingpong_blk_1_2(struct dpu_hw_dsc *hw_dsc,
DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CTL, mux_cfg);
}
-static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops,
- const unsigned long features)
+static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops)
{
ops->dsc_disable = dpu_hw_dsc_disable_1_2;
ops->dsc_config = dpu_hw_dsc_config_1_2;
@@ -391,7 +390,7 @@ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
c->idx = cfg->id;
c->caps = cfg;
- _setup_dcs_ops_1_2(&c->ops, c->caps->features);
+ _setup_dcs_ops_1_2(&c->ops);
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 829ca272873e..11fb1bc54fa9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -63,13 +63,6 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
}
-static void _setup_dspp_ops(struct dpu_hw_dspp *c,
- unsigned long features)
-{
- if (test_bit(DPU_DSPP_PCC, &features))
- c->ops.setup_pcc = dpu_setup_dspp_pcc;
-}
-
/**
* dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
* should be called once before accessing every DSPP.
@@ -97,7 +90,8 @@ struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
/* Assign ops */
c->idx = cfg->id;
c->cap = cfg;
- _setup_dspp_ops(c, c->cap->features);
+ if (c->cap->sblk->pcc.base)
+ c->ops.setup_pcc = dpu_setup_dspp_pcc;
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index fb1d25baa518..a80ac82a9625 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -98,8 +98,7 @@
static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
- const struct msm_format *fmt,
- const struct dpu_mdss_version *mdss_ver)
+ const struct msm_format *fmt)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 hsync_period, vsync_period;
@@ -180,7 +179,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
/* TODO: handle DSC+DP case, we only handle DSC+DSI case so far */
if (p->compression_en && !dp_intf &&
- mdss_ver->core_major_ver >= 7)
+ intf->mdss_ver->core_major_ver >= 7)
intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
hsync_data_start_x = hsync_start_x;
@@ -238,7 +237,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
- if (intf->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ if (intf->mdss_ver->core_major_ver >= 5) {
/*
* DATA_HCTL_EN controls data timing which can be different from
* video timing. It is recommended to enable it for all cases, except
@@ -309,9 +308,8 @@ static void dpu_hw_intf_get_status(
struct dpu_hw_intf_status *s)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
- unsigned long cap = intf->cap->features;
- if (cap & BIT(DPU_INTF_STATUS_SUPPORTED))
+ if (intf->mdss_ver->core_major_ver >= 5)
s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0);
else
s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
@@ -580,6 +578,8 @@ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
c->idx = cfg->id;
c->cap = cfg;
+ c->mdss_ver = mdss_rev;
+
c->ops.setup_timing_gen = dpu_hw_intf_setup_timing_engine;
c->ops.setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
c->ops.get_status = dpu_hw_intf_get_status;
@@ -588,7 +588,7 @@ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
c->ops.setup_misr = dpu_hw_intf_setup_misr;
c->ops.collect_misr = dpu_hw_intf_collect_misr;
- if (cfg->features & BIT(DPU_INTF_INPUT_CTRL))
+ if (mdss_rev->core_major_ver >= 5)
c->ops.bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
/* INTF TE is only for DSI interfaces */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 114be272ac0a..f31067a9aaf1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -81,8 +81,7 @@ struct dpu_hw_intf_cmd_mode_cfg {
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
- const struct msm_format *fmt,
- const struct dpu_mdss_version *mdss_ver);
+ const struct msm_format *fmt);
void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_prog_fetch *fetch);
@@ -126,6 +125,8 @@ struct dpu_hw_intf {
enum dpu_intf idx;
const struct dpu_intf_cfg *cap;
+ const struct dpu_mdss_version *mdss_ver;
+
/* ops */
struct dpu_hw_intf_ops ops;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 81b56f066519..e8a76d5192c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -19,12 +19,28 @@
/* These registers are offsets from mixer base + stage base */
#define LM_BLEND0_OP 0x00
+
+/* <v12 DPU with offset to mixer base + stage base */
#define LM_BLEND0_CONST_ALPHA 0x04
#define LM_FG_COLOR_FILL_COLOR_0 0x08
#define LM_FG_COLOR_FILL_COLOR_1 0x0C
#define LM_FG_COLOR_FILL_SIZE 0x10
#define LM_FG_COLOR_FILL_XY 0x14
+/* >= v12 DPU */
+#define LM_BG_SRC_SEL_V12 0x14
+#define LM_BG_SRC_SEL_V12_RESET_VALUE 0x0000c0c0
+#define LM_BORDER_COLOR_0_V12 0x1c
+#define LM_BORDER_COLOR_1_V12 0x20
+
+/* >= v12 DPU with offset to mixer base + stage base */
+#define LM_BLEND0_FG_SRC_SEL_V12 0x04
+#define LM_BLEND0_CONST_ALPHA_V12 0x08
+#define LM_FG_COLOR_FILL_COLOR_0_V12 0x0c
+#define LM_FG_COLOR_FILL_COLOR_1_V12 0x10
+#define LM_FG_COLOR_FILL_SIZE_V12 0x14
+#define LM_FG_COLOR_FILL_XY_V12 0x18
+
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
@@ -83,6 +99,22 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
}
}
+static void dpu_hw_lm_setup_border_color_v12(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0_V12,
+ (color->color_0 & 0x3ff) |
+ ((color->color_1 & 0x3ff) << 16));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1_V12,
+ (color->color_2 & 0x3ff) |
+ ((color->color_3 & 0x3ff) << 16));
+ }
+}
+
static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx)
{
dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0);
@@ -112,6 +144,27 @@ static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx
DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}
+static void
+dpu_hw_lm_setup_blend_config_combined_alpha_v12(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha,
+ u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ const_alpha = (bg_alpha & 0x3ff) | ((fg_alpha & 0x3ff) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA_V12 + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
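Both v12 helpers pack two 10-bit fields into one 32-bit register; a worked example with arbitrary values:

	/*
	 * illustration, not from the patch:
	 *   fg_alpha = 0x3ff, bg_alpha = 0x000
	 *     -> const_alpha = 0x000 | (0x3ff << 16) = 0x03ff0000
	 *   color_0 = 0x3ff, color_1 = 0x200
	 *     -> LM_BORDER_COLOR_0_V12 = 0x3ff | (0x200 << 16) = 0x020003ff
	 */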
static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
@@ -144,18 +197,146 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
- unsigned long features)
+static void dpu_hw_lm_setup_color3_v12(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode, stages, stage_off, i;
+
+ stages = ctx->cap->sblk->maxblendstages;
+ if (stages <= 0)
+ return;
+
+ for (i = DPU_STAGE_0; i <= stages; i++) {
+ stage_off = _stage_offset(ctx, i);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ /* set color_out3 bit in blend0_op when enabled in mixer_op_mode */
+ op_mode = DPU_REG_READ(c, LM_BLEND0_OP + stage_off);
+ if (mixer_op_mode & BIT(i))
+ op_mode |= BIT(30);
+ else
+ op_mode &= ~BIT(30);
+
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, op_mode);
+ }
+}
+
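Pre-v12 DPUs keep the per-stage COLOR3 enables packed in LM_OP_MODE; on v12 each stage's enable moves into bit 30 of its own LM_BLENDn_OP register. A minimal sketch of the per-stage read-modify-write:

	/* sketch of the update done per stage above, not part of the patch */
	static u32 blend_op_with_color3(u32 blend_op, u32 mixer_op_mode, int stage)
	{
		if (mixer_op_mode & BIT(stage))
			return blend_op | BIT(30);

		return blend_op & ~BIT(30);
	}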
+static int _set_staged_sspp(u32 stage, struct dpu_hw_stage_cfg *stage_cfg,
+ int pipes_per_stage, u32 *value)
{
- ops->setup_mixer_out = dpu_hw_lm_setup_out;
- if (test_bit(DPU_MIXER_COMBINED_ALPHA, &features))
- ops->setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
+ int i;
+ u32 pipe_type = 0, pipe_id = 0, rec_id = 0;
+ u32 src_sel[PIPES_PER_STAGE];
+
+ *value = LM_BG_SRC_SEL_V12_RESET_VALUE;
+ if (!stage_cfg || !pipes_per_stage)
+ return 0;
+
+ for (i = 0; i < pipes_per_stage; i++) {
+ enum dpu_sspp pipe = stage_cfg->stage[stage][i];
+ enum dpu_sspp_multirect_index rect_index = stage_cfg->multirect_index[stage][i];
+
+ src_sel[i] = LM_BG_SRC_SEL_V12_RESET_VALUE;
+
+ if (!pipe)
+ continue;
+
+ /* translate pipe data to SWI pipe_type, pipe_id */
+ if (pipe >= SSPP_DMA0 && pipe <= SSPP_DMA5) {
+ pipe_type = 0;
+ pipe_id = pipe - SSPP_DMA0;
+ } else if (pipe >= SSPP_VIG0 && pipe <= SSPP_VIG3) {
+ pipe_type = 1;
+ pipe_id = pipe - SSPP_VIG0;
+ } else {
+ DPU_ERROR("invalid rec-%d pipe:%d\n", i, pipe);
+ return -EINVAL;
+ }
+
+ /* translate rec data to SWI rec_id */
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+ rec_id = 0;
+ } else if (rect_index == DPU_SSPP_RECT_1) {
+ rec_id = 1;
+ } else {
+ DPU_ERROR("invalid rec-%d rect_index:%d\n", i, rect_index);
+ rec_id = 0;
+ }
+
+ /* calculate SWI value for rec-0 and rec-1 and store it in a temporary buffer */
+ src_sel[i] = (((pipe_type & 0x3) << 6) | ((rec_id & 0x3) << 4) | (pipe_id & 0xf));
+ }
+
+ /* calculate final SWI register value for rec-0 and rec-1 */
+ *value = 0;
+ for (i = 0; i < pipes_per_stage; i++)
+ *value |= src_sel[i] << (i * 8);
+
+ return 0;
+}
+
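Each rectangle's source select packs pipe type, rectangle id and pipe id into one byte, and the final register value carries rec-0 in bits [7:0] and rec-1 in bits [15:8]. A worked example:

	/* sketch of the packing in _set_staged_sspp() above */
	static u32 lm_src_sel(u32 pipe_type, u32 rec_id, u32 pipe_id)
	{
		return ((pipe_type & 0x3) << 6) | ((rec_id & 0x3) << 4) |
		       (pipe_id & 0xf);
	}

	/*
	 * staging DMA2 rec-0 next to VIG1 rec-1 in one stage:
	 *   lm_src_sel(0, 0, 2) = 0x02, lm_src_sel(1, 1, 1) = 0x51
	 *   value = 0x02 | (0x51 << 8) = 0x5102
	 */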
+static int dpu_hw_lm_setup_blendstage(struct dpu_hw_mixer *ctx, enum dpu_lm lm,
+ struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i, ret, stages, stage_off, pipes_per_stage;
+ u32 value;
+
+ stages = ctx->cap->sblk->maxblendstages;
+ if (stages <= 0)
+ return -EINVAL;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT, &ctx->cap->features))
+ pipes_per_stage = PIPES_PER_STAGE;
else
- ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
- ops->setup_alpha_out = dpu_hw_lm_setup_color3;
- ops->setup_border_color = dpu_hw_lm_setup_border_color;
- ops->setup_misr = dpu_hw_lm_setup_misr;
- ops->collect_misr = dpu_hw_lm_collect_misr;
+ pipes_per_stage = 1;
+
+ /*
+ * When stage configuration is empty, we can enable the
+ * border color by setting the corresponding LAYER_ACTIVE bit
+ * and un-staging all the pipes from the layer mixer.
+ */
+ if (!stage_cfg)
+ DPU_REG_WRITE(c, LM_BG_SRC_SEL_V12, LM_BG_SRC_SEL_V12_RESET_VALUE);
+
+ for (i = DPU_STAGE_0; i <= stages; i++) {
+ stage_off = _stage_offset(ctx, i);
+ if (stage_off < 0)
+ return stage_off;
+
+ ret = _set_staged_sspp(i, stage_cfg, pipes_per_stage, &value);
+ if (ret)
+ return ret;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_SRC_SEL_V12 + stage_off, value);
+ }
+
+ return 0;
+}
+
+static int dpu_hw_lm_clear_all_blendstages(struct dpu_hw_mixer *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i, stages, stage_off;
+
+ stages = ctx->cap->sblk->maxblendstages;
+ if (stages <= 0)
+ return -EINVAL;
+
+ DPU_REG_WRITE(c, LM_BG_SRC_SEL_V12, LM_BG_SRC_SEL_V12_RESET_VALUE);
+
+ for (i = DPU_STAGE_0; i <= stages; i++) {
+ stage_off = _stage_offset(ctx, i);
+ if (stage_off < 0)
+ return stage_off;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_SRC_SEL_V12 + stage_off,
+ LM_BG_SRC_SEL_V12_RESET_VALUE);
+ }
+
+ return 0;
}
/**
@@ -164,10 +345,12 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
* @dev: Corresponding device for devres management
* @cfg: mixer catalog entry for which driver object is required
* @addr: mapped register io address of MDP
+ * @mdss_ver: DPU core's major and minor versions
*/
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
- void __iomem *addr)
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_mixer *c;
@@ -186,7 +369,24 @@ struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
/* Assign ops */
c->idx = cfg->id;
c->cap = cfg;
- _setup_mixer_ops(&c->ops, c->cap->features);
+ c->ops.setup_mixer_out = dpu_hw_lm_setup_out;
+ if (mdss_ver->core_major_ver >= 12)
+ c->ops.setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha_v12;
+ else if (mdss_ver->core_major_ver >= 4)
+ c->ops.setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
+ else
+ c->ops.setup_blend_config = dpu_hw_lm_setup_blend_config;
+ if (mdss_ver->core_major_ver < 12) {
+ c->ops.setup_alpha_out = dpu_hw_lm_setup_color3;
+ c->ops.setup_border_color = dpu_hw_lm_setup_border_color;
+ } else {
+ c->ops.setup_alpha_out = dpu_hw_lm_setup_color3_v12;
+ c->ops.setup_blendstage = dpu_hw_lm_setup_blendstage;
+ c->ops.clear_all_blendstages = dpu_hw_lm_clear_all_blendstages;
+ c->ops.setup_border_color = dpu_hw_lm_setup_border_color_v12;
+ }
+ c->ops.setup_misr = dpu_hw_lm_setup_misr;
+ c->ops.collect_misr = dpu_hw_lm_collect_misr;
return c;
}
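Together with the dpu_hw_ctl.c hunk earlier, this moves blend-stage programming from the CTL to the mixer on DPU >= 12. A hedged sketch of the resulting call flow (encoder-side code assumed):

	/* sketch, not part of the patch */
	if (ctl->ops.set_active_lms) {			/* DPU >= 12 */
		ctl->ops.set_active_pipes(ctl, active_pipes);
		ctl->ops.set_active_lms(ctl, active_lms);
		lm->ops.setup_blendstage(lm, lm->idx, &stage_cfg);
	} else {					/* DPU < 12 */
		ctl->ops.setup_blendstage(ctl, lm->idx, &stage_cfg);
	}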
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 6f60fa9b3cd7..1b9ecd082d7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -11,6 +11,7 @@
#include "dpu_hw_util.h"
struct dpu_hw_mixer;
+struct dpu_hw_stage_cfg;
struct dpu_hw_mixer_cfg {
u32 out_width;
@@ -49,6 +50,23 @@ struct dpu_hw_lm_ops {
void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
/**
+ * Clear the layer-mixer-to-pipe configuration
+ * @ctx : mixer ctx pointer
+ * Returns: 0 on success or -error
+ */
+ int (*clear_all_blendstages)(struct dpu_hw_mixer *ctx);
+
+ /**
+ * Configure the layer-mixer-to-pipe configuration
+ * @ctx : mixer ctx pointer
+ * @lm : layer mixer enumeration
+ * @stage_cfg : blend stage configuration
+ * Returns: 0 on success or -error
+ */
+ int (*setup_blendstage)(struct dpu_hw_mixer *ctx, enum dpu_lm lm,
+ struct dpu_hw_stage_cfg *stage_cfg);
+
+ /**
* setup_border_color : enable/disable border color
*/
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
@@ -95,6 +113,7 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
- void __iomem *addr);
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver);
#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 8d820cd1b554..175639c8bfbb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -125,6 +125,7 @@ enum dpu_lm {
LM_4,
LM_5,
LM_6,
+ LM_7,
LM_MAX
};
@@ -169,6 +170,8 @@ enum dpu_dsc {
DSC_3,
DSC_4,
DSC_5,
+ DSC_6,
+ DSC_7,
DSC_MAX
};
@@ -185,6 +188,8 @@ enum dpu_pingpong {
PINGPONG_3,
PINGPONG_4,
PINGPONG_5,
+ PINGPONG_6,
+ PINGPONG_7,
PINGPONG_CWB_0,
PINGPONG_CWB_1,
PINGPONG_CWB_2,
@@ -199,6 +204,7 @@ enum dpu_merge_3d {
MERGE_3D_2,
MERGE_3D_3,
MERGE_3D_4,
+ MERGE_3D_5,
MERGE_3D_MAX
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
index 0b3325f9c870..83b1dbecddd2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -33,8 +33,7 @@ static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d,
}
}
-static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
- unsigned long features)
+static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c)
{
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};
@@ -62,7 +61,7 @@ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
c->idx = cfg->id;
c->caps = cfg;
- _setup_merge_3d_ops(c, c->caps->features);
+ _setup_merge_3d_ops(c);
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 36c0ec775b92..138071be5649 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -319,13 +319,13 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
c->ops.disable_autorefresh = dpu_hw_pp_disable_autorefresh;
}
- if (test_bit(DPU_PINGPONG_DSC, &cfg->features)) {
+ if (mdss_rev->core_major_ver < 7) {
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
}
- if (test_bit(DPU_PINGPONG_DITHER, &cfg->features))
+ if (mdss_rev->core_major_ver >= 3)
c->ops.setup_dither = dpu_hw_pp_setup_dither;
return c;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 32c7c8084553..6f1fc790ad6d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -10,11 +10,11 @@
#include "dpu_hw_sspp.h"
#include "dpu_kms.h"
-#include "msm_mdss.h"
-
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
+#include <linux/soc/qcom/ubwc.h>
+
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
/* SSPP registers */
@@ -543,7 +543,7 @@ static void dpu_hw_sspp_setup_qos_lut(struct dpu_hw_sspp *ctx,
return;
_dpu_hw_setup_qos_lut(&ctx->hw, SSPP_DANGER_LUT,
- test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features),
+ ctx->mdss_ver->core_major_ver >= 4,
cfg);
}
@@ -684,7 +684,7 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_sspp *hw_pipe;
@@ -703,6 +703,9 @@ struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
hw_pipe->ubwc = mdss_data;
hw_pipe->idx = cfg->id;
hw_pipe->cap = cfg;
+
+ hw_pipe->mdss_ver = mdss_rev;
+
_setup_layer_ops(hw_pipe, hw_pipe->cap->features, mdss_rev);
return hw_pipe;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 56a0edf2a57c..bdac5c04bf79 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -308,12 +308,14 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_sspp {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
- const struct msm_mdss_data *ubwc;
+ const struct qcom_ubwc_cfg_data *ubwc;
/* Pipe */
enum dpu_sspp idx;
const struct dpu_sspp_cfg *cap;
+ const struct dpu_mdss_version *mdss_ver;
+
/* Ops */
struct dpu_hw_sspp_ops ops;
};
@@ -323,7 +325,7 @@ struct dpu_kms;
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
const struct dpu_mdss_version *mdss_rev);
int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 562a3f4c5238..96dc10589bee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -264,15 +264,15 @@ static void dpu_hw_dp_phy_intf_sel(struct dpu_hw_mdp *mdp,
}
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
- unsigned long cap, const struct dpu_mdss_version *mdss_rev)
+ const struct dpu_mdss_version *mdss_rev)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
ops->get_danger_status = dpu_hw_get_danger_status;
- if (cap & BIT(DPU_MDP_VSYNC_SEL))
+ if (mdss_rev->core_major_ver < 5)
ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
- else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
+ else if (mdss_rev->core_major_ver < 8)
ops->setup_vsync_source = dpu_hw_setup_wd_timer;
ops->get_safe_status = dpu_hw_get_safe_status;
@@ -280,7 +280,8 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
if (mdss_rev->core_major_ver >= 5)
ops->dp_phy_intf_sel = dpu_hw_dp_phy_intf_sel;
- if (cap & BIT(DPU_MDP_AUDIO_SELECT))
+ if (mdss_rev->core_major_ver == 4 ||
+ mdss_rev->core_major_ver == 5)
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
@@ -312,7 +313,7 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
* Assign ops
*/
mdp->caps = cfg;
- _setup_mdp_ops(&mdp->ops, mdp->caps->features, mdss_rev);
+ _setup_mdp_ops(&mdp->ops, mdss_rev);
return mdp;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index 4853e516c487..478a091aeccf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -208,7 +208,7 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
if (test_bit(DPU_WB_CDP, &features))
ops->setup_cdp = dpu_hw_wb_setup_cdp;
- if (test_bit(DPU_WB_INPUT_CTRL, &features))
+ if (mdss_rev->core_major_ver >= 5)
ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
if (mdss_rev->core_major_ver >= 9)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 3305ad0623ca..12dcb32b4724 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -20,9 +20,10 @@
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_drv.h"
#include "msm_mmu.h"
-#include "msm_mdss.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"
@@ -582,7 +583,7 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_display_info info;
int i, rc = 0;
- if (!(priv->dsi[0] || priv->dsi[1]))
+ if (!(priv->kms->dsi[0] || priv->kms->dsi[1]))
return rc;
/*
@@ -593,26 +594,26 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
*
* TODO: Support swapping DSI0 and DSI1 in the bonded setup.
*/
- for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ for (i = 0; i < ARRAY_SIZE(priv->kms->dsi); i++) {
int other = (i + 1) % 2;
- if (!priv->dsi[i])
+ if (!priv->kms->dsi[i])
continue;
- if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
- !msm_dsi_is_master_dsi(priv->dsi[i]))
+ if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]) &&
+ !msm_dsi_is_master_dsi(priv->kms->dsi[i]))
continue;
memset(&info, 0, sizeof(info));
info.intf_type = INTF_DSI;
info.h_tile_instance[info.num_of_h_tiles++] = i;
- if (msm_dsi_is_bonded_dsi(priv->dsi[i]))
+ if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]))
info.h_tile_instance[info.num_of_h_tiles++] = other;
- info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
+ info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->kms->dsi[i]);
- rc = dpu_kms_dsi_set_te_source(&info, priv->dsi[i]);
+ rc = dpu_kms_dsi_set_te_source(&info, priv->kms->dsi[i]);
if (rc) {
DPU_ERROR("failed to identify TE source for dsi display\n");
return rc;
@@ -624,15 +625,15 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
return PTR_ERR(encoder);
}
- rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ rc = msm_dsi_modeset_init(priv->kms->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
break;
}
- if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
- rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
+ if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]) && priv->kms->dsi[other]) {
+ rc = msm_dsi_modeset_init(priv->kms->dsi[other], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
other, rc);
@@ -654,8 +655,8 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
int rc;
int i;
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
- if (!priv->dp[i])
+ for (i = 0; i < ARRAY_SIZE(priv->kms->dp); i++) {
+ if (!priv->kms->dp[i])
continue;
memset(&info, 0, sizeof(info));
@@ -670,7 +671,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
}
yuv_supported = !!dpu_kms->catalog->cdm;
- rc = msm_dp_modeset_init(priv->dp[i], dev, encoder, yuv_supported);
+ rc = msm_dp_modeset_init(priv->kms->dp[i], dev, encoder, yuv_supported);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
return rc;
@@ -688,7 +689,7 @@ static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
struct msm_display_info info;
int rc;
- if (!priv->hdmi)
+ if (!priv->kms->hdmi)
return 0;
memset(&info, 0, sizeof(info));
@@ -702,7 +703,7 @@ static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
return PTR_ERR(encoder);
}
- rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ rc = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
return rc;
@@ -874,12 +875,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
ret = PTR_ERR(crtc);
return ret;
}
- priv->num_crtcs++;
}
/* All CRTCs are compatible with all encoders */
drm_for_each_encoder(encoder, dev)
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ encoder->possible_crtcs = (1 << dev->mode_config.num_crtc) - 1;
return 0;
}
@@ -1022,7 +1022,7 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
dpu_kms->mmio + cat->wb[i].base, "%s",
cat->wb[i].name);
- if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
+ if (dpu_kms->catalog->mdss_ver->core_major_ver >= 8) {
msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
dpu_kms->mmio + cat->mdp[0].base, "top");
msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
@@ -1043,7 +1043,7 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base,
"%s", cat->dsc[i].name);
- if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
+ if (cat->mdss_ver->core_major_ver >= 7) {
struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
struct dpu_dsc_blk ctl = cat->dsc[i].sblk->ctl;
@@ -1095,26 +1095,26 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
- if (!dpu_kms->base.aspace)
+ if (!dpu_kms->base.vm)
return;
- mmu = dpu_kms->base.aspace->mmu;
+ mmu = to_msm_vm(dpu_kms->base.vm)->mmu;
mmu->funcs->detach(mmu);
- msm_gem_address_space_put(dpu_kms->base.aspace);
+ drm_gpuvm_put(dpu_kms->base.vm);
- dpu_kms->base.aspace = NULL;
+ dpu_kms->base.vm = NULL;
}
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
- aspace = msm_kms_init_aspace(dpu_kms->dev);
- if (IS_ERR(aspace))
- return PTR_ERR(aspace);
+ vm = msm_kms_init_vm(dpu_kms->dev);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
- dpu_kms->base.aspace = aspace;
+ dpu_kms->base.vm = vm;
return 0;
}
@@ -1189,10 +1189,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto err_pm_put;
}
- dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
+ dpu_kms->mdss = qcom_ubwc_config_get_data();
if (IS_ERR(dpu_kms->mdss)) {
rc = PTR_ERR(dpu_kms->mdss);
- DPU_ERROR("failed to get MDSS data: %d\n", rc);
+ DPU_ERROR("failed to get UBWC config data: %d\n", rc);
goto err_pm_put;
}
@@ -1512,6 +1512,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
{ .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, },
+ { .compatible = "qcom,sar2130p-dpu", .data = &dpu_sar2130p_cfg, },
{ .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
{ .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
@@ -1532,6 +1533,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
{ .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
+ { .compatible = "qcom,sm8750-dpu", .data = &dpu_sm8750_cfg, },
{ .compatible = "qcom,x1e80100-dpu", .data = &dpu_x1e80100_cfg, },
{}
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index a57ec2ec1060..993cf512f8c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -60,7 +60,7 @@ struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
const struct dpu_mdss_cfg *catalog;
- const struct msm_mdss_data *mdss;
+ const struct qcom_ubwc_cfg_data *mdss;
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index e03d6091f736..01171c535a27 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -17,8 +17,9 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_drv.h"
-#include "msm_mdss.h"
#include "dpu_kms.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_util.h"
@@ -71,7 +72,7 @@ static const uint32_t qcom_compressed_supported_formats[] = {
/*
* struct dpu_plane - local dpu plane structure
- * @aspace: address space pointer
+ * @vm: address space pointer
* @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
@@ -646,7 +647,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
- struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
if (!new_state->fb)
@@ -654,9 +654,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
- /* cache aspace */
- pstate->aspace = kms->base.aspace;
-
/*
* TODO: Need to sort out the msm_framebuffer_prepare() call below so
* we can use msm_atomic_prepare_fb() instead of doing the
@@ -664,13 +661,10 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
*/
drm_gem_plane_helper_prepare_fb(plane, new_state);
- if (pstate->aspace) {
- ret = msm_framebuffer_prepare(new_state->fb,
- pstate->aspace, pstate->needs_dirtyfb);
- if (ret) {
- DPU_ERROR("failed to prepare framebuffer\n");
- return ret;
- }
+ ret = msm_framebuffer_prepare(new_state->fb, pstate->needs_dirtyfb);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
}
return 0;
@@ -689,8 +683,7 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
- msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace,
- old_pstate->needs_dirtyfb);
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->needs_dirtyfb);
}
static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
@@ -915,10 +908,9 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return 0;
}
-static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
- struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct msm_format *fmt,
- uint32_t max_linewidth)
+static int dpu_plane_is_multirect_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt)
{
if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect))
@@ -930,10 +922,6 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
if (MSM_FORMAT_IS_YUV(fmt))
return false;
- if (MSM_FORMAT_IS_UBWC(fmt) &&
- drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
- return false;
-
if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
return false;
@@ -941,6 +929,27 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
return true;
}
+static int dpu_plane_is_parallel_capable(struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ if (MSM_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
+ return false;
+
+ return true;
+}
+
+static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ return dpu_plane_is_multirect_capable(sspp, pipe_cfg, fmt) &&
+ dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth);
+}
+
static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct drm_atomic_state *state,
const struct drm_crtc_state *crtc_state)
@@ -1002,6 +1011,69 @@ static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dp
return true;
}
+static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate,
+ struct dpu_plane_state *prev_adjacent_pstate,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe *prev_pipe = &prev_adjacent_pstate->pipe;
+ struct dpu_sw_pipe_cfg *prev_pipe_cfg = &prev_adjacent_pstate->pipe_cfg;
+ const struct msm_format *prev_fmt = msm_framebuffer_format(prev_adjacent_pstate->base.fb);
+ u16 max_tile_height = 1;
+
+ if (prev_adjacent_pstate->r_pipe.sspp != NULL ||
+ prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE)
+ return false;
+
+ if (!dpu_plane_is_multirect_capable(pipe->sspp, pipe_cfg, fmt) ||
+ !dpu_plane_is_multirect_capable(prev_pipe->sspp, prev_pipe_cfg, prev_fmt))
+ return false;
+
+ if (MSM_FORMAT_IS_UBWC(fmt))
+ max_tile_height = max(max_tile_height, fmt->tile_height);
+
+ if (MSM_FORMAT_IS_UBWC(prev_fmt))
+ max_tile_height = max(max_tile_height, prev_fmt->tile_height);
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->sspp = NULL;
+
+ if (dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth) &&
+ dpu_plane_is_parallel_capable(prev_pipe_cfg, prev_fmt, max_linewidth) &&
+ (pipe_cfg->dst_rect.x1 >= prev_pipe_cfg->dst_rect.x2 ||
+ prev_pipe_cfg->dst_rect.x1 >= pipe_cfg->dst_rect.x2)) {
+ pipe->sspp = prev_pipe->sspp;
+
+ pipe->multirect_index = DPU_SSPP_RECT_1;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ prev_pipe->multirect_index = DPU_SSPP_RECT_0;
+ prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ return true;
+ }
+
+ if (pipe_cfg->dst_rect.y1 >= prev_pipe_cfg->dst_rect.y2 + 2 * max_tile_height ||
+ prev_pipe_cfg->dst_rect.y1 >= pipe_cfg->dst_rect.y2 + 2 * max_tile_height) {
+ pipe->sspp = prev_pipe->sspp;
+
+ pipe->multirect_index = DPU_SSPP_RECT_1;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+
+ prev_pipe->multirect_index = DPU_SSPP_RECT_0;
+ prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+
+ return true;
+ }
+
+ return false;
+}
+
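Concretely, two SOLO planes on the same CRTC can now share one SSPP either side by side (PARALLEL, each half UBWC-width-safe) or stacked with enough vertical gap (TIME_MX, at least two UBWC tile heights apart). An illustration, assuming max_linewidth = 5120 and a UBWC tile_height of 4:

	/*
	 * illustration, not from the patch:
	 *   dst A = (0, 0)-(1920, 1080), dst B = (1920, 0)-(3840, 1080)
	 *     -> B.x1 >= A.x2 and both widths <= 5120 / 2: PARALLEL
	 *   dst A = (0, 0)-(1920, 1080), dst B = (0, 1088)-(1920, 2168)
	 *     -> B.y1 >= A.y2 + 2 * 4: TIME_MX
	 */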
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1102,13 +1174,14 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
struct dpu_global_state *global_state,
struct drm_atomic_state *state,
- struct drm_plane_state *plane_state)
+ struct drm_plane_state *plane_state,
+ struct drm_plane_state *prev_adjacent_plane_state)
{
const struct drm_crtc_state *crtc_state = NULL;
struct drm_plane *plane = plane_state->plane;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
struct dpu_rm_sspp_requirements reqs;
- struct dpu_plane_state *pstate;
+ struct dpu_plane_state *pstate, *prev_adjacent_pstate;
struct dpu_sw_pipe *pipe;
struct dpu_sw_pipe *r_pipe;
struct dpu_sw_pipe_cfg *pipe_cfg;
@@ -1120,6 +1193,8 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
plane_state->crtc);
pstate = to_dpu_plane_state(plane_state);
+ prev_adjacent_pstate = prev_adjacent_plane_state ?
+ to_dpu_plane_state(prev_adjacent_plane_state) : NULL;
pipe = &pstate->pipe;
r_pipe = &pstate->r_pipe;
pipe_cfg = &pstate->pipe_cfg;
@@ -1138,24 +1213,42 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation);
- pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!pipe->sspp)
- return -ENODEV;
+ if (drm_rect_width(&r_pipe_cfg->src_rect) == 0) {
+ if (!prev_adjacent_pstate ||
+ !dpu_plane_try_multirect_shared(pstate, prev_adjacent_pstate, fmt,
+ dpu_kms->catalog->caps->max_linewidth)) {
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
+ return -ENODEV;
- if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
- pipe->sspp,
- msm_framebuffer_format(plane_state->fb),
- dpu_kms->catalog->caps->max_linewidth)) {
- /* multirect is not possible, use two SSPP blocks */
- r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!r_pipe->sspp)
+ r_pipe->sspp = NULL;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
+ } else {
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
return -ENODEV;
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(plane_state->fb),
+ dpu_kms->catalog->caps->max_linewidth)) {
+ /* multirect is not possible, use two SSPP blocks */
+ r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!r_pipe->sspp)
+ return -ENODEV;
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
}
return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
@@ -1168,6 +1261,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
unsigned int num_planes)
{
unsigned int i;
+ struct drm_plane_state *prev_adjacent_plane_state = NULL;
for (i = 0; i < num_planes; i++) {
struct drm_plane_state *plane_state = states[i];
@@ -1177,9 +1271,12 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
continue;
int ret = dpu_plane_virtual_assign_resources(crtc, global_state,
- state, plane_state);
+ state, plane_state,
+ prev_adjacent_plane_state);
if (ret)
- return ret;
+ break;
+
+ prev_adjacent_plane_state = plane_state;
}
return 0;
@@ -1353,7 +1450,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane,
pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
pdpu->is_rt_pipe = is_rt_pipe;
- dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout);
+ dpu_format_populate_addrs(new_state->fb, &pstate->layout);
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index acd5725175cd..a3a6e9028333 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -17,7 +17,6 @@
/**
* struct dpu_plane_state: Define dpu extension of drm plane state object
* @base: base drm plane state object
- * @aspace: pointer to address space for input/output buffers
* @pipe: software pipe description
* @r_pipe: software pipe description of the second pipe
* @pipe_cfg: software pipe configuration
@@ -34,7 +33,6 @@
*/
struct dpu_plane_state {
struct drm_plane_state base;
- struct msm_gem_address_space *aspace;
struct dpu_sw_pipe pipe;
struct dpu_sw_pipe r_pipe;
struct dpu_sw_pipe_cfg pipe_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 3efbba425ca6..25382120cb1a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -40,7 +40,7 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
void __iomem *mmio)
{
int rc, i;
@@ -53,12 +53,14 @@ int dpu_rm_init(struct drm_device *dev,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
+ rm->has_legacy_ctls = (cat->mdss_ver->core_major_ver < 5);
+
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
- hw = dpu_hw_lm_init(dev, lm, mmio);
+ hw = dpu_hw_lm_init(dev, lm, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed lm object creation: err %d\n", rc);
@@ -140,7 +142,7 @@ int dpu_rm_init(struct drm_device *dev,
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
- hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
+ hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mdss_ver, cat->mixer_count, cat->mixer);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed ctl object creation: err %d\n", rc);
@@ -166,10 +168,10 @@ int dpu_rm_init(struct drm_device *dev,
struct dpu_hw_dsc *hw;
const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
- if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
+ if (cat->mdss_ver->core_major_ver >= 7)
hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
else
- hw = dpu_hw_dsc_init(dev, dsc, mmio);
+ hw = dpu_hw_dsc_init(dev, dsc, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
@@ -434,20 +436,19 @@ static int _dpu_rm_reserve_ctls(
int i = 0, j, num_ctls;
bool needs_split_display;
- /*
- * For non-CWB mode, each hw_intf needs its own hw_ctl to program its
- * control path.
- *
- * Hardcode num_ctls to 1 if CWB is enabled because in CWB, both the
- * writeback and real-time encoders must be driven by the same control
- * path
- */
- if (top->cwb_enabled)
- num_ctls = 1;
- else
+ if (rm->has_legacy_ctls) {
+ /*
+ * TODO: check if there is a need for special handling if
+ * DPU < 5.0 get CWB support.
+ */
num_ctls = top->num_intf;
- needs_split_display = _dpu_rm_needs_split_display(top);
+ needs_split_display = _dpu_rm_needs_split_display(top);
+ } else {
+ /* DPU >= 5.0 uses a single ACTIVE CTL for the whole display */
+ num_ctls = 1;
+ needs_split_display = false;
+ }
for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
const struct dpu_hw_ctl *ctl;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index a19dbdb1b6f4..ccd64404f12d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -24,6 +24,7 @@ struct dpu_global_state;
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
* @cdm_blk: cdm hardware resource
+ * @has_legacy_ctls: DPU uses pre-ACTIVE CTL blocks.
*/
struct dpu_rm {
struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
@@ -37,6 +38,7 @@ struct dpu_rm {
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE];
struct dpu_hw_blk *cdm_blk;
+ bool has_legacy_ctls;
};
struct dpu_rm_sspp_requirements {
@@ -67,7 +69,7 @@ struct msm_display_topology {
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
void __iomem *mmio);
int dpu_rm_reserve(struct dpu_rm *rm,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index b8610aa806ea..da53ca88251e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -17,7 +17,6 @@
struct mdp4_crtc {
struct drm_crtc base;
char name[8];
- int id;
int ovlp;
enum mdp4_dma dma;
bool enabled;
@@ -120,7 +119,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
- msm_gem_unpin_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->vm);
drm_gem_object_put(val);
}
@@ -369,7 +368,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_get(next_bo);
- msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
+ msm_gem_get_and_pin_iova(next_bo, kms->vm, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -427,7 +426,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->vm, &iova);
if (ret)
goto fail;
} else {
@@ -511,7 +510,7 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_CURSOR) {
update_cursor(crtc);
- drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+ drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->kms->wq);
}
}
@@ -539,7 +538,7 @@ static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
mdp4_crtc->flushed_mask),
msecs_to_jiffies(50));
if (ret <= 0)
- dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);
+ dev_warn(dev->dev, "vblank time out, crtc=%s\n", mdp4_crtc->base.name);
mdp4_crtc->flushed_mask = 0;
@@ -624,7 +623,7 @@ static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, int id, int ovlp_id,
+ struct drm_plane *plane, int ovlp_id,
enum mdp4_dma dma_id)
{
struct drm_crtc *crtc = NULL;
@@ -639,8 +638,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
- mdp4_crtc->id = id;
-
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index c469e66cfc11..0952c7f18abd 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -6,6 +6,8 @@
#include <linux/delay.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_vblank.h>
#include "msm_drv.h"
@@ -120,15 +122,16 @@ static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
- struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
- msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->vm);
drm_gem_object_put(mdp4_kms->blank_cursor_bo);
- if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu);
- msm_gem_address_space_put(aspace);
+ if (kms->vm) {
+ struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu;
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(kms->vm);
}
if (mdp4_kms->rpm_enabled)
@@ -189,7 +192,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct device_node *panel_node;
+ struct drm_bridge *next_bridge;
int dsi_id;
int ret;
@@ -199,27 +202,43 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
* bail out early if there is no panel node (no need to
* initialize LCDC encoder and LVDS connector)
*/
- panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
- if (!panel_node)
- return 0;
+ next_bridge = devm_drm_of_get_bridge(dev->dev, dev->dev->of_node, 0, 0);
+ if (IS_ERR(next_bridge)) {
+ ret = PTR_ERR(next_bridge);
+ if (ret == -ENODEV)
+ return 0;
+ return ret;
+ }
- encoder = mdp4_lcdc_encoder_init(dev, panel_node);
+ encoder = mdp4_lcdc_encoder_init(dev);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
- of_node_put(panel_node);
return PTR_ERR(encoder);
}
/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
encoder->possible_crtcs = 1 << DMA_P;
- connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
+ ret = drm_bridge_attach(encoder, next_bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach LVDS panel/bridge: %d\n", ret);
+
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(dev, encoder);
if (IS_ERR(connector)) {
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
- of_node_put(panel_node);
return PTR_ERR(connector);
}
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach LVDS connector: %d\n", ret);
+
+ return ret;
+ }
+
break;
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
@@ -231,9 +250,9 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* DTV can be hooked to DMA_E: */
encoder->possible_crtcs = 1 << 1;
- if (priv->hdmi) {
+ if (priv->kms->hdmi) {
/* Construct bridge/connector for HDMI: */
- ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ ret = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
@@ -245,7 +264,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* only DSI1 supported for now */
dsi_id = 0;
- if (!priv->dsi[dsi_id])
+ if (!priv->kms->dsi[dsi_id])
break;
encoder = mdp4_dsi_encoder_init(dev);
@@ -259,7 +278,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* TODO: Add DMA_S later? */
encoder->possible_crtcs = 1 << DMA_P;
- ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ ret = msm_dsi_modeset_init(priv->kms->dsi[dsi_id], dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
ret);
@@ -278,7 +297,6 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
struct drm_device *dev = mdp4_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
@@ -320,7 +338,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
goto fail;
}
- crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
+ crtc = mdp4_crtc_init(dev, plane, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
@@ -328,8 +346,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
ret = PTR_ERR(crtc);
goto fail;
}
-
- priv->num_crtcs++;
}
/*
@@ -380,7 +396,7 @@ static int mdp4_kms_init(struct drm_device *dev)
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
int ret;
u32 major, minor;
unsigned long max_clk;
@@ -449,19 +465,20 @@ static int mdp4_kms_init(struct drm_device *dev)
} else if (!mmu) {
DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- aspace = NULL;
+ vm = NULL;
} else {
- aspace = msm_gem_address_space_create(mmu,
- "mdp4", 0x1000, 0x100000000 - 0x1000);
+ vm = msm_gem_vm_create(dev, mmu, "mdp4",
+ 0x1000, 0x100000000 - 0x1000,
+ true);
- if (IS_ERR(aspace)) {
+ if (IS_ERR(vm)) {
if (!IS_ERR(mmu))
mmu->funcs->destroy(mmu);
- ret = PTR_ERR(aspace);
+ ret = PTR_ERR(vm);
goto fail;
}
- kms->aspace = aspace;
+ kms->vm = vm;
}
ret = modeset_init(mdp4_kms);
@@ -478,7 +495,7 @@ static int mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->vm,
&mdp4_kms->blank_cursor_iova);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index 94b1ba92785f..fb348583dc84 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -185,18 +185,13 @@ void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, int id, int ovlp_id,
+ struct drm_plane *plane, int ovlp_id,
enum mdp4_dma dma_id);
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
-long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
-struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct device_node *panel_node);
-
-struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct device_node *panel_node, struct drm_encoder *encoder);
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev);
#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
@@ -207,13 +202,6 @@ static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
}
#endif
-#ifdef CONFIG_COMMON_CLK
-struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
-#else
-static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
+struct clk *mpd4_get_lcdc_clock(struct drm_device *dev);
#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 8bbc7fb881d5..06a307c1272d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -14,7 +14,6 @@
struct mdp4_lcdc_encoder {
struct drm_encoder base;
- struct device_node *panel_node;
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
@@ -262,19 +261,12 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
return;
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
- panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (!IS_ERR(panel)) {
- drm_panel_disable(panel);
- drm_panel_unprepare(panel);
- }
-
/*
* Wait for a vsync so we know the ENABLE=0 latched before
* the (connector) source of the vsync's gets disabled,
@@ -300,7 +292,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
to_mdp4_lcdc_encoder(encoder);
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel;
uint32_t config;
int ret;
@@ -335,12 +326,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
- panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (!IS_ERR(panel)) {
- drm_panel_prepare(panel);
- drm_panel_enable(panel);
- }
-
setup_phy(encoder);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
@@ -348,22 +333,34 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
mdp4_lcdc_encoder->enabled = true;
}
+static enum drm_mode_status
+mdp4_lcdc_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, requested);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
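The new mode_valid hook reimplements the check that lived in the deleted LVDS connector: convert mode->clock from kHz to Hz and accept the mode only if the clock tree can produce that rate exactly. A minimal standalone sketch of that comparison, with a stand-in for clk_round_rate() and a made-up PLL granularity:

/*
 * Userspace sketch of the pixel-clock validation above.
 * round_rate() is a stand-in for clk_round_rate(); the 250 kHz
 * granularity and the sample rates are hypothetical.
 */
#include <stdio.h>

static long round_rate(long requested)
{
	/* pretend the PLL can only hit multiples of 250 kHz */
	return (requested / 250000) * 250000;
}

static int mode_clock_ok(int mode_clock_khz)
{
	long requested = 1000L * mode_clock_khz;	/* kHz -> Hz */
	long actual = round_rate(requested);

	return actual == requested;
}

int main(void)
{
	printf("65000 kHz: %s\n", mode_clock_ok(65000) ? "MODE_OK" : "MODE_CLOCK_RANGE");
	printf("65123 kHz: %s\n", mode_clock_ok(65123) ? "MODE_OK" : "MODE_CLOCK_RANGE");
	return 0;
}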
static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
.mode_set = mdp4_lcdc_encoder_mode_set,
.disable = mdp4_lcdc_encoder_disable,
.enable = mdp4_lcdc_encoder_enable,
+ .mode_valid = mdp4_lcdc_encoder_mode_valid,
};
-long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
-{
- struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
- to_mdp4_lcdc_encoder(encoder);
- return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate);
-}
-
/* initialize encoder */
-struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct device_node *panel_node)
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
@@ -374,14 +371,11 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
if (IS_ERR(mdp4_lcdc_encoder))
return ERR_CAST(mdp4_lcdc_encoder);
- mdp4_lcdc_encoder->panel_node = panel_node;
-
encoder = &mdp4_lcdc_encoder->base;
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
- /* TODO: do we need different pll in other cases? */
- mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
+ mdp4_lcdc_encoder->lcdc_clk = mpd4_get_lcdc_clock(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
deleted file mode 100644
index 52e728181b52..000000000000
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
- */
-
-#include "mdp4_kms.h"
-
-struct mdp4_lvds_connector {
- struct drm_connector base;
- struct drm_encoder *encoder;
- struct device_node *panel_node;
- struct drm_panel *panel;
-};
-#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
-
-static enum drm_connector_status mdp4_lvds_connector_detect(
- struct drm_connector *connector, bool force)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
-
- if (!mdp4_lvds_connector->panel) {
- mdp4_lvds_connector->panel =
- of_drm_find_panel(mdp4_lvds_connector->panel_node);
- if (IS_ERR(mdp4_lvds_connector->panel))
- mdp4_lvds_connector->panel = NULL;
- }
-
- return mdp4_lvds_connector->panel ?
- connector_status_connected :
- connector_status_disconnected;
-}
-
-static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
-
- drm_connector_cleanup(connector);
-
- kfree(mdp4_lvds_connector);
-}
-
-static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- struct drm_panel *panel = mdp4_lvds_connector->panel;
- int ret = 0;
-
- if (panel)
- ret = drm_panel_get_modes(panel, connector);
-
- return ret;
-}
-
-static enum drm_mode_status
-mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- struct drm_encoder *encoder = mdp4_lvds_connector->encoder;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = mdp4_lcdc_round_pixclk(encoder, requested);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
-
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- return MODE_OK;
-}
-
-static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
- .detect = mdp4_lvds_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = mdp4_lvds_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
- .get_modes = mdp4_lvds_connector_get_modes,
- .mode_valid = mdp4_lvds_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct device_node *panel_node, struct drm_encoder *encoder)
-{
- struct drm_connector *connector = NULL;
- struct mdp4_lvds_connector *mdp4_lvds_connector;
-
- mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
- if (!mdp4_lvds_connector)
- return ERR_PTR(-ENOMEM);
-
- mdp4_lvds_connector->encoder = encoder;
- mdp4_lvds_connector->panel_node = panel_node;
-
- connector = &mdp4_lvds_connector->base;
-
- drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs);
-
- connector->polled = 0;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, encoder);
-
- return connector;
-}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index ab8c0c187fb2..fa2c29470510 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -122,40 +122,59 @@ static const struct clk_ops mpd4_lvds_pll_ops = {
.set_rate = mpd4_lvds_pll_set_rate,
};
-static const char *mpd4_lvds_pll_parents[] = {
- "pxo",
+static const struct clk_parent_data mpd4_lvds_pll_parents[] = {
+ { .fw_name = "pxo", .name = "pxo", },
};
static struct clk_init_data pll_init = {
.name = "mpd4_lvds_pll",
.ops = &mpd4_lvds_pll_ops,
- .parent_names = mpd4_lvds_pll_parents,
+ .parent_data = mpd4_lvds_pll_parents,
.num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
};
-struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
+static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
{
struct mdp4_lvds_pll *lvds_pll;
- struct clk *clk;
int ret;
lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL);
- if (!lvds_pll) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!lvds_pll)
+ return ERR_PTR(-ENOMEM);
lvds_pll->dev = dev;
lvds_pll->pll_hw.init = &pll_init;
- clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto fail;
+ ret = devm_clk_hw_register(dev->dev, &lvds_pll->pll_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_of_clk_add_hw_provider(dev->dev, of_clk_hw_simple_get, &lvds_pll->pll_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &lvds_pll->pll_hw;
+}
+
+struct clk *mpd4_get_lcdc_clock(struct drm_device *dev)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+
+ /* TODO: do we need different pll in other cases? */
+ hw = mpd4_lvds_pll_init(dev);
+ if (IS_ERR(hw)) {
+ DRM_DEV_ERROR(dev->dev, "failed to register LVDS PLL\n");
+ return ERR_CAST(hw);
}
- return clk;
+ clk = devm_clk_get(dev->dev, "lcdc_clk");
+ if (clk == ERR_PTR(-ENOENT)) {
+ drm_warn(dev, "can't get LCDC clock, using PLL directly\n");
-fail:
- return ERR_PTR(ret);
+ return devm_clk_hw_get_clk(dev->dev, hw, "lcdc_clk");
+ }
+
+ return clk;
}
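mpd4_get_lcdc_clock() prefers a board-provided "lcdc_clk" and falls back to the just-registered PLL only when devm_clk_get() returns -ENOENT; any other error pointer propagates to the caller unchanged. A userspace sketch of the ERR_PTR idiom this relies on (get_external_clk() and the dummy clk objects are stand-ins, not driver API):

/*
 * Userspace sketch of the ERR_PTR fallback above. Only the
 * -ENOENT case triggers the PLL fallback.
 */
#include <stdio.h>
#include <errno.h>

#define ERR_PTR(err)	((void *)(long)(err))

static int pll_clk;	/* stands in for the registered LVDS PLL's clk */

static void *get_external_clk(int present)
{
	static int board_clk;

	return present ? (void *)&board_clk : ERR_PTR(-ENOENT);
}

static void *get_lcdc_clock(int dt_has_lcdc_clk)
{
	void *clk = get_external_clk(dt_has_lcdc_clk);

	if (clk == ERR_PTR(-ENOENT)) {
		fprintf(stderr, "can't get LCDC clock, using PLL directly\n");
		return &pll_clk;	/* fall back to the PLL's own clk */
	}

	return clk;
}

int main(void)
{
	printf("with DT clock:    %p\n", get_lcdc_clock(1));
	printf("without DT clock: %p\n", get_lcdc_clock(0));
	return 0;
}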
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 3fefb2088008..098c3b5ff2b2 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -79,30 +79,25 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
static int mdp4_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct msm_drm_private *priv = plane->dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
if (!new_state->fb)
return 0;
drm_gem_plane_helper_prepare_fb(plane, new_state);
- return msm_framebuffer_prepare(new_state->fb, kms->aspace, false);
+ return msm_framebuffer_prepare(new_state->fb, false);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- struct mdp4_kms *mdp4_kms = get_kms(plane);
- struct msm_kms *kms = &mdp4_kms->base.base;
struct drm_framebuffer *fb = old_state->fb;
if (!fb)
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, kms->aspace, false);
+ msm_framebuffer_cleanup(fb, false);
}
@@ -141,7 +136,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
- struct msm_kms *kms = &mdp4_kms->base.base;
enum mdp4_pipe pipe = mdp4_plane->pipe;
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@@ -153,13 +147,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 0));
+ msm_framebuffer_iova(fb, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 1));
+ msm_framebuffer_iova(fb, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 2));
+ msm_framebuffer_iova(fb, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 3));
+ msm_framebuffer_iova(fb, 3));
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 0f653e62b4a0..4c4900a7beda 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -169,7 +169,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
- msm_gem_unpin_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->vm);
drm_gem_object_put(val);
}
@@ -993,7 +993,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->vm,
&mdp5_crtc->cursor.iova);
if (ret) {
drm_gem_object_put(cursor_bo);
@@ -1196,7 +1196,7 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
}
if (pending & PENDING_CURSOR)
- drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
+ drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->kms->wq);
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 3fcca7a3d82e..5b6ca8dd929e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -198,11 +198,12 @@ static void mdp5_destroy(struct mdp5_kms *mdp5_kms);
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_gem_address_space *aspace = kms->aspace;
- if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu);
- msm_gem_address_space_put(aspace);
+ if (kms->vm) {
+ struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu;
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(kms->vm);
}
mdp_kms_destroy(&mdp5_kms->base);
@@ -311,7 +312,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
break;
case INTF_HDMI:
- if (!priv->hdmi)
+ if (!priv->kms->hdmi)
break;
ctl = mdp5_ctlm_request(ctlm, intf->num);
@@ -326,7 +327,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
- ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ ret = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
break;
case INTF_DSI:
{
@@ -334,14 +335,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
mdp5_cfg_get_hw_config(mdp5_kms->cfg);
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
- if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
+ if ((dsi_id >= ARRAY_SIZE(priv->kms->dsi)) || (dsi_id < 0)) {
DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
intf->num);
ret = -EINVAL;
break;
}
- if (!priv->dsi[dsi_id])
+ if (!priv->kms->dsi[dsi_id])
break;
ctl = mdp5_ctlm_request(ctlm, intf->num);
@@ -356,9 +357,10 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
- ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ ret = msm_dsi_modeset_init(priv->kms->dsi[dsi_id], dev, encoder);
if (!ret)
- mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
+ mdp5_encoder_set_intf_mode(encoder,
+ msm_dsi_is_cmd_mode(priv->kms->dsi[dsi_id]));
break;
}
@@ -374,7 +376,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
unsigned int num_crtcs;
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
@@ -442,7 +443,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
- priv->num_crtcs++;
}
/*
@@ -450,7 +450,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
* crtcs for the encoders
*/
drm_for_each_encoder(encoder, dev)
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ encoder->possible_crtcs = (1 << dev->mode_config.num_crtc) - 1;
return 0;
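With the driver-private CRTC counter gone, the encoder mask is derived from dev->mode_config.num_crtc, which the DRM core increments as each CRTC registers; (1 << n) - 1 sets one bit per CRTC. A trivial standalone check of the mask arithmetic:

#include <stdio.h>

int main(void)
{
	for (int num_crtc = 1; num_crtc <= 4; num_crtc++)
		printf("num_crtc=%d -> possible_crtcs=0x%x\n",
		       num_crtc, (1 << num_crtc) - 1);
	return 0;
}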
@@ -500,7 +500,7 @@ static int mdp5_kms_init(struct drm_device *dev)
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms = priv->kms;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
int i, ret;
ret = mdp5_init(to_platform_device(dev->dev), dev);
@@ -534,13 +534,13 @@ static int mdp5_kms_init(struct drm_device *dev)
}
mdelay(16);
- aspace = msm_kms_init_aspace(mdp5_kms->dev);
- if (IS_ERR(aspace)) {
- ret = PTR_ERR(aspace);
+ vm = msm_kms_init_vm(mdp5_kms->dev);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
goto fail;
}
- kms->aspace = aspace;
+ kms->vm = vm;
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index bb1601921938..7c790406d533 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -135,8 +135,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct msm_drm_private *priv = plane->dev->dev_private;
- struct msm_kms *kms = priv->kms;
bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;
if (!new_state->fb)
@@ -144,14 +142,12 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
drm_gem_plane_helper_prepare_fb(plane, new_state);
- return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
+ return msm_framebuffer_prepare(new_state->fb, needs_dirtyfb);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_framebuffer *fb = old_state->fb;
bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;
@@ -159,7 +155,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
+ msm_framebuffer_cleanup(fb, needed_dirtyfb);
}
static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
@@ -467,8 +463,6 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
enum mdp5_pipe pipe,
struct drm_framebuffer *fb)
{
- struct msm_kms *kms = &mdp5_kms->base.base;
-
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
@@ -478,13 +472,13 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 0));
+ msm_framebuffer_iova(fb, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 1));
+ msm_framebuffer_iova(fb, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 2));
+ msm_framebuffer_iova(fb, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 3));
+ msm_framebuffer_iova(fb, 3));
}
/* Note: mdp5_plane->pipe_lock must be locked */
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 07a2c1e87219..071bcdea80f7 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -127,18 +127,18 @@ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
priv = drm_dev->dev_private;
kms = priv->kms;
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
- if (!priv->dp[i])
+ for (i = 0; i < ARRAY_SIZE(kms->dp); i++) {
+ if (!kms->dp[i])
continue;
- msm_dp_snapshot(disp_state, priv->dp[i]);
+ msm_dp_snapshot(disp_state, kms->dp[i]);
}
- for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
- if (!priv->dsi[i])
+ for (i = 0; i < ARRAY_SIZE(kms->dsi); i++) {
+ if (!kms->dsi[i])
continue;
- msm_dsi_snapshot(disp_state, priv->dsi[i]);
+ msm_dsi_snapshot(disp_state, kms->dsi[i]);
}
if (kms->funcs->snapshot)
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 70fdc9fe228a..41018e82efa1 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -11,24 +11,38 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
-#include "dp_catalog.h"
#include "dp_audio.h"
+#include "dp_drm.h"
#include "dp_panel.h"
#include "dp_reg.h"
#include "dp_display.h"
#include "dp_utils.h"
struct msm_dp_audio_private {
- struct platform_device *audio_pdev;
struct platform_device *pdev;
struct drm_device *drm_dev;
- struct msm_dp_catalog *catalog;
+ void __iomem *link_base;
u32 channels;
struct msm_dp_audio msm_dp_audio;
};
+static inline u32 msm_dp_read_link(struct msm_dp_audio_private *audio, u32 offset)
+{
+ return readl_relaxed(audio->link_base + offset);
+}
+
+static inline void msm_dp_write_link(struct msm_dp_audio_private *audio,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure link reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, audio->link_base + offset);
+}
+
static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
{
struct dp_sdp_header sdp_hdr = {
@@ -37,8 +51,12 @@ static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x00,
.HB3 = audio->channels - 1,
};
+ u32 header[2];
+
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
- msm_dp_catalog_write_audio_stream(audio->catalog, &sdp_hdr);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_STREAM_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_STREAM_1, header[1]);
}
static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
@@ -49,8 +67,12 @@ static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x17,
.HB3 = 0x0 | (0x11 << 2),
};
+ u32 header[2];
- msm_dp_catalog_write_audio_timestamp(audio->catalog, &sdp_hdr);
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
}
static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
@@ -61,8 +83,12 @@ static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x1b,
.HB3 = 0x0 | (0x11 << 2),
};
+ u32 header[2];
+
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
- msm_dp_catalog_write_audio_infoframe(audio->catalog, &sdp_hdr);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
}
static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
@@ -73,8 +99,12 @@ static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x0f,
.HB3 = 0x00,
};
+ u32 header[2];
- msm_dp_catalog_write_audio_copy_mgmt(audio->catalog, &sdp_hdr);
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
}
static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
@@ -85,13 +115,53 @@ static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x0f,
.HB3 = 0x00,
};
+ u32 header[2];
+ u32 reg;
+
+ /* XXX: is it necessary to preserve this field? */
+ reg = msm_dp_read_link(audio, MMSS_DP_AUDIO_ISRC_1);
+ sdp_hdr.HB3 = FIELD_GET(HEADER_3_MASK, reg);
+
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_ISRC_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_ISRC_1, header[1]);
+}
+
+static void msm_dp_audio_config_sdp(struct msm_dp_audio_private *audio)
+{
+ u32 sdp_cfg, sdp_cfg2;
+
+ sdp_cfg = msm_dp_read_link(audio, MMSS_DP_SDP_CFG);
+ /* AUDIO_TIMESTAMP_SDP_EN */
+ sdp_cfg |= BIT(1);
+ /* AUDIO_STREAM_SDP_EN */
+ sdp_cfg |= BIT(2);
+ /* AUDIO_COPY_MANAGEMENT_SDP_EN */
+ sdp_cfg |= BIT(5);
+ /* AUDIO_ISRC_SDP_EN */
+ sdp_cfg |= BIT(6);
+ /* AUDIO_INFOFRAME_SDP_EN */
+ sdp_cfg |= BIT(20);
+
+ drm_dbg_dp(audio->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
+
+ msm_dp_write_link(audio, MMSS_DP_SDP_CFG, sdp_cfg);
- msm_dp_catalog_write_audio_isrc(audio->catalog, &sdp_hdr);
+ sdp_cfg2 = msm_dp_read_link(audio, MMSS_DP_SDP_CFG2);
+ /* IFRM_REGSRC -> Do not use reg values */
+ sdp_cfg2 &= ~BIT(0);
+ /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
+ sdp_cfg2 &= ~BIT(1);
+
+ drm_dbg_dp(audio->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
+
+ msm_dp_write_link(audio, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
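msm_dp_audio_config_sdp() open-codes the former catalog helper as two read-modify-write sequences: OR the per-SDP enable bits into MMSS_DP_SDP_CFG, then clear the two REGSRC bits in MMSS_DP_SDP_CFG2. A standalone sketch of just the bit arithmetic (the starting register values are made up; the driver reads them over MMIO):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	uint32_t sdp_cfg = 0x0, sdp_cfg2 = 0x3;	/* hypothetical readbacks */

	/* TIMESTAMP, STREAM, COPY_MANAGEMENT, ISRC, INFOFRAME enables */
	sdp_cfg |= BIT(1) | BIT(2) | BIT(5) | BIT(6) | BIT(20);
	printf("sdp_cfg  = 0x%x\n", sdp_cfg);	/* 0x100066 */

	/* IFRM_REGSRC and AUDIO_STREAM_HB3_REGSRC cleared */
	sdp_cfg2 &= ~(BIT(0) | BIT(1));
	printf("sdp_cfg2 = 0x%x\n", sdp_cfg2);	/* 0x0 */
	return 0;
}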
static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
{
- msm_dp_catalog_audio_config_sdp(audio->catalog);
+ msm_dp_audio_config_sdp(audio);
msm_dp_audio_stream_sdp(audio);
msm_dp_audio_timestamp_sdp(audio);
@@ -102,8 +172,7 @@ static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio)
{
- u32 select = 0;
- struct msm_dp_catalog *catalog = audio->catalog;
+ u32 select, acr_ctrl;
switch (audio->msm_dp_audio.bw_code) {
case DP_LINK_BW_1_62:
@@ -124,13 +193,17 @@ static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio)
break;
}
- msm_dp_catalog_audio_config_acr(catalog, select);
+ acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
+
+ drm_dbg_dp(audio->drm_dev, "select: %#x, acr_ctrl: %#x\n",
+ select, acr_ctrl);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 safe_to_exit_level = 0;
+ u32 safe_to_exit_level, mainlink_levels;
switch (audio->msm_dp_audio.lane_count) {
case 1:
@@ -150,34 +223,40 @@ static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
break;
}
- msm_dp_catalog_audio_sfe_level(catalog, safe_to_exit_level);
+ mainlink_levels = msm_dp_read_link(audio, REG_DP_MAINLINK_LEVELS);
+ mainlink_levels &= 0xFE0;
+ mainlink_levels |= safe_to_exit_level;
+
+ drm_dbg_dp(audio->drm_dev,
+ "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
+ mainlink_levels, safe_to_exit_level);
+
+ msm_dp_write_link(audio, REG_DP_MAINLINK_LEVELS, mainlink_levels);
}
static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable)
{
- struct msm_dp_catalog *catalog = audio->catalog;
+ u32 audio_ctrl;
- msm_dp_catalog_audio_enable(catalog, enable);
+ audio_ctrl = msm_dp_read_link(audio, MMSS_DP_AUDIO_CFG);
+
+ if (enable)
+ audio_ctrl |= BIT(0);
+ else
+ audio_ctrl &= ~BIT(0);
+
+ drm_dbg_dp(audio->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_CFG, audio_ctrl);
+ /* make sure audio engine is disabled */
+ wmb();
}
-static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev)
+static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_display)
{
struct msm_dp_audio *msm_dp_audio;
- struct msm_dp *msm_dp_display;
-
- if (!pdev) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
msm_dp_audio = msm_dp_display->msm_dp_audio;
-
if (!msm_dp_audio) {
DRM_ERROR("invalid msm_dp_audio data\n");
return ERR_PTR(-EINVAL);
@@ -186,68 +265,16 @@ static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device
return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
}
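With the platform-device lookup gone, msm_dp_audio_get_data() only has to map the embedded msm_dp_audio back to its enclosing private struct. container_of() is plain offset arithmetic; a runnable sketch with stand-in types:

/* Userspace sketch of the container_of() step above; types are stand-ins. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct msm_dp_audio { int lane_count; };

struct msm_dp_audio_private {
	int channels;
	struct msm_dp_audio msm_dp_audio;	/* embedded public part */
};

int main(void)
{
	struct msm_dp_audio_private priv = { .channels = 2 };
	struct msm_dp_audio *pub = &priv.msm_dp_audio;

	struct msm_dp_audio_private *back =
		container_of(pub, struct msm_dp_audio_private, msm_dp_audio);

	printf("recovered channels = %d\n", back->channels);	/* 2 */
	return 0;
}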
-static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
- struct device *codec_dev)
-{
-
- struct platform_device *pdev;
- struct msm_dp *msm_dp_display;
-
- pdev = to_platform_device(dev);
- if (!pdev) {
- pr_err("invalid input\n");
- return -ENODEV;
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- pr_err("invalid input\n");
- return -ENODEV;
- }
-
- return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev);
-}
-
-static int msm_dp_audio_get_eld(struct device *dev,
- void *data, uint8_t *buf, size_t len)
-{
- struct platform_device *pdev;
- struct msm_dp *msm_dp_display;
-
- pdev = to_platform_device(dev);
-
- if (!pdev) {
- DRM_ERROR("invalid input\n");
- return -ENODEV;
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- DRM_ERROR("invalid input\n");
- return -ENODEV;
- }
-
- mutex_lock(&msm_dp_display->connector->eld_mutex);
- memcpy(buf, msm_dp_display->connector->eld,
- min(sizeof(msm_dp_display->connector->eld), len));
- mutex_unlock(&msm_dp_display->connector->eld_mutex);
-
- return 0;
-}
-
-int msm_dp_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+int msm_dp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
int rc = 0;
struct msm_dp_audio_private *audio;
- struct platform_device *pdev;
struct msm_dp *msm_dp_display;
- pdev = to_platform_device(dev);
- msm_dp_display = platform_get_drvdata(pdev);
+ msm_dp_display = to_dp_bridge(bridge)->msm_dp_display;
/*
* there could be cases where sound card can be opened even
@@ -262,7 +289,7 @@ int msm_dp_audio_hw_params(struct device *dev,
goto end;
}
- audio = msm_dp_audio_get_data(pdev);
+ audio = msm_dp_audio_get_data(msm_dp_display);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
@@ -281,15 +308,14 @@ end:
return rc;
}
-static void msm_dp_audio_shutdown(struct device *dev, void *data)
+void msm_dp_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct msm_dp_audio_private *audio;
- struct platform_device *pdev;
struct msm_dp *msm_dp_display;
- pdev = to_platform_device(dev);
- msm_dp_display = platform_get_drvdata(pdev);
- audio = msm_dp_audio_get_data(pdev);
+ msm_dp_display = to_dp_bridge(bridge)->msm_dp_display;
+ audio = msm_dp_audio_get_data(msm_dp_display);
if (IS_ERR(audio)) {
DRM_ERROR("failed to get audio data\n");
return;
@@ -311,55 +337,14 @@ static void msm_dp_audio_shutdown(struct device *dev, void *data)
msm_dp_display_signal_audio_complete(msm_dp_display);
}
-static const struct hdmi_codec_ops msm_dp_audio_codec_ops = {
- .hw_params = msm_dp_audio_hw_params,
- .audio_shutdown = msm_dp_audio_shutdown,
- .get_eld = msm_dp_audio_get_eld,
- .hook_plugged_cb = msm_dp_audio_hook_plugged_cb,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &msm_dp_audio_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio)
-{
- struct msm_dp_audio_private *audio_priv;
-
- audio_priv = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
-
- if (audio_priv->audio_pdev) {
- platform_device_unregister(audio_priv->audio_pdev);
- audio_priv->audio_pdev = NULL;
- }
-}
-
-int msm_dp_register_audio_driver(struct device *dev,
- struct msm_dp_audio *msm_dp_audio)
-{
- struct msm_dp_audio_private *audio_priv;
-
- audio_priv = container_of(msm_dp_audio,
- struct msm_dp_audio_private, msm_dp_audio);
-
- audio_priv->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
-}
-
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_catalog *catalog)
+ void __iomem *link_base)
{
int rc = 0;
struct msm_dp_audio_private *audio;
struct msm_dp_audio *msm_dp_audio;
- if (!pdev || !catalog) {
+ if (!pdev) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
@@ -372,7 +357,7 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
}
audio->pdev = pdev;
- audio->catalog = catalog;
+ audio->link_base = link_base;
msm_dp_audio = &audio->msm_dp_audio;
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index beea34cbab77..ce2342856adb 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -8,9 +8,10 @@
#include <linux/platform_device.h>
-#include "dp_catalog.h"
#include <sound/hdmi-codec.h>
+struct drm_bridge;
+
/**
* struct msm_dp_audio
* @lane_count: number of lanes configured in current session
@@ -27,30 +28,13 @@ struct msm_dp_audio {
 * Creates an instance of dp audio.
*
* @pdev: caller's platform device instance.
- * @catalog: an instance of msm_dp_catalog module.
+ * @link_base: pointer to the mapped DP link register region.
*
 * Returns the error code in case of failure, otherwise
 * an instance of the newly created msm_dp_module.
*/
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_catalog *catalog);
-
-/**
- * msm_dp_register_audio_driver()
- *
- * Registers DP device with hdmi_codec interface.
- *
- * @dev: DP device instance.
- * @msm_dp_audio: an instance of msm_dp_audio module.
- *
- *
- * Returns the error code in case of failure, otherwise
- * zero on success.
- */
-int msm_dp_register_audio_driver(struct device *dev,
- struct msm_dp_audio *msm_dp_audio);
-
-void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio);
+ void __iomem *link_base);
/**
* msm_dp_audio_put()
@@ -61,10 +45,12 @@ void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm
*/
void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio);
-int msm_dp_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params);
+int msm_dp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+void msm_dp_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
#endif /* _DP_AUDIO_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index bc8d46abfc61..3825a2fb48e2 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -4,6 +4,7 @@
*/
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <drm/drm_print.h>
@@ -22,7 +23,7 @@ enum msm_dp_aux_err {
struct msm_dp_aux_private {
struct device *dev;
- struct msm_dp_catalog *catalog;
+ void __iomem *aux_base;
struct phy *phy;
@@ -45,6 +46,80 @@ struct msm_dp_aux_private {
struct drm_dp_aux msm_dp_aux;
};
+static inline u32 msm_dp_read_aux(struct msm_dp_aux_private *aux, u32 offset)
+{
+ return readl_relaxed(aux->aux_base + offset);
+}
+
+static inline void msm_dp_write_aux(struct msm_dp_aux_private *aux,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure aux reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, aux->aux_base + offset);
+}
+
+static void msm_dp_aux_clear_hw_interrupts(struct msm_dp_aux_private *aux)
+{
+ msm_dp_read_aux(aux, REG_DP_PHY_AUX_INTERRUPT_STATUS);
+ msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+ msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+ msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+}
+
+/*
+ * NOTE: resetting AUX controller will also clear any pending HPD related interrupts
+ */
+static void msm_dp_aux_reset(struct msm_dp_aux_private *aux)
+{
+ u32 aux_ctrl;
+
+ aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
+
+ aux_ctrl |= DP_AUX_CTRL_RESET;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ aux_ctrl &= ~DP_AUX_CTRL_RESET;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+static void msm_dp_aux_enable(struct msm_dp_aux_private *aux)
+{
+ u32 aux_ctrl;
+
+ aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
+
+ msm_dp_write_aux(aux, REG_DP_TIMEOUT_COUNT, 0xffff);
+ msm_dp_write_aux(aux, REG_DP_AUX_LIMITS, 0xffff);
+
+ aux_ctrl |= DP_AUX_CTRL_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+static void msm_dp_aux_disable(struct msm_dp_aux_private *aux)
+{
+ u32 aux_ctrl;
+
+ aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
+ aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+static int msm_dp_aux_wait_for_hpd_connect_state(struct msm_dp_aux_private *aux,
+ unsigned long wait_us)
+{
+ u32 state;
+
+ /* poll for hpd connected status every 2ms and timeout after wait_us */
+ return readl_poll_timeout(aux->aux_base +
+ REG_DP_DP_HPD_INT_STATUS,
+ state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
+ min(wait_us, 2000), wait_us);
+}
+
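msm_dp_aux_wait_for_hpd_connect_state() replaces the catalog call with readl_poll_timeout() on the HPD status register. That helper is essentially a bounded poll loop; roughly sketched below in standalone C (the register read is simulated, and the real macro additionally sleeps between polls and re-reads once after the timeout):

/* Rough standalone sketch of a readl_poll_timeout()-style loop. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define HPD_CONNECTED	(1u << 0)	/* stand-in status bit */

static uint32_t read_status(void)
{
	static int polls;

	/* pretend the sink asserts HPD on the third poll */
	return ++polls >= 3 ? HPD_CONNECTED : 0;
}

static int poll_timeout(uint32_t *out, uint32_t want,
			unsigned long sleep_us, unsigned long timeout_us)
{
	unsigned long waited = 0;

	for (;;) {
		*out = read_status();
		if (*out & want)
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		waited += sleep_us;	/* stand-in for usleep_range() */
	}
}

int main(void)
{
	uint32_t state;
	int ret = poll_timeout(&state, HPD_CONNECTED, 2000, 500000);

	printf("ret=%d state=0x%x\n", ret, state);
	return 0;
}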
#define MAX_AUX_RETRIES 5
static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
@@ -88,11 +163,11 @@ static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
/* index = 0, write */
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
- msm_dp_catalog_aux_write_data(aux->catalog, reg);
+ msm_dp_write_aux(aux, REG_DP_AUX_DATA, reg);
}
- msm_dp_catalog_aux_clear_trans(aux->catalog, false);
- msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, 0);
+ msm_dp_aux_clear_hw_interrupts(aux);
reg = 0; /* Transaction number == 1 */
if (!aux->native) { /* i2c */
@@ -106,7 +181,7 @@ static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
}
reg |= DP_AUX_TRANS_CTRL_GO;
- msm_dp_catalog_aux_write_trans(aux->catalog, reg);
+ msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, reg);
return len;
}
@@ -139,20 +214,22 @@ static ssize_t msm_dp_aux_cmd_fifo_rx(struct msm_dp_aux_private *aux,
u32 i, actual_i;
u32 len = msg->size;
- msm_dp_catalog_aux_clear_trans(aux->catalog, true);
+ data = msm_dp_read_aux(aux, REG_DP_AUX_TRANS_CTRL);
+ data &= ~DP_AUX_TRANS_CTRL_GO;
+ msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, data);
data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= DP_AUX_DATA_READ; /* read */
- msm_dp_catalog_aux_write_data(aux->catalog, data);
+ msm_dp_write_aux(aux, REG_DP_AUX_DATA, data);
dp = msg->buffer;
/* discard first byte */
- data = msm_dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_read_aux(aux, REG_DP_AUX_DATA);
for (i = 0; i < len; i++) {
- data = msm_dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_read_aux(aux, REG_DP_AUX_DATA);
*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
@@ -335,8 +412,8 @@ static ssize_t msm_dp_aux_transfer(struct drm_dp_aux *msm_dp_aux,
phy_calibrate(aux->phy);
}
/* reset aux if link is in connected state */
- if (msm_dp_catalog_link_is_connected(aux->catalog))
- msm_dp_catalog_aux_reset(aux->catalog);
+ if (msm_dp_aux_is_link_connected(msm_dp_aux))
+ msm_dp_aux_reset(aux);
} else {
aux->retry_cnt = 0;
switch (aux->aux_error_num) {
@@ -369,9 +446,8 @@ exit:
return ret;
}
-irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux, u32 isr)
{
- u32 isr;
struct msm_dp_aux_private *aux;
if (!msm_dp_aux) {
@@ -381,12 +457,6 @@ irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
- isr = msm_dp_catalog_aux_get_irq(aux->catalog);
-
- /* no interrupts pending, return immediately */
- if (!isr)
- return IRQ_NONE;
-
if (!aux->cmd_busy) {
DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
return IRQ_NONE;
@@ -403,7 +473,7 @@ irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
- msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_aux_clear_hw_interrupts(aux);
} else if (isr & DP_INTR_NACK_DEFER) {
aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
} else if (isr & DP_INTR_WRONG_ADDR) {
@@ -444,7 +514,7 @@ void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux)
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
phy_calibrate(aux->phy);
- msm_dp_catalog_aux_reset(aux->catalog);
+ msm_dp_aux_reset(aux);
}
void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux)
@@ -460,7 +530,7 @@ void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux)
mutex_lock(&aux->mutex);
- msm_dp_catalog_aux_enable(aux->catalog, true);
+ msm_dp_aux_enable(aux);
aux->retry_cnt = 0;
aux->initted = true;
@@ -476,7 +546,7 @@ void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux)
mutex_lock(&aux->mutex);
aux->initted = false;
- msm_dp_catalog_aux_enable(aux->catalog, false);
+ msm_dp_aux_disable(aux);
mutex_unlock(&aux->mutex);
}
@@ -517,23 +587,105 @@ static int msm_dp_wait_hpd_asserted(struct drm_dp_aux *msm_dp_aux,
if (ret)
return ret;
- ret = msm_dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
+ ret = msm_dp_aux_wait_for_hpd_connect_state(aux, wait_us);
pm_runtime_put_sync(aux->dev);
return ret;
}
-struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
+void msm_dp_aux_hpd_enable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ /* Configure REFTIMER and enable it */
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_REFTIMER);
+ reg |= DP_DP_HPD_REFTIMER_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_REFTIMER, reg);
+
+ /* Enable HPD */
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+}
+
+void msm_dp_aux_hpd_disable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_REFTIMER);
+ reg &= ~DP_DP_HPD_REFTIMER_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_REFTIMER, reg);
+
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_CTRL, 0);
+}
+
+void msm_dp_aux_hpd_intr_enable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
+ reg |= DP_DP_HPD_INT_MASK;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_MASK,
+ reg & DP_DP_HPD_INT_MASK);
+}
+
+void msm_dp_aux_hpd_intr_disable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
+ reg &= ~DP_DP_HPD_INT_MASK;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_MASK,
+ reg & DP_DP_HPD_INT_MASK);
+}
+
+u32 msm_dp_aux_get_hpd_intr_status(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ int isr, mask;
+
+ isr = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_STATUS);
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_ACK,
+ (isr & DP_DP_HPD_INT_MASK));
+ mask = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
+
+ /*
+ * We only want to return interrupts that are unmasked to the caller.
+ * However, the interrupt status field also contains other
+ * informational bits about the HPD state status, so we only mask
+ * out the part of the register that tells us about which interrupts
+ * are pending.
+ */
+ return isr & (mask | ~DP_DP_HPD_INT_MASK);
+}
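The return expression implements the comment above it: interrupt bits survive only if unmasked, while the status bits outside DP_DP_HPD_INT_MASK always pass through. Checking the masking in isolation (the 4-bit interrupt field here is hypothetical, not the hardware layout):

#include <stdio.h>
#include <stdint.h>

#define DP_DP_HPD_INT_MASK	0x0000000fu	/* hypothetical: interrupts in bits 0-3 */

int main(void)
{
	uint32_t isr  = 0x13b;	/* interrupts 0,1,3 pending + status bits 4,5,8 */
	uint32_t mask = 0x005;	/* only interrupts 0 and 2 unmasked */

	uint32_t ret = isr & (mask | ~DP_DP_HPD_INT_MASK);

	printf("returned = 0x%x\n", ret);	/* 0x131: masked irqs 1,3 dropped */
	return 0;
}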
+
+u32 msm_dp_aux_is_link_connected(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 status;
+
+ status = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_STATUS);
+ status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
+ status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
+
+ return status;
+}
+
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev,
struct phy *phy,
- bool is_edp)
+ bool is_edp,
+ void __iomem *aux_base)
{
struct msm_dp_aux_private *aux;
- if (!catalog) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
-
aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
if (!aux)
return ERR_PTR(-ENOMEM);
@@ -544,9 +696,9 @@ struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *cat
mutex_init(&aux->mutex);
aux->dev = dev;
- aux->catalog = catalog;
aux->phy = phy;
aux->retry_cnt = 0;
+ aux->aux_base = aux_base;
/*
* Use the drm_dp_aux_init() to use the aux adapter
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 39c5b4c8596a..4be02e8b4d0b 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -6,21 +6,28 @@
#ifndef _DP_AUX_H_
#define _DP_AUX_H_
-#include "dp_catalog.h"
#include <drm/display/drm_dp_helper.h>
int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux);
void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux);
-irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux);
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux, u32 isr);
void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled);
void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux);
void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux);
void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_enable(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_disable(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_intr_enable(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_intr_disable(struct drm_dp_aux *msm_dp_aux);
+u32 msm_dp_aux_get_hpd_intr_status(struct drm_dp_aux *msm_dp_aux);
+u32 msm_dp_aux_is_link_connected(struct drm_dp_aux *msm_dp_aux);
+
struct phy;
-struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev,
struct phy *phy,
- bool is_edp);
+ bool is_edp,
+ void __iomem *aux_base);
void msm_dp_aux_put(struct drm_dp_aux *aux);
#endif /*__DP_AUX_H_*/
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
deleted file mode 100644
index 7b7eadb2f83b..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ /dev/null
@@ -1,1298 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
-
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/platform_device.h>
-#include <linux/rational.h>
-#include <drm/display/drm_dp_helper.h>
-#include <drm/drm_print.h>
-
-#include "dp_catalog.h"
-#include "dp_reg.h"
-
-#define POLLING_SLEEP_US 1000
-#define POLLING_TIMEOUT_US 10000
-
-#define SCRAMBLER_RESET_COUNT_VALUE 0xFC
-
-#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
-#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
-
-#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
-
-#define DP_INTERRUPT_STATUS1 \
- (DP_INTR_AUX_XFER_DONE| \
- DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
- DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
- DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
- DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
-
-#define DP_INTERRUPT_STATUS1_ACK \
- (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
-#define DP_INTERRUPT_STATUS1_MASK \
- (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
-
-#define DP_INTERRUPT_STATUS2 \
- (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
- DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
-
-#define DP_INTERRUPT_STATUS2_ACK \
- (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
-#define DP_INTERRUPT_STATUS2_MASK \
- (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
-
-#define DP_INTERRUPT_STATUS4 \
- (PSR_UPDATE_INT | PSR_CAPTURE_INT | PSR_EXIT_INT | \
- PSR_UPDATE_ERROR_INT | PSR_WAKE_ERROR_INT)
-
-#define DP_INTERRUPT_MASK4 \
- (PSR_UPDATE_MASK | PSR_CAPTURE_MASK | PSR_EXIT_MASK | \
- PSR_UPDATE_ERROR_MASK | PSR_WAKE_ERROR_MASK)
-
-#define DP_DEFAULT_AHB_OFFSET 0x0000
-#define DP_DEFAULT_AHB_SIZE 0x0200
-#define DP_DEFAULT_AUX_OFFSET 0x0200
-#define DP_DEFAULT_AUX_SIZE 0x0200
-#define DP_DEFAULT_LINK_OFFSET 0x0400
-#define DP_DEFAULT_LINK_SIZE 0x0C00
-#define DP_DEFAULT_P0_OFFSET 0x1000
-#define DP_DEFAULT_P0_SIZE 0x0400
-
-struct dss_io_region {
- size_t len;
- void __iomem *base;
-};
-
-struct dss_io_data {
- struct dss_io_region ahb;
- struct dss_io_region aux;
- struct dss_io_region link;
- struct dss_io_region p0;
-};
-
-struct msm_dp_catalog_private {
- struct device *dev;
- struct drm_device *drm_dev;
- struct dss_io_data io;
- struct msm_dp_catalog msm_dp_catalog;
-};
-
-void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- struct dss_io_data *dss = &catalog->io;
-
- msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
- msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux");
- msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link");
- msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0");
-}
-
-static inline u32 msm_dp_read_aux(struct msm_dp_catalog_private *catalog, u32 offset)
-{
- return readl_relaxed(catalog->io.aux.base + offset);
-}
-
-static inline void msm_dp_write_aux(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure aux reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.aux.base + offset);
-}
-
-static inline u32 msm_dp_read_ahb(const struct msm_dp_catalog_private *catalog, u32 offset)
-{
- return readl_relaxed(catalog->io.ahb.base + offset);
-}
-
-static inline void msm_dp_write_ahb(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure phy reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.ahb.base + offset);
-}
-
-static inline void msm_dp_write_p0(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure interface reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.p0.base + offset);
-}
-
-static inline u32 msm_dp_read_p0(struct msm_dp_catalog_private *catalog,
- u32 offset)
-{
- /*
- * To make sure interface reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- return readl_relaxed(catalog->io.p0.base + offset);
-}
-
-static inline u32 msm_dp_read_link(struct msm_dp_catalog_private *catalog, u32 offset)
-{
- return readl_relaxed(catalog->io.link.base + offset);
-}
-
-static inline void msm_dp_write_link(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure link reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.link.base + offset);
-}
-
-/* aux related catalog functions */
-u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- return msm_dp_read_aux(catalog, REG_DP_AUX_DATA);
-}
-
-int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_aux(catalog, REG_DP_AUX_DATA, data);
- return 0;
-}
-
-int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
- return 0;
-}
-
-int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read)
-{
- u32 data;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- if (read) {
- data = msm_dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
- data &= ~DP_AUX_TRANS_CTRL_GO;
- msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
- } else {
- msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
- }
- return 0;
-}
-
-int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
- msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
- msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
- msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
- return 0;
-}
-
-/**
- * msm_dp_catalog_aux_reset() - reset AUX controller
- *
- * @msm_dp_catalog: DP catalog structure
- *
- * return: void
- *
- * This function reset AUX controller
- *
- * NOTE: reset AUX controller will also clear any pending HPD related interrupts
- *
- */
-void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 aux_ctrl;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
-
- aux_ctrl |= DP_AUX_CTRL_RESET;
- msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
- usleep_range(1000, 1100); /* h/w recommended delay */
-
- aux_ctrl &= ~DP_AUX_CTRL_RESET;
- msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
-}
-
-void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
-{
- u32 aux_ctrl;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
-
- if (enable) {
- msm_dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
- msm_dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
- aux_ctrl |= DP_AUX_CTRL_ENABLE;
- } else {
- aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
- }
-
- msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
-}
-
-int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
- unsigned long wait_us)
-{
- u32 state;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- /* poll for hpd connected status every 2ms and timeout after wait_us */
- return readl_poll_timeout(catalog->io.aux.base +
- REG_DP_DP_HPD_INT_STATUS,
- state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
- min(wait_us, 2000), wait_us);
-}
-
-u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 intr, intr_ack;
-
- intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS);
- intr &= ~DP_INTERRUPT_STATUS1_MASK;
- intr_ack = (intr & DP_INTERRUPT_STATUS1)
- << DP_INTERRUPT_STATUS_ACK_SHIFT;
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
- DP_INTERRUPT_STATUS1_MASK);
-
- return intr;
-
-}
-
-/* controller related catalog functions */
-void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
- u32 msm_dp_tu, u32 valid_boundary,
- u32 valid_boundary2)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
- msm_dp_write_link(catalog, REG_DP_TU, msm_dp_tu);
- msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
-}
-
-void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL, state);
-}
-
-void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 cfg)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg);
-
- msm_dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
-}
-
-void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
- u32 ln_mapping;
-
- ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
- ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
- ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
- ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
-
- msm_dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
- ln_mapping);
-}
-
-void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog,
- bool enable)
-{
- u32 val;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- val = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
-
- if (enable)
- val |= DP_MAINLINK_CTRL_ENABLE;
- else
- val &= ~DP_MAINLINK_CTRL_ENABLE;
-
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val);
-}
-
-void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog,
- bool enable)
-{
- u32 mainlink_ctrl;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable);
- if (enable) {
- /*
- * To make sure link reg writes happen before other operations,
- * msm_dp_write_link() function uses writel()
- */
- mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
-
- mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
- DP_MAINLINK_CTRL_ENABLE);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-
- mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-
- mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-
- mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
- DP_MAINLINK_FB_BOUNDARY_SEL);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
- } else {
- mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
- }
-}
-
-void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog,
- u32 colorimetry_cfg,
- u32 test_bits_depth)
-{
- u32 misc_val;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- misc_val = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
-
- /* clear bpp bits */
- misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
- misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
- misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
- /* Configure clock to synchronous mode */
- misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
-
- drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val);
- msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
-}
-
-void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 mainlink_ctrl, hw_revision;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
-
- hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
- if (hw_revision >= DP_HW_VERSION_1_2)
- mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;
- else
- mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;
-
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-}
-
-void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog,
- u32 rate, u32 stream_rate_khz,
- bool is_ycbcr_420)
-{
- u32 pixel_m, pixel_n;
- u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
- u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
- u32 const link_rate_hbr2 = 540000;
- u32 const link_rate_hbr3 = 810000;
- unsigned long den, num;
-
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- if (rate == link_rate_hbr3)
- pixel_div = 6;
- else if (rate == 162000 || rate == 270000)
- pixel_div = 2;
- else if (rate == link_rate_hbr2)
- pixel_div = 4;
- else
- DRM_ERROR("Invalid pixel mux divider\n");
-
- dispcc_input_rate = (rate * 10) / pixel_div;
-
- rational_best_approximation(dispcc_input_rate, stream_rate_khz,
- (unsigned long)(1 << 16) - 1,
- (unsigned long)(1 << 16) - 1, &den, &num);
-
- den = ~(den - num);
- den = den & 0xFFFF;
- pixel_m = num;
- pixel_n = den;
-
- mvid = (pixel_m & 0xFFFF) * 5;
- nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
- if (nvid < nvid_fixed) {
- u32 temp;
-
- temp = (nvid_fixed / nvid) * nvid;
- mvid = (nvid_fixed / nvid) * mvid;
- nvid = temp;
- }
-
- if (is_ycbcr_420)
- mvid /= 2;
-
- if (link_rate_hbr2 == rate)
- nvid *= 2;
-
- if (link_rate_hbr3 == rate)
- nvid *= 3;
-
- drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
- msm_dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
- msm_dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
- msm_dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
-}
-
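The M/N derivation above is dense, so here is a self-contained sketch mirroring the same arithmetic. A tiny continued-fraction routine stands in for the kernel's rational_best_approximation(), and the HBR2/1080p60 numbers are chosen purely for illustration:

#include <stdio.h>

/* tiny continued-fraction stand-in for rational_best_approximation() */
static void best_ratio(unsigned long n, unsigned long d, unsigned long max,
		       unsigned long *bn, unsigned long *bd)
{
	unsigned long p0 = 0, q0 = 1, p1 = 1, q1 = 0;

	while (d) {
		unsigned long a = n / d, r = n % d;
		unsigned long p2 = a * p1 + p0, q2 = a * q1 + q0;

		if (p2 > max || q2 > max)
			break;
		p0 = p1; q0 = q1; p1 = p2; q1 = q2;
		n = d; d = r;
	}
	*bn = p1; *bd = q1;
}

int main(void)
{
	unsigned long rate = 540000, stream_rate_khz = 148500; /* HBR2, 1080p60 */
	unsigned long pixel_div = 4;                           /* HBR2 divider */
	unsigned long dispcc_input_rate = rate * 10 / pixel_div;
	unsigned long den, num;

	/* note the swapped-looking outputs: the driver keeps the best
	 * numerator in 'den' and the best denominator in 'num' */
	best_ratio(dispcc_input_rate, stream_rate_khz, (1ul << 16) - 1,
		   &den, &num);

	den = ~(den - num) & 0xFFFF;                     /* pixel_n encoding */

	unsigned long mvid = (num & 0xFFFF) * 5;
	unsigned long nvid = (0xFFFF & ~den) + (num & 0xFFFF);

	if (nvid < 0x8000) {                 /* DP_LINK_CONSTANT_N_VALUE */
		unsigned long k = 0x8000 / nvid;
		mvid *= k;
		nvid *= k;
	}
	nvid *= 2;                           /* HBR2 doubles N */

	printf("mvid=%#lx nvid=%#lx\n", mvid, nvid);     /* 0x4641, 0xff78 */
	return 0;
}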
-int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog,
- u32 state_bit)
-{
- int bit, ret;
- u32 data;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- bit = BIT(state_bit - 1);
- drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
- msm_dp_catalog_ctrl_state_ctrl(msm_dp_catalog, bit);
-
- bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
-
- /* Poll for mainlink ready status */
- ret = readx_poll_timeout(readl, catalog->io.link.base +
- REG_DP_MAINLINK_READY,
- data, data & bit,
- POLLING_SLEEP_US, POLLING_TIMEOUT_US);
- if (ret < 0) {
- DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
- return ret;
- }
- return 0;
-}
-
-/**
- * msm_dp_catalog_hw_revision() - retrieve DP hw revision
- *
- * @msm_dp_catalog: DP catalog structure
- *
- * Return: DP controller hw revision
- *
- */
-u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog)
-{
- const struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- return msm_dp_read_ahb(catalog, REG_DP_HW_VERSION);
-}
-
-/**
- * msm_dp_catalog_ctrl_reset() - reset DP controller
- *
- * @msm_dp_catalog: DP catalog structure
- *
- * Return: void
- *
- * This function resets the DP controller
- *
- * NOTE: resetting the DP controller will also clear any pending HPD-related interrupts
- *
- */
-void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 sw_reset;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- sw_reset = msm_dp_read_ahb(catalog, REG_DP_SW_RESET);
-
- sw_reset |= DP_SW_RESET;
- msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
- usleep_range(1000, 1100); /* h/w recommended delay */
-
- sw_reset &= ~DP_SW_RESET;
- msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
-}
-
-bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 data;
- int ret;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- /* Poll for mainlink ready status */
- ret = readl_poll_timeout(catalog->io.link.base +
- REG_DP_MAINLINK_READY,
- data, data & DP_MAINLINK_READY_FOR_VIDEO,
- POLLING_SLEEP_US, POLLING_TIMEOUT_US);
- if (ret < 0) {
- DRM_ERROR("mainlink not ready\n");
- return false;
- }
-
- return true;
-}
-
-void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog,
- bool enable)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- if (enable) {
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS,
- DP_INTERRUPT_STATUS1_MASK);
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
- DP_INTERRUPT_STATUS2_MASK);
- } else {
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
- }
-}
-
-void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
- u32 intr_mask, bool en)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- u32 config = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
-
- config = (en ? config | intr_mask : config & ~intr_mask);
-
- drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n",
- intr_mask, config);
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
- config & DP_DP_HPD_INT_MASK);
-}
-
-void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
-
- /* Configure REFTIMER and enable it */
- reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
-
- /* Enable HPD */
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
-}
-
-void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
-
- reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE;
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
-
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
-}
-
-static void msm_dp_catalog_enable_sdp(struct msm_dp_catalog_private *catalog)
-{
- /* trigger sdp */
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP);
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0);
-}
-
-void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 config;
-
- /* enable PSR1 function */
- config = msm_dp_read_link(catalog, REG_PSR_CONFIG);
- config |= PSR1_SUPPORTED;
- msm_dp_write_link(catalog, REG_PSR_CONFIG, config);
-
- msm_dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
- msm_dp_catalog_enable_sdp(catalog);
-}
-
-void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 cmd;
-
- cmd = msm_dp_read_link(catalog, REG_PSR_CMD);
-
- cmd &= ~(PSR_ENTER | PSR_EXIT);
-
- if (enter)
- cmd |= PSR_ENTER;
- else
- cmd |= PSR_EXIT;
-
- msm_dp_catalog_enable_sdp(catalog);
- msm_dp_write_link(catalog, REG_PSR_CMD, cmd);
-}
-
-u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 status;
-
- status = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
- drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status);
- status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
- status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
-
- return status;
-}
-
-u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- int isr, mask;
-
- isr = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
- (isr & DP_DP_HPD_INT_MASK));
- mask = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
-
- /*
- * We only want to return interrupts that are unmasked to the caller.
- * However, the interrupt status field also contains other
- * informational bits about the HPD state status, so we only mask
- * out the part of the register that tells us about which interrupts
- * are pending.
- */
- return isr & (mask | ~DP_DP_HPD_INT_MASK);
-}
-
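The return expression deserves a note: it passes through unmasked pending interrupts plus every bit outside the interrupt field, so the HPD state status always survives. A toy example with an invented bit layout:

#include <stdint.h>
#include <stdio.h>

/* invented layout: low 4 bits are interrupt sources, the bits above
 * them carry the HPD state status */
#define HPD_INT_MASK  0x0Fu

int main(void)
{
	uint32_t isr  = 0x93;   /* two interrupts pending + state bits set */
	uint32_t mask = 0x01;   /* only one of the two is unmasked */

	/* keep unmasked interrupts AND everything outside the interrupt
	 * field, so the caller still sees the state status bits */
	uint32_t ret = isr & (mask | ~HPD_INT_MASK);

	printf("isr=%#x -> returned %#x\n", isr, ret);   /* 0x93 -> 0x91 */
	return 0;
}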
-u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 intr, intr_ack;
-
- intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS4);
- intr_ack = (intr & DP_INTERRUPT_STATUS4)
- << DP_INTERRUPT_STATUS_ACK_SHIFT;
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack);
-
- return intr;
-}
-
-int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 intr, intr_ack;
-
- intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
- intr &= ~DP_INTERRUPT_STATUS2_MASK;
- intr_ack = (intr & DP_INTERRUPT_STATUS2)
- << DP_INTERRUPT_STATUS_ACK_SHIFT;
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
- intr_ack | DP_INTERRUPT_STATUS2_MASK);
-
- return intr;
-}
-
-void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL,
- DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
- usleep_range(1000, 1100); /* h/w recommended delay */
- msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
-}
-
-void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
- u32 pattern)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 value = 0x0;
-
- /* Make sure to clear the current pattern before starting a new one */
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
-
- drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern);
- switch (pattern) {
- case DP_PHY_TEST_PATTERN_D10_2:
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
- break;
- case DP_PHY_TEST_PATTERN_ERROR_COUNT:
- value &= ~(1 << 16);
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- value |= SCRAMBLER_RESET_COUNT_VALUE;
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
- DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
- break;
- case DP_PHY_TEST_PATTERN_PRBS7:
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_PRBS7);
- break;
- case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
- /* 00111110000011111000001111100000 */
- msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
- 0x3E0F83E0);
- /* 00001111100000111110000011111000 */
- msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
- 0x0F83E0F8);
- /* 1111100000111110 */
- msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
- 0x0000F83E);
- break;
- case DP_PHY_TEST_PATTERN_CP2520:
- value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
-
- value = DP_HBR2_ERM_PATTERN;
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- value |= SCRAMBLER_RESET_COUNT_VALUE;
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
- DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
- value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- value |= DP_MAINLINK_CTRL_ENABLE;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
- break;
- case DP_PHY_TEST_PATTERN_SEL_MASK:
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
- DP_MAINLINK_CTRL_ENABLE);
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
- break;
- default:
- drm_dbg_dp(catalog->drm_dev,
- "No valid test pattern requested: %#x\n", pattern);
- break;
- }
-}
-
-u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- return msm_dp_read_link(catalog, REG_DP_MAINLINK_READY);
-}
-
-/* panel related catalog functions */
-int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 msm_dp_active)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 reg;
-
- msm_dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total);
- msm_dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
- msm_dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
- msm_dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, msm_dp_active);
-
- reg = msm_dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
-
- if (msm_dp_catalog->wide_bus_en)
- reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
- else
- reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
-
-
- DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", msm_dp_catalog->wide_bus_en, reg);
-
- msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
- return 0;
-}
-
-static void msm_dp_catalog_panel_send_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
-{
- struct msm_dp_catalog_private *catalog;
- u32 header[2];
- u32 val;
- int i;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
-
- msm_dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]);
-
- for (i = 0; i < sizeof(vsc_sdp->db); i += 4) {
- val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) |
- (vsc_sdp->db[i + 3] << 24));
- msm_dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val);
- }
-}
-
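The payload loop packs the SDP data bytes into little-endian 32-bit register words. A minimal sketch of just that packing, fed with arbitrary example bytes:

#include <stdint.h>
#include <stdio.h>

/* pack four bytes into one little-endian 32-bit register word */
static uint32_t pack_le32(const uint8_t *b)
{
	return b[0] | (b[1] << 8) | (b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	uint8_t db[8] = { 0x00, 0x10, 0x04, 0x00, 0xAA, 0xBB, 0xCC, 0xDD };

	for (unsigned i = 0; i < sizeof(db); i += 4)
		printf("GENERIC0_%u <- %#010x\n", 2 + i / 4, pack_le32(&db[i]));
	/* prints 0x00041000 then 0xddccbbaa */
	return 0;
}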
-static void msm_dp_catalog_panel_update_sdp(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
- u32 hw_revision;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
- if (hw_revision < DP_HW_VERSION_1_2 && hw_revision >= DP_HW_VERSION_1_0) {
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01);
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00);
- }
-}
-
-void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
-{
- struct msm_dp_catalog_private *catalog;
- u32 cfg, cfg2, misc;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
-
- cfg |= GEN0_SDP_EN;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
-
- cfg2 |= GENERIC0_SDPSIZE_VALID;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
-
- msm_dp_catalog_panel_send_vsc_sdp(msm_dp_catalog, vsc_sdp);
-
- /* indicates presence of VSC (BIT(6) of MISC1) */
- misc |= DP_MISC1_VSC_SDP;
-
- drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=1\n");
-
- pr_debug("misc settings = 0x%x\n", misc);
- msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
-
- msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
-}
-
-void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
- u32 cfg, cfg2, misc;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
-
- cfg &= ~GEN0_SDP_EN;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
-
- cfg2 &= ~GENERIC0_SDPSIZE_VALID;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
-
- /* switch back to MSA */
- misc &= ~DP_MISC1_VSC_SDP;
-
- drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=0\n");
-
- pr_debug("misc settings = 0x%x\n", misc);
- msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
-
- msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
-}
-
-void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
- struct drm_display_mode *drm_mode)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 hsync_period, vsync_period;
- u32 display_v_start, display_v_end;
- u32 hsync_start_x, hsync_end_x;
- u32 v_sync_width;
- u32 hsync_ctl;
- u32 display_hctl;
-
- /* TPG config parameters */
- hsync_period = drm_mode->htotal;
- vsync_period = drm_mode->vtotal;
-
- display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
- hsync_period);
- display_v_end = ((vsync_period - (drm_mode->vsync_start -
- drm_mode->vdisplay))
- * hsync_period) - 1;
-
- display_v_start += drm_mode->htotal - drm_mode->hsync_start;
- display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
-
- hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
- hsync_end_x = hsync_period - (drm_mode->hsync_start -
- drm_mode->hdisplay) - 1;
-
- v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
-
- hsync_ctl = (hsync_period << 16) |
- (drm_mode->hsync_end - drm_mode->hsync_start);
- display_hctl = (hsync_end_x << 16) | hsync_start_x;
-
-
- msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
- hsync_period);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
- hsync_period);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
- msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
- msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
-
- msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
- DP_TPG_CHECKERED_RECT_PATTERN);
- msm_dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
- DP_TPG_VIDEO_CONFIG_BPP_8BIT |
- DP_TPG_VIDEO_CONFIG_RGB);
- msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
- DP_BIST_ENABLE_DPBIST_EN);
- msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
- DP_TIMING_ENGINE_EN_EN);
- drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__);
-}
-
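The timing math converts drm_display_mode fields into TPG register values, counting vertical positions in pixel clocks (lines multiplied by hsync_period). A sketch of the same arithmetic, using standard CEA 1080p60 timings as sample input:

#include <stdio.h>

/* subset of drm_display_mode; values below are the CEA 1080p60 timings */
struct mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	struct mode m = { 1920, 2008, 2052, 2200, 1080, 1084, 1089, 1125 };
	int hsync_period = m.htotal;

	/* vertical positions are measured in pixel clocks, not lines */
	int v_start = (m.vtotal - m.vsync_start) * hsync_period
		      + (m.htotal - m.hsync_start);
	int v_end = (m.vtotal - (m.vsync_start - m.vdisplay)) * hsync_period
		    - 1 - (m.hsync_start - m.hdisplay);

	unsigned hsync_ctl = ((unsigned)hsync_period << 16)
			     | (unsigned)(m.hsync_end - m.hsync_start);
	unsigned display_hctl =
		((unsigned)(hsync_period - (m.hsync_start - m.hdisplay) - 1) << 16)
		| (unsigned)(m.htotal - m.hsync_start);

	printf("v_start=%d v_end=%d hsync_ctl=%#x display_hctl=%#x\n",
	       v_start, v_end, hsync_ctl, display_hctl);
	return 0;
}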
-void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
- msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
- msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
-}
-
-static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
-{
- struct resource *res;
- void __iomem *base;
-
- base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
- if (!IS_ERR(base))
- *len = resource_size(res);
-
- return base;
-}
-
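devm_platform_get_and_ioremap_resource() bundles the reg lookup, request_mem_region() and ioremap() into one device-managed call. A hypothetical probe-time sketch of the same helper; the function and message here are invented:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* looks up reg index 0, request_mem_region()s it and ioremaps it;
	 * all of it is undone automatically when the driver detaches */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_info(&pdev->dev, "mapped %pa, %zu bytes\n",
		 &res->start, (size_t)resource_size(res));
	return 0;
}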
-static int msm_dp_catalog_get_io(struct msm_dp_catalog_private *catalog)
-{
- struct platform_device *pdev = to_platform_device(catalog->dev);
- struct dss_io_data *dss = &catalog->io;
-
- dss->ahb.base = msm_dp_ioremap(pdev, 0, &dss->ahb.len);
- if (IS_ERR(dss->ahb.base))
- return PTR_ERR(dss->ahb.base);
-
- dss->aux.base = msm_dp_ioremap(pdev, 1, &dss->aux.len);
- if (IS_ERR(dss->aux.base)) {
- /*
- * The initial binding had a single reg, but in order to
- * support variation in the sub-region sizes this was split.
- * msm_dp_ioremap() will fail with -EINVAL here if only a single
- * reg is specified, so fill in the sub-region offsets and
- * lengths based on this single region.
- */
- if (PTR_ERR(dss->aux.base) == -EINVAL) {
- if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
- DRM_ERROR("legacy memory region not large enough\n");
- return -EINVAL;
- }
-
- dss->ahb.len = DP_DEFAULT_AHB_SIZE;
- dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
- dss->aux.len = DP_DEFAULT_AUX_SIZE;
- dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
- dss->link.len = DP_DEFAULT_LINK_SIZE;
- dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
- dss->p0.len = DP_DEFAULT_P0_SIZE;
- } else {
- DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
- return PTR_ERR(dss->aux.base);
- }
- } else {
- dss->link.base = msm_dp_ioremap(pdev, 2, &dss->link.len);
- if (IS_ERR(dss->link.base)) {
- DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
- return PTR_ERR(dss->link.base);
- }
-
- dss->p0.base = msm_dp_ioremap(pdev, 3, &dss->p0.len);
- if (IS_ERR(dss->p0.base)) {
- DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
- return PTR_ERR(dss->p0.base);
- }
- }
-
- return 0;
-}
-
-struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev)
-{
- struct msm_dp_catalog_private *catalog;
- int ret;
-
- catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
- if (!catalog)
- return ERR_PTR(-ENOMEM);
-
- catalog->dev = dev;
-
- ret = msm_dp_catalog_get_io(catalog);
- if (ret)
- return ERR_PTR(ret);
-
- return &catalog->msm_dp_catalog;
-}
-
-void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- struct dp_sdp_header tmp = *sdp_hdr;
- u32 header[2];
- u32 reg;
-
- /* XXX: is it necessary to preserve this field? */
- reg = msm_dp_read_link(catalog, MMSS_DP_AUDIO_ISRC_1);
- tmp.HB3 = FIELD_GET(HEADER_3_MASK, reg);
-
- msm_dp_utils_pack_sdp_header(&tmp, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_1, header[1]);
-}
-
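FIELD_GET() pulls a field out through its mask without hand-written shifts. A userspace sketch with stand-in GENMASK()/FIELD_GET() macros; placing HEADER_3_MASK at bits 31:24 is an assumption for illustration, not the driver's actual definition:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel macros */
#define GENMASK(h, l)   ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v) (((v) & (m)) >> __builtin_ctz(m))

#define HEADER_3_MASK   GENMASK(31, 24)   /* assumed placement of HB3 */

int main(void)
{
	uint32_t reg = 0x0005ac00 | (0x44u << 24);   /* HB3 in the top byte */

	printf("HB3=%#x\n", (unsigned)FIELD_GET(HEADER_3_MASK, reg)); /* 0x44 */
	return 0;
}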
-void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select)
-{
- struct msm_dp_catalog_private *catalog;
- u32 acr_ctrl;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
-
- drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n",
- select, acr_ctrl);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
-}
-
-void msm_dp_catalog_audio_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
-{
- struct msm_dp_catalog_private *catalog;
- u32 audio_ctrl;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- audio_ctrl = msm_dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
-
- if (enable)
- audio_ctrl |= BIT(0);
- else
- audio_ctrl &= ~BIT(0);
-
- drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
- /* make sure audio engine is disabled */
- wmb();
-}
-
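The wmb() pairs with the relaxed accessors used elsewhere in the file: batched configuration writes can use writel_relaxed(), but the write that arms or disarms the hardware needs ordering. A hypothetical helper showing the idiom (offsets and values invented):

#include <linux/io.h>

static void demo_program_block(void __iomem *base)
{
	writel_relaxed(0x1, base + 0x0);   /* config A: ordering not needed yet */
	writel_relaxed(0x2, base + 0x4);   /* config B */
	writel(0x1, base + 0x8);           /* kick: writel() orders prior writes */
}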
-void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
- u32 sdp_cfg = 0;
- u32 sdp_cfg2 = 0;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- sdp_cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
- /* AUDIO_TIMESTAMP_SDP_EN */
- sdp_cfg |= BIT(1);
- /* AUDIO_STREAM_SDP_EN */
- sdp_cfg |= BIT(2);
- /* AUDIO_COPY_MANAGEMENT_SDP_EN */
- sdp_cfg |= BIT(5);
- /* AUDIO_ISRC_SDP_EN */
- sdp_cfg |= BIT(6);
- /* AUDIO_INFOFRAME_SDP_EN */
- sdp_cfg |= BIT(20);
-
- drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
-
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
-
- sdp_cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- /* IFRM_REGSRC -> Do not use reg values */
- sdp_cfg2 &= ~BIT(0);
- /* AUDIO_STREAM_HB3_REGSRC -> Do not use reg values */
- sdp_cfg2 &= ~BIT(1);
-
- drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
-
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
-}
-
-void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level)
-{
- struct msm_dp_catalog_private *catalog;
- u32 mainlink_levels;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- mainlink_levels = msm_dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
- mainlink_levels &= 0xFE0;
- mainlink_levels |= safe_to_exit_level;
-
- drm_dbg_dp(catalog->drm_dev,
- "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
- mainlink_levels, safe_to_exit_level);
-
- msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
deleted file mode 100644
index 6678b0ac9a67..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_CATALOG_H_
-#define _DP_CATALOG_H_
-
-#include <drm/drm_modes.h>
-
-#include "dp_utils.h"
-#include "disp/msm_disp_snapshot.h"
-
-/* interrupts */
-#define DP_INTR_HPD BIT(0)
-#define DP_INTR_AUX_XFER_DONE BIT(3)
-#define DP_INTR_WRONG_ADDR BIT(6)
-#define DP_INTR_TIMEOUT BIT(9)
-#define DP_INTR_NACK_DEFER BIT(12)
-#define DP_INTR_WRONG_DATA_CNT BIT(15)
-#define DP_INTR_I2C_NACK BIT(18)
-#define DP_INTR_I2C_DEFER BIT(21)
-#define DP_INTR_PLL_UNLOCKED BIT(24)
-#define DP_INTR_AUX_ERROR BIT(27)
-
-#define DP_INTR_READY_FOR_VIDEO BIT(0)
-#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
-#define DP_INTR_FRAME_END BIT(6)
-#define DP_INTR_CRC_UPDATED BIT(9)
-
-#define DP_HW_VERSION_1_0 0x10000000
-#define DP_HW_VERSION_1_2 0x10020000
-
-struct msm_dp_catalog {
- bool wide_bus_en;
-};
-
-/* Debug module */
-void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state);
-
-/* AUX APIs */
-u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog);
-int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data);
-int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data);
-int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read);
-int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
- unsigned long wait_us);
-u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog);
-
-/* DP Controller APIs */
-void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state);
-void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 config);
-void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 cc, u32 tb);
-void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate,
- u32 stream_rate_khz, bool is_ycbcr_420);
-int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 pattern);
-u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog);
-bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
- u32 intr_mask, bool en);
-void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter);
-u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog);
-u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog);
-int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog);
-u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
- u32 msm_dp_tu, u32 valid_boundary,
- u32 valid_boundary2);
-void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
- u32 pattern);
-u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog);
-
-/* DP Panel APIs */
-int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 msm_dp_active);
-void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp);
-void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
- struct drm_display_mode *drm_mode);
-void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
-
-struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev);
-
-/* DP Audio APIs */
-void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select);
-void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable);
-void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog);
-void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *catalog, u32 safe_to_exit_level);
-
-#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index d8633a596f8d..c42fd2c17a32 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -6,14 +6,18 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/types.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/pm_opp.h>
+#include <linux/rational.h>
#include <linux/string_choices.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
@@ -21,11 +25,46 @@
#include "dp_ctrl.h"
#include "dp_link.h"
+#define POLLING_SLEEP_US 1000
+#define POLLING_TIMEOUT_US 10000
+
#define DP_KHZ_TO_HZ 1000
#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */
#define PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES (300 * HZ / 1000) /* 300 ms */
#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2)
+#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
+#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
+
+#define DP_INTERRUPT_STATUS1 \
+ (DP_INTR_AUX_XFER_DONE | \
+ DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+ DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+ DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+ DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
+#define DP_INTERRUPT_STATUS1_ACK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS1_MASK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS2 \
+ (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+ DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTERRUPT_STATUS2_ACK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS2_MASK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS4 \
+ (PSR_UPDATE_INT | PSR_CAPTURE_INT | PSR_EXIT_INT | \
+ PSR_UPDATE_ERROR_INT | PSR_WAKE_ERROR_INT)
+
+#define DP_INTERRUPT_MASK4 \
+ (PSR_UPDATE_MASK | PSR_CAPTURE_MASK | PSR_EXIT_MASK | \
+ PSR_UPDATE_ERROR_MASK | PSR_WAKE_ERROR_MASK)
+
#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0)
#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3)
@@ -77,7 +116,8 @@ struct msm_dp_ctrl_private {
struct drm_dp_aux *aux;
struct msm_dp_panel *panel;
struct msm_dp_link *link;
- struct msm_dp_catalog *catalog;
+ void __iomem *ahb_base;
+ void __iomem *link_base;
struct phy *phy;
@@ -95,11 +135,43 @@ struct msm_dp_ctrl_private {
struct completion psr_op_comp;
struct completion video_comp;
+ u32 hw_revision;
+
bool core_clks_on;
bool link_clks_on;
bool stream_clks_on;
};
+static inline u32 msm_dp_read_ahb(const struct msm_dp_ctrl_private *ctrl, u32 offset)
+{
+ return readl_relaxed(ctrl->ahb_base + offset);
+}
+
+static inline void msm_dp_write_ahb(struct msm_dp_ctrl_private *ctrl,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure phy reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, ctrl->ahb_base + offset);
+}
+
+static inline u32 msm_dp_read_link(struct msm_dp_ctrl_private *ctrl, u32 offset)
+{
+ return readl_relaxed(ctrl->link_base + offset);
+}
+
+static inline void msm_dp_write_link(struct msm_dp_ctrl_private *ctrl,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure link reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, ctrl->link_base + offset);
+}
+
static int msm_dp_aux_link_configure(struct drm_dp_aux *aux,
struct msm_dp_link_info *link)
{
@@ -119,6 +191,179 @@ static int msm_dp_aux_link_configure(struct drm_dp_aux *aux,
return 0;
}
+/*
+ * NOTE: resetting the DP controller will also clear any pending HPD-related interrupts
+ */
+void msm_dp_ctrl_reset(struct msm_dp_ctrl *msm_dp_ctrl)
+{
+ struct msm_dp_ctrl_private *ctrl =
+ container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+ u32 sw_reset;
+
+ sw_reset = msm_dp_read_ahb(ctrl, REG_DP_SW_RESET);
+
+ sw_reset |= DP_SW_RESET;
+ msm_dp_write_ahb(ctrl, REG_DP_SW_RESET, sw_reset);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ sw_reset &= ~DP_SW_RESET;
+ msm_dp_write_ahb(ctrl, REG_DP_SW_RESET, sw_reset);
+
+ if (!ctrl->hw_revision) {
+ ctrl->hw_revision = msm_dp_read_ahb(ctrl, REG_DP_HW_VERSION);
+ ctrl->panel->hw_revision = ctrl->hw_revision;
+ }
+}
+
+static u32 msm_dp_ctrl_get_aux_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 intr, intr_ack;
+
+ intr = msm_dp_read_ahb(ctrl, REG_DP_INTR_STATUS);
+ intr &= ~DP_INTERRUPT_STATUS1_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS1)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS,
+ intr_ack | DP_INTERRUPT_STATUS1_MASK);
+
+ return intr;
+}
+
+static u32 msm_dp_ctrl_get_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 intr, intr_ack;
+
+ intr = msm_dp_read_ahb(ctrl, REG_DP_INTR_STATUS2);
+ intr &= ~DP_INTERRUPT_STATUS2_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS2)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS2,
+ intr_ack | DP_INTERRUPT_STATUS2_MASK);
+
+ return intr;
+}
+
+void msm_dp_ctrl_enable_irq(struct msm_dp_ctrl *msm_dp_ctrl)
+{
+ struct msm_dp_ctrl_private *ctrl =
+ container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS,
+ DP_INTERRUPT_STATUS1_MASK);
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS2,
+ DP_INTERRUPT_STATUS2_MASK);
+}
+
+void msm_dp_ctrl_disable_irq(struct msm_dp_ctrl *msm_dp_ctrl)
+{
+ struct msm_dp_ctrl_private *ctrl =
+ container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS, 0x00);
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS2, 0x00);
+}
+
+static u32 msm_dp_ctrl_get_psr_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 intr, intr_ack;
+
+ intr = msm_dp_read_ahb(ctrl, REG_DP_INTR_STATUS4);
+ intr_ack = (intr & DP_INTERRUPT_STATUS4)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS4, intr_ack);
+
+ return intr;
+}
+
+static void msm_dp_ctrl_config_psr_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
+}
+
+static void msm_dp_ctrl_psr_mainlink_enable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 val;
+
+ val = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ val |= DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, val);
+}
+
+static void msm_dp_ctrl_psr_mainlink_disable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 val;
+
+ val = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ val &= ~DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, val);
+}
+
+static void msm_dp_ctrl_mainlink_enable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 mainlink_ctrl;
+
+ drm_dbg_dp(ctrl->drm_dev, "enable\n");
+
+ mainlink_ctrl = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+
+ mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
+ DP_MAINLINK_CTRL_ENABLE);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
+ DP_MAINLINK_FB_BOUNDARY_SEL);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
+static void msm_dp_ctrl_mainlink_disable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 mainlink_ctrl;
+
+ drm_dbg_dp(ctrl->drm_dev, "disable\n");
+
+ mainlink_ctrl = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
+static void msm_dp_setup_peripheral_flush(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 mainlink_ctrl;
+
+ mainlink_ctrl = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+
+ if (ctrl->hw_revision >= DP_HW_VERSION_1_2)
+ mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;
+ else
+ mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;
+
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
+static bool msm_dp_ctrl_mainlink_ready(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 data;
+ int ret;
+
+ /* Poll for mainlink ready status */
+ ret = readl_poll_timeout(ctrl->link_base + REG_DP_MAINLINK_READY,
+ data, data & DP_MAINLINK_READY_FOR_VIDEO,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("mainlink not ready\n");
+ return false;
+ }
+
+ return true;
+}
+
void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl)
{
struct msm_dp_ctrl_private *ctrl;
@@ -126,7 +371,7 @@ void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
reinit_completion(&ctrl->idle_comp);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_PUSH_IDLE);
if (!wait_for_completion_timeout(&ctrl->idle_comp,
IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
@@ -171,23 +416,50 @@ static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl)
if (ctrl->panel->psr_cap.version)
config |= DP_CONFIGURATION_CTRL_SEND_VSC;
- msm_dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
+ drm_dbg_dp(ctrl->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", config);
+
+ msm_dp_write_link(ctrl, REG_DP_CONFIGURATION_CTRL, config);
+}
+
+static void msm_dp_ctrl_lane_mapping(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
+ u32 ln_mapping;
+
+ ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
+ ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
+ ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
+ ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
+
+ msm_dp_write_link(ctrl, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
+ ln_mapping);
}
static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl)
{
- u32 cc, tb;
+ u32 colorimetry_cfg, test_bits_depth, misc_val;
- msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog);
- msm_dp_catalog_setup_peripheral_flush(ctrl->catalog);
+ msm_dp_ctrl_lane_mapping(ctrl);
+ msm_dp_setup_peripheral_flush(ctrl);
msm_dp_ctrl_config_ctrl(ctrl);
- tb = msm_dp_link_get_test_bits_depth(ctrl->link,
- ctrl->panel->msm_dp_mode.bpp);
- cc = msm_dp_link_get_colorimetry_config(ctrl->link);
- msm_dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
- msm_dp_panel_timing_cfg(ctrl->panel);
+ test_bits_depth = msm_dp_link_get_test_bits_depth(ctrl->link, ctrl->panel->msm_dp_mode.bpp);
+ colorimetry_cfg = msm_dp_link_get_colorimetry_config(ctrl->link);
+
+ misc_val = msm_dp_read_link(ctrl, REG_DP_MISC1_MISC0);
+
+ /* clear bpp bits */
+ misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
+ misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
+ misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
+ /* Configure clock to synchronous mode */
+ misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
+
+ drm_dbg_dp(ctrl->drm_dev, "misc settings = 0x%x\n", misc_val);
+ msm_dp_write_link(ctrl, REG_DP_MISC1_MISC0, misc_val);
+
+ msm_dp_panel_timing_cfg(ctrl->panel, ctrl->msm_dp_ctrl.wide_bus_en);
}
/*
@@ -1003,8 +1275,9 @@ static void msm_dp_ctrl_setup_tr_unit(struct msm_dp_ctrl_private *ctrl)
pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
msm_dp_tu, valid_boundary, valid_boundary2);
- msm_dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
- msm_dp_tu, valid_boundary, valid_boundary2);
+ msm_dp_write_link(ctrl, REG_DP_VALID_BOUNDARY, valid_boundary);
+ msm_dp_write_link(ctrl, REG_DP_TU, msm_dp_tu);
+ msm_dp_write_link(ctrl, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
}
static int msm_dp_ctrl_wait4video_ready(struct msm_dp_ctrl_private *ctrl)
@@ -1034,10 +1307,12 @@ static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl,
return 0;
}
-static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_update_phy_vx_px(struct msm_dp_ctrl_private *ctrl,
+ enum drm_dp_phy dp_phy)
{
struct msm_dp_link *link = ctrl->link;
- int ret = 0, lane, lane_cnt;
+ int lane, lane_cnt, reg;
+ int ret = 0;
u8 buf[4];
u32 max_level_reached = 0;
u32 voltage_swing_level = link->phy_params.v_level;
@@ -1075,8 +1350,13 @@ static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
drm_dbg_dp(ctrl->drm_dev, "sink: p|v=0x%x\n",
voltage_swing_level | pre_emphasis_level);
- ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
- buf, lane_cnt);
+
+ if (dp_phy == DP_PHY_DPRX)
+ reg = DP_TRAINING_LANE0_SET;
+ else
+ reg = DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
+
+ ret = drm_dp_dpcd_write(ctrl->aux, reg, buf, lane_cnt);
if (ret == lane_cnt)
ret = 0;
@@ -1084,9 +1364,10 @@ static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
}
static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
- u8 pattern)
+ u8 pattern, enum drm_dp_phy dp_phy)
{
u8 buf;
+ int reg;
int ret = 0;
drm_dbg_dp(ctrl->drm_dev, "sink: pattern=%x\n", pattern);
@@ -1096,51 +1377,71 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
if (pattern && pattern != DP_TRAINING_PATTERN_4)
buf |= DP_LINK_SCRAMBLING_DISABLE;
- ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf);
+ if (dp_phy == DP_PHY_DPRX)
+ reg = DP_TRAINING_PATTERN_SET;
+ else
+ reg = DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
+
+ ret = drm_dp_dpcd_writeb(ctrl->aux, reg, buf);
return ret == 1;
}
-static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl,
- u8 *link_status)
+static int msm_dp_ctrl_set_pattern_state_bit(struct msm_dp_ctrl_private *ctrl,
+ u32 state_bit)
{
- int ret = 0, len;
+ int bit, ret;
+ u32 data;
- len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
- if (len != DP_LINK_STATUS_SIZE) {
- DRM_ERROR("DP link status read failed, err: %d\n", len);
- ret = -EINVAL;
+ bit = BIT(state_bit - 1);
+ drm_dbg_dp(ctrl->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, bit);
+
+ bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
+
+ /* Poll for mainlink ready status */
+ ret = readx_poll_timeout(readl, ctrl->link_base + REG_DP_MAINLINK_READY,
+ data, data & bit,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
+ return ret;
}
- return ret;
+ return 0;
}
static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
- int *training_step)
+ int *training_step, enum drm_dp_phy dp_phy)
{
+ int delay_us;
int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 4;
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ delay_us = drm_dp_read_clock_recovery_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
*training_step = DP_TRAINING_1;
- ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
+ ret = msm_dp_ctrl_set_pattern_state_bit(ctrl, 1);
if (ret)
return ret;
msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE);
+ DP_LINK_SCRAMBLING_DISABLE, dp_phy);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_reset_phy_params_vx_px(ctrl->link);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
for (tries = 0; tries < maximum_retries; tries++) {
- drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
+ fsleep(delay_us);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status);
if (ret)
return ret;
@@ -1161,7 +1462,7 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
}
msm_dp_link_adjust_levels(ctrl->link, link_status);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
}
@@ -1213,22 +1514,32 @@ static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl)
return 0;
}
-static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl,
+ enum drm_dp_phy dp_phy)
{
- msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
- drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+ int delay_us;
+
+ msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE, dp_phy);
+
+ delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+ fsleep(delay_us);
}
static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
- int *training_step)
+ int *training_step, enum drm_dp_phy dp_phy)
{
+ int delay_us;
int tries = 0, ret = 0;
u8 pattern;
u32 state_ctrl_bit;
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
*training_step = DP_TRAINING_2;
@@ -1243,16 +1554,16 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
state_ctrl_bit = 2;
}
- ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
+ ret = msm_dp_ctrl_set_pattern_state_bit(ctrl, state_ctrl_bit);
if (ret)
return ret;
- msm_dp_ctrl_train_pattern_set(ctrl, pattern);
+ msm_dp_ctrl_train_pattern_set(ctrl, pattern, dp_phy);
for (tries = 0; tries <= maximum_retries; tries++) {
- drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+ fsleep(delay_us);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status);
if (ret)
return ret;
@@ -1262,7 +1573,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
}
msm_dp_link_adjust_levels(ctrl->link, link_status);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
@@ -1271,9 +1582,32 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
+static int msm_dp_ctrl_link_train_1_2(struct msm_dp_ctrl_private *ctrl,
+ int *training_step, enum drm_dp_phy dp_phy)
+{
+ int ret;
+
+ ret = msm_dp_ctrl_link_train_1(ctrl, training_step, dp_phy);
+ if (ret) {
+ DRM_ERROR("link training #1 on phy %d failed. ret=%d\n", dp_phy, ret);
+ return ret;
+ }
+ drm_dbg_dp(ctrl->drm_dev, "link training #1 on phy %d successful\n", dp_phy);
+
+ ret = msm_dp_ctrl_link_train_2(ctrl, training_step, dp_phy);
+ if (ret) {
+ DRM_ERROR("link training #2 on phy %d failed. ret=%d\n", dp_phy, ret);
+ return ret;
+ }
+ drm_dbg_dp(ctrl->drm_dev, "link training #2 on phy %d successful\n", dp_phy);
+
+ return 0;
+}
+
static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
+ int i;
int ret = 0;
const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
@@ -1286,8 +1620,6 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
link_info.rate = ctrl->link->link_params.rate;
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
- msm_dp_link_reset_phy_params_vx_px(ctrl->link);
-
msm_dp_aux_link_configure(ctrl->aux, &link_info);
if (drm_dp_max_downspread(dpcd))
@@ -1302,26 +1634,29 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
&assr, 1);
}
- ret = msm_dp_ctrl_link_train_1(ctrl, training_step);
+ for (i = ctrl->link->lttpr_count - 1; i >= 0; i--) {
+ enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
+
+ ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, dp_phy);
+ msm_dp_ctrl_clear_training_pattern(ctrl, dp_phy);
+
+ if (ret)
+ break;
+ }
+
if (ret) {
- DRM_ERROR("link training #1 failed. ret=%d\n", ret);
+ DRM_ERROR("link training of LTTPR(s) failed. ret=%d\n", ret);
goto end;
}
- /* print success info as this is a result of user initiated action */
- drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n");
-
- ret = msm_dp_ctrl_link_train_2(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, DP_PHY_DPRX);
if (ret) {
- DRM_ERROR("link training #2 failed. ret=%d\n", ret);
+ DRM_ERROR("link training on sink failed. ret=%d\n", ret);
goto end;
}
- /* print success info as this is a result of user initiated action */
- drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n");
-
end:
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
return ret;
}
@@ -1331,7 +1666,7 @@ static int msm_dp_ctrl_setup_main_link(struct msm_dp_ctrl_private *ctrl,
{
int ret = 0;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+ msm_dp_ctrl_mainlink_enable(ctrl);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
return ret;
@@ -1464,33 +1799,55 @@ static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl)
return ret;
}
-void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable)
+static void msm_dp_ctrl_enable_sdp(struct msm_dp_ctrl_private *ctrl)
{
- struct msm_dp_ctrl_private *ctrl;
+ /* trigger sdp */
+ msm_dp_write_link(ctrl, MMSS_DP_SDP_CFG3, UPDATE_SDP);
+ msm_dp_write_link(ctrl, MMSS_DP_SDP_CFG3, 0x0);
+}
- ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+static void msm_dp_ctrl_psr_enter(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 cmd;
- msm_dp_catalog_ctrl_reset(ctrl->catalog);
+ cmd = msm_dp_read_link(ctrl, REG_PSR_CMD);
- /*
- * all dp controller programmable registers will not
- * be reset to default value after DP_SW_RESET
- * therefore interrupt mask bits have to be updated
- * to enable/disable interrupts
- */
- msm_dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+ cmd &= ~(PSR_ENTER | PSR_EXIT);
+ cmd |= PSR_ENTER;
+
+ msm_dp_ctrl_enable_sdp(ctrl);
+ msm_dp_write_link(ctrl, REG_PSR_CMD, cmd);
+}
+
+static void msm_dp_ctrl_psr_exit(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 cmd;
+
+ cmd = msm_dp_read_link(ctrl, REG_PSR_CMD);
+
+ cmd &= ~(PSR_ENTER | PSR_EXIT);
+ cmd |= PSR_EXIT;
+
+ msm_dp_ctrl_enable_sdp(ctrl);
+ msm_dp_write_link(ctrl, REG_PSR_CMD, cmd);
}
void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl)
{
- u8 cfg;
struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl,
struct msm_dp_ctrl_private, msm_dp_ctrl);
+ u32 cfg;
if (!ctrl->panel->psr_cap.version)
return;
- msm_dp_catalog_ctrl_config_psr(ctrl->catalog);
+ /* enable PSR1 function */
+ cfg = msm_dp_read_link(ctrl, REG_PSR_CONFIG);
+ cfg |= PSR1_SUPPORTED;
+ msm_dp_write_link(ctrl, REG_PSR_CONFIG, cfg);
+
+ msm_dp_ctrl_config_psr_interrupt(ctrl);
+ msm_dp_ctrl_enable_sdp(ctrl);
cfg = DP_PSR_ENABLE;
drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1);
@@ -1516,29 +1873,37 @@ void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enter)
*/
if (enter) {
reinit_completion(&ctrl->psr_op_comp);
- msm_dp_catalog_ctrl_set_psr(ctrl->catalog, true);
+ msm_dp_ctrl_psr_enter(ctrl);
if (!wait_for_completion_timeout(&ctrl->psr_op_comp,
PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) {
DRM_ERROR("PSR_ENTRY timedout\n");
- msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
+ msm_dp_ctrl_psr_exit(ctrl);
return;
}
msm_dp_ctrl_push_idle(msm_dp_ctrl);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
- msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false);
+ msm_dp_ctrl_psr_mainlink_disable(ctrl);
} else {
- msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true);
+ msm_dp_ctrl_psr_mainlink_enable(ctrl);
- msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_ctrl_psr_exit(ctrl);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_SEND_VIDEO);
msm_dp_ctrl_wait4video_ready(ctrl);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
}
}
+static void msm_dp_ctrl_phy_reset(struct msm_dp_ctrl_private *ctrl)
+{
+ msm_dp_write_ahb(ctrl, REG_DP_PHY_CTRL,
+ DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+ msm_dp_write_ahb(ctrl, REG_DP_PHY_CTRL, 0x0);
+}
+
void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl)
{
struct msm_dp_ctrl_private *ctrl;
@@ -1547,7 +1912,7 @@ void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_ctrl_phy_reset(ctrl);
phy_init(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
@@ -1562,7 +1927,7 @@ void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_ctrl_phy_reset(ctrl);
phy_exit(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
@@ -1573,7 +1938,7 @@ static int msm_dp_ctrl_reinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
struct phy *phy = ctrl->phy;
int ret = 0;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes;
phy_configure(phy, &ctrl->phy_opts);
/*
@@ -1604,9 +1969,9 @@ static int msm_dp_ctrl_deinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
phy = ctrl->phy;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
- msm_dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_ctrl_reset(&ctrl->msm_dp_ctrl);
dev_pm_opp_set_rate(ctrl->dev, 0);
msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
@@ -1636,15 +2001,98 @@ static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl)
if (ret)
goto end;
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_SEND_VIDEO);
ret = msm_dp_ctrl_wait4video_ready(ctrl);
end:
return ret;
}
+#define SCRAMBLER_RESET_COUNT_VALUE 0xFC
+
+static void msm_dp_ctrl_send_phy_pattern(struct msm_dp_ctrl_private *ctrl,
+ u32 pattern)
+{
+ u32 value = 0x0;
+
+ /* Make sure to clear the current pattern before starting a new one */
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0x0);
+
+ drm_dbg_dp(ctrl->drm_dev, "pattern: %#x\n", pattern);
+ switch (pattern) {
+ case DP_PHY_TEST_PATTERN_D10_2:
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
+ break;
+
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ value &= ~(1 << 16);
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ break;
+
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_PRBS7);
+ break;
+
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
+ /* 00111110000011111000001111100000 */
+ msm_dp_write_link(ctrl, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+ 0x3E0F83E0);
+ /* 00001111100000111110000011111000 */
+ msm_dp_write_link(ctrl, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+ 0x0F83E0F8);
+ /* 1111100000111110 */
+ msm_dp_write_link(ctrl, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+ 0x0000F83E);
+ break;
+
+ case DP_PHY_TEST_PATTERN_CP2520:
+ value = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, value);
+
+ value = DP_HBR2_ERM_PATTERN;
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ value = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ value |= DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, value);
+ break;
+
+ case DP_PHY_TEST_PATTERN_SEL_MASK:
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL,
+ DP_MAINLINK_CTRL_ENABLE);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
+ break;
+
+ default:
+ drm_dbg_dp(ctrl->drm_dev,
+ "No valid test pattern requested: %#x\n", pattern);
+ break;
+ }
+}
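+
+/*
+ * Editorial note, not part of the patch: the DPCD PHY test pattern codes are
+ * mapped onto the controller's STATE_CTRL patterns above; e.g. D10.2 is
+ * emitted as training pattern 1 (TPS1 is the unscrambled D10.2 sequence),
+ * while the error-count and CP2520 compliance cases reuse the
+ * symbol-error-measure state with the scrambler-reset count programmed to
+ * SCRAMBLER_RESET_COUNT_VALUE.
+ */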
+
static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
{
bool success = false;
@@ -1659,11 +2107,11 @@ static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
DRM_ERROR("Failed to set v/p levels\n");
return false;
}
- msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
- msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_ctrl_send_phy_pattern(ctrl, pattern_requested);
+ msm_dp_ctrl_update_phy_vx_px(ctrl, DP_PHY_DPRX);
msm_dp_link_send_test_response(ctrl->link);
- pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
+ pattern_sent = msm_dp_read_link(ctrl, REG_DP_MAINLINK_READY);
switch (pattern_sent) {
case MR_LINK_TRAINING1:
@@ -1805,7 +2253,7 @@ static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl)
u8 link_status[DP_LINK_STATUS_SIZE];
int num_lanes = ctrl->link->link_params.num_lanes;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
return drm_dp_channel_eq_ok(link_status, num_lanes);
}
@@ -1860,10 +2308,10 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
break;
} else if (training_step == DP_TRAINING_1) {
/* link train_1 failed */
- if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_aux_is_link_connected(ctrl->aux))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
rc = msm_dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
@@ -1885,10 +2333,10 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
}
} else if (training_step == DP_TRAINING_2) {
/* link train_2 failed */
- if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_aux_is_link_connected(ctrl->aux))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
if (!drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes))
@@ -1902,7 +2350,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
}
/* stop link training before start re training */
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
}
rc = msm_dp_ctrl_reinitialize_mainlink(ctrl);
@@ -1926,7 +2374,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
* link training failed
* end txing train pattern here
*/
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
msm_dp_ctrl_deinitialize_mainlink(ctrl);
rc = -ECONNRESET;
@@ -1942,6 +2390,62 @@ static int msm_dp_ctrl_link_retrain(struct msm_dp_ctrl_private *ctrl)
return msm_dp_ctrl_setup_main_link(ctrl, &training_step);
}
+static void msm_dp_ctrl_config_msa(struct msm_dp_ctrl_private *ctrl,
+ u32 rate, u32 stream_rate_khz,
+ bool is_ycbcr_420)
+{
+ u32 pixel_m, pixel_n;
+ u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
+ u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
+ u32 const link_rate_hbr2 = 540000;
+ u32 const link_rate_hbr3 = 810000;
+ unsigned long den, num;
+
+ if (rate == link_rate_hbr3)
+ pixel_div = 6;
+ else if (rate == 162000 || rate == 270000)
+ pixel_div = 2;
+ else if (rate == link_rate_hbr2)
+ pixel_div = 4;
+ else
+ DRM_ERROR("Invalid pixel mux divider\n");
+
+ dispcc_input_rate = (rate * 10) / pixel_div;
+
+ rational_best_approximation(dispcc_input_rate, stream_rate_khz,
+ (unsigned long)(1 << 16) - 1,
+ (unsigned long)(1 << 16) - 1, &den, &num);
+
+ den = ~(den - num);
+ den = den & 0xFFFF;
+ pixel_m = num;
+ pixel_n = den;
+
+ mvid = (pixel_m & 0xFFFF) * 5;
+ nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
+ if (nvid < nvid_fixed) {
+ u32 temp;
+
+ temp = (nvid_fixed / nvid) * nvid;
+ mvid = (nvid_fixed / nvid) * mvid;
+ nvid = temp;
+ }
+
+ if (is_ycbcr_420)
+ mvid /= 2;
+
+ if (link_rate_hbr2 == rate)
+ nvid *= 2;
+
+ if (link_rate_hbr3 == rate)
+ nvid *= 3;
+
+ drm_dbg_dp(ctrl->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+ msm_dp_write_link(ctrl, REG_DP_SOFTWARE_MVID, mvid);
+ msm_dp_write_link(ctrl, REG_DP_SOFTWARE_NVID, nvid);
+}
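+
+/*
+ * Worked example (editorial, not part of the patch): for a 148500 kHz stream
+ * on HBR (rate = 270000, pixel_div = 2) the dispcc input rate is 1350000 kHz;
+ * rational_best_approximation() reduces 1350000/148500 to 100/11, giving
+ * mvid = 11 * 5 = 55 and nvid = 100. Scaling against
+ * DP_LINK_CONSTANT_N_VALUE (0x8000) then yields mvid = 17985 and
+ * nvid = 32700, i.e. M/N = 0.55, the required pixel-clock to
+ * link-symbol-clock ratio (148500/270000). HBR2 and HBR3 additionally
+ * scale nvid by 2 and 3, respectively.
+ */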
+
int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train)
{
int ret = 0;
@@ -1997,7 +2501,7 @@ int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train
msm_dp_ctrl_link_retrain(ctrl);
/* stop txing train pattern to end link training */
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
/*
* Set up transfer unit values and set controller state to send
@@ -2007,20 +2511,22 @@ int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train
msm_dp_ctrl_configure_source_params(ctrl);
- msm_dp_catalog_ctrl_config_msa(ctrl->catalog,
+ msm_dp_ctrl_config_msa(ctrl,
ctrl->link->link_params.rate,
pixel_rate_orig,
ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420);
+ msm_dp_panel_clear_dsc_dto(ctrl->panel);
+
msm_dp_ctrl_setup_tr_unit(ctrl);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_SEND_VIDEO);
ret = msm_dp_ctrl_wait4video_ready(ctrl);
if (ret)
return ret;
- mainlink_ready = msm_dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
+ mainlink_ready = msm_dp_ctrl_mainlink_ready(ctrl);
drm_dbg_dp(ctrl->drm_dev,
"mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
@@ -2036,12 +2542,12 @@ void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_panel_disable_vsc_sdp(ctrl->panel);
/* set dongle to D3 (power off) mode */
msm_dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2069,7 +2575,7 @@ void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
dev_pm_opp_set_rate(ctrl->dev, 0);
msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
@@ -2091,11 +2597,11 @@ void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_panel_disable_vsc_sdp(ctrl->panel);
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
- msm_dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_ctrl_reset(&ctrl->msm_dp_ctrl);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2122,7 +2628,7 @@ irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
if (ctrl->panel->psr_cap.version) {
- isr = msm_dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog);
+ isr = msm_dp_ctrl_get_psr_interrupt(ctrl);
if (isr)
complete(&ctrl->psr_op_comp);
@@ -2137,8 +2643,7 @@ irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n");
}
- isr = msm_dp_catalog_ctrl_get_interrupt(ctrl->catalog);
-
+ isr = msm_dp_ctrl_get_interrupt(ctrl);
if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
drm_dbg_dp(ctrl->drm_dev, "dp_video_ready\n");
@@ -2152,6 +2657,11 @@ irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
ret = IRQ_HANDLED;
}
+ /* DP aux isr */
+ isr = msm_dp_ctrl_get_aux_interrupt(ctrl);
+ if (isr)
+ ret |= msm_dp_aux_isr(ctrl->aux, isr);
+
return ret;
}
@@ -2207,14 +2717,14 @@ static int msm_dp_ctrl_clk_init(struct msm_dp_ctrl *msm_dp_ctrl)
struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
struct msm_dp_panel *panel, struct drm_dp_aux *aux,
- struct msm_dp_catalog *catalog,
- struct phy *phy)
+ struct phy *phy,
+ void __iomem *ahb_base,
+ void __iomem *link_base)
{
struct msm_dp_ctrl_private *ctrl;
int ret;
- if (!dev || !panel || !aux ||
- !link || !catalog) {
+ if (!dev || !panel || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
@@ -2245,9 +2755,10 @@ struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link
ctrl->panel = panel;
ctrl->aux = aux;
ctrl->link = link;
- ctrl->catalog = catalog;
ctrl->dev = dev;
ctrl->phy = phy;
+ ctrl->ahb_base = ahb_base;
+ ctrl->link_base = link_base;
ret = msm_dp_ctrl_clk_init(&ctrl->msm_dp_ctrl);
if (ret) {
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index b7abfedbf574..124b9b21bb7f 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -9,7 +9,6 @@
#include "dp_aux.h"
#include "dp_panel.h"
#include "dp_link.h"
-#include "dp_catalog.h"
struct msm_dp_ctrl {
bool wide_bus_en;
@@ -25,12 +24,15 @@ void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl);
irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl);
-struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
- struct msm_dp_panel *panel, struct drm_dp_aux *aux,
- struct msm_dp_catalog *catalog,
- struct phy *phy);
-
-void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable);
+struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev,
+ struct msm_dp_link *link,
+ struct msm_dp_panel *panel,
+ struct drm_dp_aux *aux,
+ struct phy *phy,
+ void __iomem *ahb_base,
+ void __iomem *link_base);
+
+void msm_dp_ctrl_reset(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_irq_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
@@ -41,4 +43,7 @@ void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl);
int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_enable_irq(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_disable_irq(struct msm_dp_ctrl *msm_dp_ctrl);
+
#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 22fd946ee201..cf3838fcd154 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -5,11 +5,12 @@
#define pr_fmt(fmt)"[drm-dp] %s: " fmt, __func__
+#ifdef CONFIG_DEBUG_FS
+
#include <linux/debugfs.h>
#include <drm/drm_connector.h>
#include <drm/drm_file.h>
-#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_ctrl.h"
#include "dp_debug.h"
@@ -235,3 +236,5 @@ int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel,
return 0;
}
+
+#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index bbc47d86ae9e..d87d47cc7ec3 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -13,12 +13,12 @@
#include <linux/delay.h>
#include <linux/string_choices.h>
#include <drm/display/drm_dp_aux_bus.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/drm_edid.h>
#include "msm_drv.h"
#include "msm_kms.h"
#include "dp_ctrl.h"
-#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_reg.h"
#include "dp_link.h"
@@ -86,7 +86,6 @@ struct msm_dp_display_private {
struct drm_device *drm_dev;
- struct msm_dp_catalog *catalog;
struct drm_dp_aux *aux;
struct msm_dp_link *link;
struct msm_dp_panel *panel;
@@ -111,6 +110,18 @@ struct msm_dp_display_private {
bool wide_bus_supported;
struct msm_dp_audio *audio;
+
+ void __iomem *ahb_base;
+ size_t ahb_len;
+
+ void __iomem *aux_base;
+ size_t aux_len;
+
+ void __iomem *link_base;
+ size_t link_len;
+
+ void __iomem *p0_base;
+ size_t p0_len;
};
struct msm_dp_desc {
@@ -127,6 +138,11 @@ static const struct msm_dp_desc msm_dp_desc_sa8775p[] = {
{}
};
+static const struct msm_dp_desc msm_dp_desc_sdm845[] = {
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
+ {}
+};
+
static const struct msm_dp_desc msm_dp_desc_sc7180[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
@@ -179,7 +195,7 @@ static const struct of_device_id msm_dp_dt_match[] = {
{ .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x },
{ .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp },
{ .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp },
- { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 },
+ { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sdm845 },
{ .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 },
{ .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 },
{ .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 },
@@ -276,9 +292,7 @@ static int msm_dp_display_bind(struct device *dev, struct device *master,
struct drm_device *drm = priv->dev;
dp->msm_dp_display.drm_dev = drm;
- priv->dp[dp->id] = &dp->msm_dp_display;
-
-
+ priv->kms->dp[dp->id] = &dp->msm_dp_display;
dp->drm_dev = drm;
dp->aux->drm_dev = drm;
@@ -288,13 +302,6 @@ static int msm_dp_display_bind(struct device *dev, struct device *master,
goto end;
}
-
- rc = msm_dp_register_audio_driver(dev, dp->audio);
- if (rc) {
- DRM_ERROR("Audio registration Dp failed\n");
- goto end;
- }
-
rc = msm_dp_hpd_event_thread_start(dp);
if (rc) {
DRM_ERROR("Event thread create failed\n");
@@ -316,11 +323,10 @@ static void msm_dp_display_unbind(struct device *dev, struct device *master,
of_dp_aux_depopulate_bus(dp->aux);
- msm_dp_unregister_audio_driver(dev, dp->audio);
msm_dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
- priv->dp[dp->id] = NULL;
+ priv->kms->dp[dp->id] = NULL;
}
static const struct component_ops msm_dp_display_comp_ops = {
@@ -367,17 +373,21 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
return 0;
}
-static void msm_dp_display_lttpr_init(struct msm_dp_display_private *dp)
+static int msm_dp_display_lttpr_init(struct msm_dp_display_private *dp, u8 *dpcd)
{
- u8 lttpr_caps[DP_LTTPR_COMMON_CAP_SIZE];
- int rc;
+ int rc, lttpr_count;
- if (drm_dp_read_lttpr_common_caps(dp->aux, dp->panel->dpcd, lttpr_caps))
- return;
+ if (drm_dp_read_lttpr_common_caps(dp->aux, dpcd, dp->link->lttpr_common_caps))
+ return 0;
- rc = drm_dp_lttpr_init(dp->aux, drm_dp_lttpr_count(lttpr_caps));
- if (rc)
+ lttpr_count = drm_dp_lttpr_count(dp->link->lttpr_common_caps);
+ rc = drm_dp_lttpr_init(dp->aux, lttpr_count);
+ if (rc) {
DRM_ERROR("failed to set LTTPRs transparency mode, rc=%d\n", rc);
+ return 0;
+ }
+
+ return lttpr_count;
}
static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
@@ -385,12 +395,17 @@ static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
struct drm_connector *connector = dp->msm_dp_display.connector;
const struct drm_display_info *info = &connector->display_info;
int rc = 0;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
- rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
+ rc = drm_dp_read_dpcd_caps(dp->aux, dpcd);
if (rc)
goto end;
- msm_dp_display_lttpr_init(dp);
+ dp->link->lttpr_count = msm_dp_display_lttpr_init(dp, dpcd);
+
+ rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
+ if (rc)
+ goto end;
msm_dp_link_process_request(dp->link);
@@ -455,7 +470,8 @@ static void msm_dp_display_host_init(struct msm_dp_display_private *dp)
dp->phy_initialized);
msm_dp_ctrl_core_clk_enable(dp->ctrl);
- msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
+ msm_dp_ctrl_reset(dp->ctrl);
+ msm_dp_ctrl_enable_irq(dp->ctrl);
msm_dp_aux_init(dp->aux);
dp->core_initialized = true;
}
@@ -466,7 +482,8 @@ static void msm_dp_display_host_deinit(struct msm_dp_display_private *dp)
dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
- msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
+ msm_dp_ctrl_reset(dp->ctrl);
+ msm_dp_ctrl_disable_irq(dp->ctrl);
msm_dp_aux_deinit(dp->aux);
msm_dp_ctrl_core_clk_disable(dp->ctrl);
dp->core_initialized = false;
@@ -626,9 +643,9 @@ static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display,
struct msm_dp_display_private, msm_dp_display);
/* notify audio subsystem only if sink supports audio */
- if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev &&
- dp->audio_supported)
- msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged);
+ if (dp->audio_supported)
+ drm_connector_hdmi_audio_plugged_notify(msm_dp_display->connector,
+ plugged);
}
static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data)
@@ -747,21 +764,10 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
dp->msm_dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP);
if (rc) {
DRM_ERROR("failed to set phy submode, rc = %d\n", rc);
- dp->catalog = NULL;
goto error;
}
- dp->catalog = msm_dp_catalog_get(dev);
- if (IS_ERR(dp->catalog)) {
- rc = PTR_ERR(dp->catalog);
- DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
- dp->catalog = NULL;
- goto error;
- }
-
- dp->aux = msm_dp_aux_get(dev, dp->catalog,
- phy,
- dp->msm_dp_display.is_edp);
+ dp->aux = msm_dp_aux_get(dev, phy, dp->msm_dp_display.is_edp, dp->aux_base);
if (IS_ERR(dp->aux)) {
rc = PTR_ERR(dp->aux);
DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
@@ -777,7 +783,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_link;
}
- dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->catalog);
+ dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->link_base, dp->p0_base);
if (IS_ERR(dp->panel)) {
rc = PTR_ERR(dp->panel);
DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
@@ -786,8 +792,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
}
dp->ctrl = msm_dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
- dp->catalog,
- phy);
+ phy, dp->ahb_base, dp->link_base);
if (IS_ERR(dp->ctrl)) {
rc = PTR_ERR(dp->ctrl);
DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc);
@@ -795,7 +800,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_ctrl;
}
- dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->catalog);
+ dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->link_base);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
@@ -907,19 +912,6 @@ static int msm_dp_display_disable(struct msm_dp_display_private *dp)
return 0;
}
-int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
- hdmi_codec_plugged_cb fn, struct device *codec_dev)
-{
- bool plugged;
-
- msm_dp_display->plugged_cb = fn;
- msm_dp_display->codec_dev = codec_dev;
- plugged = msm_dp_display->link_ready;
- msm_dp_display_handle_plugged_change(msm_dp_display, plugged);
-
- return 0;
-}
-
/**
* msm_dp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
@@ -1031,7 +1023,14 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
return;
}
- msm_dp_catalog_snapshot(msm_dp_display->catalog, disp_state);
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->ahb_len,
+ msm_dp_display->ahb_base, "dp_ahb");
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->aux_len,
+ msm_dp_display->aux_base, "dp_aux");
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->link_len,
+ msm_dp_display->link_base, "dp_link");
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->p0_len,
+ msm_dp_display->p0_base, "dp_p0");
mutex_unlock(&msm_dp_display->event_mutex);
}
@@ -1154,7 +1153,7 @@ static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- hpd_isr_status = msm_dp_catalog_hpd_get_intr_status(dp->catalog);
+ hpd_isr_status = msm_dp_aux_get_hpd_intr_status(dp->aux);
if (hpd_isr_status & 0x0F) {
drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n",
@@ -1181,9 +1180,6 @@ static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id)
/* DP controller isr */
ret |= msm_dp_ctrl_isr(dp->ctrl);
- /* DP aux isr */
- ret |= msm_dp_aux_isr(dp->aux);
-
return ret;
}
@@ -1281,6 +1277,80 @@ static int msm_dp_display_get_connector_type(struct platform_device *pdev,
return connector_type;
}
+static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
+ if (!IS_ERR(base))
+ *len = resource_size(res);
+
+ return base;
+}
+
+#define DP_DEFAULT_AHB_OFFSET 0x0000
+#define DP_DEFAULT_AHB_SIZE 0x0200
+#define DP_DEFAULT_AUX_OFFSET 0x0200
+#define DP_DEFAULT_AUX_SIZE 0x0200
+#define DP_DEFAULT_LINK_OFFSET 0x0400
+#define DP_DEFAULT_LINK_SIZE 0x0C00
+#define DP_DEFAULT_P0_OFFSET 0x1000
+#define DP_DEFAULT_P0_SIZE 0x0400
+
+static int msm_dp_display_get_io(struct msm_dp_display_private *display)
+{
+ struct platform_device *pdev = display->msm_dp_display.pdev;
+
+ display->ahb_base = msm_dp_ioremap(pdev, 0, &display->ahb_len);
+ if (IS_ERR(display->ahb_base))
+ return PTR_ERR(display->ahb_base);
+
+ display->aux_base = msm_dp_ioremap(pdev, 1, &display->aux_len);
+ if (IS_ERR(display->aux_base)) {
+ if (display->aux_base != ERR_PTR(-EINVAL)) {
+ DRM_ERROR("unable to remap aux region: %pe\n", display->aux_base);
+ return PTR_ERR(display->aux_base);
+ }
+
+ /*
+ * The initial binding had a single reg, but in order to
+ * support variation in the sub-region sizes this was split.
+ * msm_dp_ioremap() will fail with -EINVAL here if only a single
+ * reg is specified, so fill in the sub-region offsets and
+ * lengths based on this single region.
+ */
+ if (display->ahb_len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
+ DRM_ERROR("legacy memory region not large enough\n");
+ return -EINVAL;
+ }
+
+ display->ahb_len = DP_DEFAULT_AHB_SIZE;
+ display->aux_base = display->ahb_base + DP_DEFAULT_AUX_OFFSET;
+ display->aux_len = DP_DEFAULT_AUX_SIZE;
+ display->link_base = display->ahb_base + DP_DEFAULT_LINK_OFFSET;
+ display->link_len = DP_DEFAULT_LINK_SIZE;
+ display->p0_base = display->ahb_base + DP_DEFAULT_P0_OFFSET;
+ display->p0_len = DP_DEFAULT_P0_SIZE;
+
+ return 0;
+ }
+
+ display->link_base = msm_dp_ioremap(pdev, 2, &display->link_len);
+ if (IS_ERR(display->link_base)) {
+ DRM_ERROR("unable to remap link region: %pe\n", display->link_base);
+ return PTR_ERR(display->link_base);
+ }
+
+ display->p0_base = msm_dp_ioremap(pdev, 3, &display->p0_len);
+ if (IS_ERR(display->p0_base)) {
+ DRM_ERROR("unable to remap p0 region: %pe\n", display->p0_base);
+ return PTR_ERR(display->p0_base);
+ }
+
+ return 0;
+}
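+
+/*
+ * Illustrative DT layouts (hypothetical addresses, not from the patch): a
+ * split binding describes the four sub-regions individually, while a legacy
+ * binding provides one region of at least 0x1400 bytes that the fallback
+ * above carves up using the DP_DEFAULT_* offsets:
+ *
+ *   reg = <0x0ae90000 0x200>,   // ahb
+ *         <0x0ae90200 0x200>,   // aux
+ *         <0x0ae90400 0xc00>,   // link
+ *         <0x0ae91000 0x400>;   // p0
+ *
+ *   reg = <0x0ae90000 0x1400>;  // legacy single region
+ */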
+
static int msm_dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -1307,6 +1377,10 @@ static int msm_dp_display_probe(struct platform_device *pdev)
dp->msm_dp_display.is_edp =
(dp->msm_dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
+ rc = msm_dp_display_get_io(dp);
+ if (rc)
+ return rc;
+
rc = msm_dp_init_sub_modules(dp);
if (rc) {
DRM_ERROR("init sub module failed\n");
@@ -1369,7 +1443,7 @@ static int msm_dp_pm_runtime_suspend(struct device *dev)
if (dp->msm_dp_display.is_edp) {
msm_dp_display_host_phy_exit(dp);
- msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
+ msm_dp_aux_hpd_disable(dp->aux);
}
msm_dp_display_host_deinit(dp);
@@ -1390,7 +1464,7 @@ static int msm_dp_pm_runtime_resume(struct device *dev)
*/
msm_dp_display_host_init(dp);
if (dp->msm_dp_display.is_edp) {
- msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
+ msm_dp_aux_hpd_enable(dp->aux);
msm_dp_display_host_phy_init(dp);
}
@@ -1652,8 +1726,6 @@ void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge,
/* populate wide_bus_support to different layers */
msm_dp_display->ctrl->wide_bus_en =
msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
- msm_dp_display->catalog->wide_bus_en =
- msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
}
void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
@@ -1677,10 +1749,8 @@ void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
return;
}
- msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
-
- /* enable HDP interrupts */
- msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
+ msm_dp_aux_hpd_enable(dp->aux);
+ msm_dp_aux_hpd_intr_enable(dp->aux);
msm_dp_display->internal_hpd = true;
mutex_unlock(&dp->event_mutex);
@@ -1693,9 +1763,9 @@ void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge)
struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
mutex_lock(&dp->event_mutex);
- /* disable HDP interrupts */
- msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
- msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
+
+ msm_dp_aux_hpd_intr_disable(dp->aux);
+ msm_dp_aux_hpd_disable(dp->aux);
msm_dp_display->internal_hpd = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index ecbc2d92f546..cc6e2cab36e9 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -7,7 +7,6 @@
#define _DP_DISPLAY_H_
#include "dp_panel.h"
-#include <sound/hdmi-codec.h>
#include "disp/msm_disp_snapshot.h"
#define DP_MAX_PIXEL_CLK_KHZ 675000
@@ -15,7 +14,6 @@
struct msm_dp {
struct drm_device *drm_dev;
struct platform_device *pdev;
- struct device *codec_dev;
struct drm_connector *connector;
struct drm_bridge *next_bridge;
bool link_ready;
@@ -25,14 +23,10 @@ struct msm_dp {
bool is_edp;
bool internal_hpd;
- hdmi_codec_plugged_cb plugged_cb;
-
struct msm_dp_audio *msm_dp_audio;
bool psr_supported;
};
-int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
- hdmi_codec_plugged_cb fn, struct device *codec_dev);
int msm_dp_display_get_modes(struct msm_dp *msm_dp_display);
bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display);
int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index cca57e56c906..9a461ab2f32f 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -12,6 +12,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
+#include "dp_audio.h"
#include "dp_drm.h"
/**
@@ -19,7 +20,8 @@
* @bridge: Pointer to drm bridge structure
* Returns: Bridge's 'is connected' status
*/
-static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+msm_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct msm_dp *dp;
@@ -114,6 +116,9 @@ static const struct drm_bridge_funcs msm_dp_bridge_ops = {
.hpd_disable = msm_dp_bridge_hpd_disable,
.hpd_notify = msm_dp_bridge_hpd_notify,
.debugfs_init = msm_dp_bridge_debugfs_init,
+
+ .dp_audio_prepare = msm_dp_audio_prepare,
+ .dp_audio_shutdown = msm_dp_audio_shutdown,
};
static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
@@ -296,14 +301,15 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
struct msm_dp_bridge *msm_dp_bridge;
struct drm_bridge *bridge;
- msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL);
- if (!msm_dp_bridge)
- return -ENOMEM;
+ msm_dp_bridge = devm_drm_bridge_alloc(dev->dev, struct msm_dp_bridge, bridge,
+ msm_dp_display->is_edp ? &msm_edp_bridge_ops :
+ &msm_dp_bridge_ops);
+ if (IS_ERR(msm_dp_bridge))
+ return PTR_ERR(msm_dp_bridge);
msm_dp_bridge->msm_dp_display = msm_dp_display;
bridge = &msm_dp_bridge->bridge;
- bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops;
bridge->type = msm_dp_display->connector_type;
bridge->ycbcr_420_allowed = yuv_supported;
@@ -320,9 +326,13 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
*/
if (!msm_dp_display->is_edp) {
bridge->ops =
+ DRM_BRIDGE_OP_DP_AUDIO |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_MODES;
+ bridge->hdmi_audio_dev = &msm_dp_display->pdev->dev;
+ bridge->hdmi_audio_max_i2s_playback_channels = 8;
+ bridge->hdmi_audio_dai_port = -1;
}
rc = devm_drm_bridge_add(dev->dev, bridge);
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 1a1fbb2d7d4f..66e1bbd80db3 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include "dp_reg.h"
@@ -714,21 +715,21 @@ end:
static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link)
{
- int len;
+ int ret;
link->prev_sink_count = link->msm_dp_link.sink_count;
- len = drm_dp_read_sink_count(link->aux);
- if (len < 0) {
+ ret = drm_dp_read_sink_count(link->aux);
+ if (ret < 0) {
DRM_ERROR("DP parse sink count failed\n");
- return len;
+ return ret;
}
- link->msm_dp_link.sink_count = len;
+ link->msm_dp_link.sink_count = ret;
- len = drm_dp_dpcd_read_link_status(link->aux,
- link->link_status);
- if (len < DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(link->aux,
+ link->link_status);
+ if (ret < 0) {
DRM_ERROR("DP link status read failed\n");
- return len;
+ return ret;
}
return msm_dp_link_parse_request(link);
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 8db5d5698a97..ba47c6d19fbf 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -7,6 +7,7 @@
#define _DP_LINK_H_
#include "dp_aux.h"
+#include <drm/display/drm_dp_helper.h>
#define DS_PORT_STATUS_CHANGED 0x200
#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
@@ -60,6 +61,9 @@ struct msm_dp_link_phy_params {
};
struct msm_dp_link {
+ u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
+ int lttpr_count;
+
u32 sink_request;
u32 test_response;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 92415bf8aa16..15b7f6c7146e 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -4,6 +4,7 @@
*/
#include "dp_panel.h"
+#include "dp_reg.h"
#include "dp_utils.h"
#include <drm/drm_connector.h>
@@ -11,6 +12,10 @@
#include <drm/drm_of.h>
#include <drm/drm_print.h>
+#include <linux/io.h>
+
+#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
+
#define DP_MAX_NUM_DP_LANES 4
#define DP_LINK_RATE_HBR2 540000 /* kbytes */
@@ -20,10 +25,46 @@ struct msm_dp_panel_private {
struct msm_dp_panel msm_dp_panel;
struct drm_dp_aux *aux;
struct msm_dp_link *link;
- struct msm_dp_catalog *catalog;
+ void __iomem *link_base;
+ void __iomem *p0_base;
bool panel_on;
};
+static inline u32 msm_dp_read_link(struct msm_dp_panel_private *panel, u32 offset)
+{
+ return readl_relaxed(panel->link_base + offset);
+}
+
+static inline void msm_dp_write_link(struct msm_dp_panel_private *panel,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure link reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, panel->link_base + offset);
+}
+
+static inline void msm_dp_write_p0(struct msm_dp_panel_private *panel,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure interface reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, panel->p0_base + offset);
+}
+
+static inline u32 msm_dp_read_p0(struct msm_dp_panel_private *panel,
+ u32 offset)
+{
+ /*
+ * Plain register read; no ordering guarantee is needed here, so this
+ * function uses readl_relaxed()
+ */
+ return readl_relaxed(panel->p0_base + offset);
+}
+
static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel)
{
ssize_t rlen;
@@ -47,7 +88,7 @@ static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel)
static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
{
- int rc;
+ int rc, max_lttpr_lanes, max_lttpr_rate;
struct msm_dp_panel_private *panel;
struct msm_dp_link_info *link_info;
u8 *dpcd, major, minor;
@@ -75,6 +116,16 @@ static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
if (link_info->rate > msm_dp_panel->max_dp_link_rate)
link_info->rate = msm_dp_panel->max_dp_link_rate;
+ /* Limit data lanes from LTTPR capabilities, if any */
+ max_lttpr_lanes = drm_dp_lttpr_max_lane_count(panel->link->lttpr_common_caps);
+ if (max_lttpr_lanes && max_lttpr_lanes < link_info->num_lanes)
+ link_info->num_lanes = max_lttpr_lanes;
+
+ /* Limit link rate from LTTPR capabilities, if any */
+ max_lttpr_rate = drm_dp_lttpr_max_link_rate(panel->link->lttpr_common_caps);
+ if (max_lttpr_rate && max_lttpr_rate < link_info->rate)
+ link_info->rate = max_lttpr_rate;
+
drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
drm_dbg_dp(panel->drm_dev, "lane_count=%d\n", link_info->num_lanes);
@@ -162,7 +213,7 @@ int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
if (!msm_dp_panel->drm_edid) {
DRM_ERROR("panel edid read failed\n");
/* check edid read fail is due to unplug */
- if (!msm_dp_catalog_link_is_connected(panel->catalog)) {
+ if (!msm_dp_aux_is_link_connected(panel->aux)) {
rc = -ETIMEDOUT;
goto end;
}
@@ -242,9 +293,85 @@ void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel)
}
}
+static void msm_dp_panel_tpg_enable(struct msm_dp_panel *msm_dp_panel,
+ struct drm_display_mode *drm_mode)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 v_sync_width;
+ u32 hsync_ctl;
+ u32 display_hctl;
+
+ /* TPG config parameters */
+ hsync_period = drm_mode->htotal;
+ vsync_period = drm_mode->vtotal;
+
+ display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
+ hsync_period);
+ display_v_end = ((vsync_period - (drm_mode->vsync_start -
+ drm_mode->vdisplay))
+ * hsync_period) - 1;
+
+ display_v_start += drm_mode->htotal - drm_mode->hsync_start;
+ display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
+
+ hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
+ hsync_end_x = hsync_period - (drm_mode->hsync_start -
+ drm_mode->hdisplay) - 1;
+
+ v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
+
+ hsync_ctl = (hsync_period << 16) |
+ (drm_mode->hsync_end - drm_mode->hsync_start);
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ msm_dp_write_p0(panel, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+ hsync_period);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
+ hsync_period);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+ msm_dp_write_p0(panel, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
+ msm_dp_write_p0(panel, MMSS_INTF_DISPLAY_V_START_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_POLARITY_CTL, 0);
+
+ msm_dp_write_p0(panel, MMSS_DP_TPG_MAIN_CONTROL,
+ DP_TPG_CHECKERED_RECT_PATTERN);
+ msm_dp_write_p0(panel, MMSS_DP_TPG_VIDEO_CONFIG,
+ DP_TPG_VIDEO_CONFIG_BPP_8BIT |
+ DP_TPG_VIDEO_CONFIG_RGB);
+ msm_dp_write_p0(panel, MMSS_DP_BIST_ENABLE,
+ DP_BIST_ENABLE_DPBIST_EN);
+ msm_dp_write_p0(panel, MMSS_DP_TIMING_ENGINE_EN,
+ DP_TIMING_ENGINE_EN_EN);
+ drm_dbg_dp(panel->drm_dev, "%s: enabled tpg\n", __func__);
+}
+
+static void msm_dp_panel_tpg_disable(struct msm_dp_panel *msm_dp_panel)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+
+ msm_dp_write_p0(panel, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+ msm_dp_write_p0(panel, MMSS_DP_BIST_ENABLE, 0x0);
+ msm_dp_write_p0(panel, MMSS_DP_TIMING_ENGINE_EN, 0x0);
+}
+
void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
{
- struct msm_dp_catalog *catalog;
struct msm_dp_panel_private *panel;
if (!msm_dp_panel) {
@@ -253,7 +380,6 @@ void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
}
panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
if (!panel->panel_on) {
drm_dbg_dp(panel->drm_dev,
@@ -262,18 +388,109 @@ void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
}
if (!enable) {
- msm_dp_catalog_panel_tpg_disable(catalog);
+ msm_dp_panel_tpg_disable(msm_dp_panel);
return;
}
- drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n");
- msm_dp_catalog_panel_tpg_enable(catalog, &panel->msm_dp_panel.msm_dp_mode.drm_mode);
+ drm_dbg_dp(panel->drm_dev, "calling panel's tpg_enable\n");
+ msm_dp_panel_tpg_enable(msm_dp_panel, &panel->msm_dp_panel.msm_dp_mode.drm_mode);
+}
+
+void msm_dp_panel_clear_dsc_dto(struct msm_dp_panel *msm_dp_panel)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+
+ msm_dp_write_p0(panel, MMSS_DP_DSC_DTO, 0x0);
+}
+
+static void msm_dp_panel_send_vsc_sdp(struct msm_dp_panel_private *panel, struct dp_sdp *vsc_sdp)
+{
+ u32 header[2];
+ u32 val;
+ int i;
+
+ msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
+
+ msm_dp_write_link(panel, MMSS_DP_GENERIC0_0, header[0]);
+ msm_dp_write_link(panel, MMSS_DP_GENERIC0_1, header[1]);
+
+ for (i = 0; i < sizeof(vsc_sdp->db); i += 4) {
+ val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) |
+ (vsc_sdp->db[i + 3] << 24));
+ msm_dp_write_link(panel, MMSS_DP_GENERIC0_2 + i, val);
+ }
+}
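+
+/*
+ * Editorial note, not part of the patch: the 32-byte VSC SDP payload is
+ * packed little-endian, four bytes per register, into the eight 32-bit
+ * GENERIC0 data registers following MMSS_DP_GENERIC0_1 (db[0] lands in
+ * bits 7:0 of MMSS_DP_GENERIC0_2, db[3] in bits 31:24, and so on).
+ */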
+
+static void msm_dp_panel_update_sdp(struct msm_dp_panel_private *panel)
+{
+ u32 hw_revision = panel->msm_dp_panel.hw_revision;
+
+ if (hw_revision >= DP_HW_VERSION_1_0 &&
+ hw_revision < DP_HW_VERSION_1_2) {
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG3, UPDATE_SDP);
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG3, 0x0);
+ }
+}
+
+void msm_dp_panel_enable_vsc_sdp(struct msm_dp_panel *msm_dp_panel, struct dp_sdp *vsc_sdp)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ u32 cfg, cfg2, misc;
+
+ cfg = msm_dp_read_link(panel, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(panel, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(panel, REG_DP_MISC1_MISC0);
+
+ cfg |= GEN0_SDP_EN;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG, cfg);
+
+ cfg2 |= GENERIC0_SDPSIZE_VALID;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG2, cfg2);
+
+ msm_dp_panel_send_vsc_sdp(panel, vsc_sdp);
+
+ /* indicates presence of VSC (BIT(6) of MISC1) */
+ misc |= DP_MISC1_VSC_SDP;
+
+ drm_dbg_dp(panel->drm_dev, "vsc sdp enable=1\n");
+
+ pr_debug("misc settings = 0x%x\n", misc);
+ msm_dp_write_link(panel, REG_DP_MISC1_MISC0, misc);
+
+ msm_dp_panel_update_sdp(panel);
+}
+
+void msm_dp_panel_disable_vsc_sdp(struct msm_dp_panel *msm_dp_panel)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ u32 cfg, cfg2, misc;
+
+ cfg = msm_dp_read_link(panel, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(panel, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(panel, REG_DP_MISC1_MISC0);
+
+ cfg &= ~GEN0_SDP_EN;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG, cfg);
+
+ cfg2 &= ~GENERIC0_SDPSIZE_VALID;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG2, cfg2);
+
+ /* switch back to MSA */
+ misc &= ~DP_MISC1_VSC_SDP;
+
+ drm_dbg_dp(panel->drm_dev, "vsc sdp enable=0\n");
+
+ pr_debug("misc settings = 0x%x\n", misc);
+ msm_dp_write_link(panel, REG_DP_MISC1_MISC0, misc);
+
+ msm_dp_panel_update_sdp(panel);
}
static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
{
- struct msm_dp_catalog *catalog;
- struct msm_dp_panel_private *panel;
struct msm_dp_display_mode *msm_dp_mode;
struct drm_dp_vsc_sdp vsc_sdp_data;
struct dp_sdp vsc_sdp;
@@ -284,8 +501,6 @@ static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
return -EINVAL;
}
- panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
msm_dp_mode = &msm_dp_panel->msm_dp_mode;
memset(&vsc_sdp_data, 0, sizeof(vsc_sdp_data));
@@ -312,24 +527,23 @@ static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
return len;
}
- msm_dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp);
+ msm_dp_panel_enable_vsc_sdp(msm_dp_panel, &vsc_sdp);
return 0;
}
-int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel, bool wide_bus_en)
{
u32 data, total_ver, total_hor;
- struct msm_dp_catalog *catalog;
struct msm_dp_panel_private *panel;
struct drm_display_mode *drm_mode;
u32 width_blanking;
u32 sync_start;
u32 msm_dp_active;
u32 total;
+ u32 reg;
panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
drm_mode = &panel->msm_dp_panel.msm_dp_mode.drm_mode;
drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n",
@@ -372,7 +586,20 @@ int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
msm_dp_active = data;
- msm_dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, msm_dp_active);
+ msm_dp_write_link(panel, REG_DP_TOTAL_HOR_VER, total);
+ msm_dp_write_link(panel, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
+ msm_dp_write_link(panel, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
+ msm_dp_write_link(panel, REG_DP_ACTIVE_HOR_VER, msm_dp_active);
+
+ reg = msm_dp_read_p0(panel, MMSS_DP_INTF_CONFIG);
+ if (wide_bus_en)
+ reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
+ else
+ reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
+
+ drm_dbg_dp(panel->drm_dev, "wide_bus_en=%d reg=%#x\n", wide_bus_en, reg);
+
+ msm_dp_write_p0(panel, MMSS_DP_INTF_CONFIG, reg);
if (msm_dp_panel->msm_dp_mode.out_fmt_is_yuv_420)
msm_dp_panel_setup_vsc_sdp_yuv_420(msm_dp_panel);
@@ -476,13 +703,15 @@ static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel)
}
struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
- struct msm_dp_link *link, struct msm_dp_catalog *catalog)
+ struct msm_dp_link *link,
+ void __iomem *link_base,
+ void __iomem *p0_base)
{
struct msm_dp_panel_private *panel;
struct msm_dp_panel *msm_dp_panel;
int ret;
- if (!dev || !catalog || !aux || !link) {
+ if (!dev || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
@@ -493,8 +722,9 @@ struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux
panel->dev = dev;
panel->aux = aux;
- panel->catalog = catalog;
panel->link = link;
+ panel->link_base = link_base;
+ panel->p0_base = p0_base;
msm_dp_panel = &panel->msm_dp_panel;
msm_dp_panel->max_bw_code = DP_LINK_BW_8_1;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 4906f4f09f24..d2cf401506dc 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -6,6 +6,7 @@
#ifndef _DP_PANEL_H_
#define _DP_PANEL_H_
+#include <drm/drm_modes.h>
#include <drm/msm_drm.h>
#include "dp_aux.h"
@@ -38,6 +39,7 @@ struct msm_dp_panel {
struct msm_dp_panel_psr psr_cap;
bool video_test;
bool vsc_sdp_supported;
+ u32 hw_revision;
u32 max_dp_lanes;
u32 max_dp_link_rate;
@@ -47,7 +49,7 @@ struct msm_dp_panel {
int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel);
-int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel);
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel, bool wide_bus_en);
int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp,
@@ -57,6 +59,11 @@ int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel,
void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel);
void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable);
+void msm_dp_panel_clear_dsc_dto(struct msm_dp_panel *msm_dp_panel);
+
+void msm_dp_panel_enable_vsc_sdp(struct msm_dp_panel *msm_dp_panel, struct dp_sdp *vsc_sdp);
+void msm_dp_panel_disable_vsc_sdp(struct msm_dp_panel *msm_dp_panel);
+
/**
* is_link_rate_valid() - validates the link rate
* @lane_rate: link rate requested by the sink
@@ -85,6 +92,8 @@ static inline bool is_lane_count_valid(u32 lane_count)
}
struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
- struct msm_dp_link *link, struct msm_dp_catalog *catalog);
+ struct msm_dp_link *link,
+ void __iomem *link_base,
+ void __iomem *p0_base);
void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel);
#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 3835c7f5cb98..7c44d4e2cf13 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -11,6 +11,8 @@
/* DP_TX Registers */
#define REG_DP_HW_VERSION (0x00000000)
+#define DP_HW_VERSION_1_0 0x10000000
+#define DP_HW_VERSION_1_2 0x10020000
#define REG_DP_SW_RESET (0x00000010)
#define DP_SW_RESET (0x00000001)
@@ -21,8 +23,25 @@
#define REG_DP_CLK_CTRL (0x00000018)
#define REG_DP_CLK_ACTIVE (0x0000001C)
+
#define REG_DP_INTR_STATUS (0x00000020)
+#define DP_INTR_HPD BIT(0)
+#define DP_INTR_AUX_XFER_DONE BIT(3)
+#define DP_INTR_WRONG_ADDR BIT(6)
+#define DP_INTR_TIMEOUT BIT(9)
+#define DP_INTR_NACK_DEFER BIT(12)
+#define DP_INTR_WRONG_DATA_CNT BIT(15)
+#define DP_INTR_I2C_NACK BIT(18)
+#define DP_INTR_I2C_DEFER BIT(21)
+#define DP_INTR_PLL_UNLOCKED BIT(24)
+#define DP_INTR_AUX_ERROR BIT(27)
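+
+/*
+ * Editorial note, not part of the patch: the status bits above are spaced
+ * three apart because each interrupt source appears to occupy a
+ * status/ack/mask bit triplet in these registers (e.g. BIT(3) is the
+ * AUX_XFER_DONE status, with its ack and mask bits at BIT(4) and BIT(5)).
+ */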
+
#define REG_DP_INTR_STATUS2 (0x00000024)
+#define DP_INTR_READY_FOR_VIDEO BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
+#define DP_INTR_FRAME_END BIT(6)
+#define DP_INTR_CRC_UPDATED BIT(9)
+
#define REG_DP_INTR_STATUS3 (0x00000028)
#define REG_DP_INTR_STATUS4 (0x0000002C)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 296215877613..d8bb40ef820e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -136,7 +136,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
msm_dsi->next_bridge = ext_bridge;
}
- priv->dsi[msm_dsi->id] = msm_dsi;
+ priv->kms->dsi[msm_dsi->id] = msm_dsi;
return 0;
}
@@ -148,7 +148,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
msm_dsi_tx_buf_free(msm_dsi->host);
- priv->dsi[msm_dsi->id] = NULL;
+ priv->kms->dsi[msm_dsi->id] = NULL;
}
static const struct component_ops dsi_ops = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 87496db203d6..93c028a122f3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -98,6 +98,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_set_rate_6g_v2_9(struct msm_dsi_host *msm_host);
int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
@@ -115,6 +116,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2_9(struct msm_dsi_host *msm_host);
int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 7754dcec33d0..fed8e9b67011 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -221,6 +221,22 @@ static const struct msm_dsi_config sc7280_dsi_cfg = {
},
};
+static const struct regulator_bulk_data sa8775p_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 8300 }, /* 1.2 V */
+ { .supply = "refgen" },
+};
+
+static const struct msm_dsi_config sa8775p_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sa8775p_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sa8775p_dsi_regulators),
+ .bus_clk_names = dsi_v2_4_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
+ .io_start = {
+ { 0xae94000, 0xae96000 },
+ },
+};
+
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
@@ -257,6 +273,18 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
.calc_clk_rate = dsi_calc_clk_rate_6g,
};
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_9_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g_v2_9,
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = dsi_clk_init_6g_v2_9,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
&apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
@@ -294,12 +322,16 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
&sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_1,
+ &sa8775p_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_6_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0,
&sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_8_0,
&sm8650_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_9_0,
+ &sm8650_dsi_cfg, &msm_dsi_6g_v2_9_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 120cb65164c1..38f303f2ed04 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -27,9 +27,11 @@
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
+#define MSM_DSI_6G_VER_MINOR_V2_5_1 0x20050001
#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000
+#define MSM_DSI_6G_VER_MINOR_V2_9_0 0x20090000
#define MSM_DSI_V2_VER_MINOR_8064 0x0
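The MSM_DSI_6G_VER_MINOR_* values encode the hardware revision as packed hex fields (V2_9_0 is 0x20090000), and msm_dsi_cfg_get() resolves a (major, minor) pair to the matching dsi_cfg_handlers[] entry. A minimal sketch of such a lookup, with the positional field names assumed:

	static const struct msm_dsi_cfg_handler *lookup(u32 major, u32 minor)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(dsi_cfg_handlers); i++) {
			if (dsi_cfg_handlers[i].major == major &&
			    dsi_cfg_handlers[i].minor == minor)
				return &dsi_cfg_handlers[i];
		}

		return NULL;
	}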
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4d75529c0e85..e0de545d4077 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -119,6 +119,15 @@ struct msm_dsi_host {
struct clk *pixel_clk;
struct clk *byte_intf_clk;
+ /*
+ * Clocks which need to be properly parented between DISPCC and DSI PHY
+ * PLL:
+ */
+ struct clk *byte_src_clk;
+ struct clk *pixel_src_clk;
+ struct clk *dsi_pll_byte_clk;
+ struct clk *dsi_pll_pixel_clk;
+
unsigned long byte_clk_rate;
unsigned long byte_intf_clk_rate;
unsigned long pixel_clk_rate;
@@ -143,7 +152,7 @@ struct msm_dsi_host {
/* DSI 6G TX buffer*/
struct drm_gem_object *tx_gem_obj;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* DSI v2 TX buffer */
void *tx_buf;
@@ -269,6 +278,38 @@ int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
return ret;
}
+int dsi_clk_init_6g_v2_9(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ int ret;
+
+ ret = dsi_clk_init_6g_v2(msm_host);
+ if (ret)
+ return ret;
+
+ msm_host->byte_src_clk = devm_clk_get(dev, "byte_src");
+ if (IS_ERR(msm_host->byte_src_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->byte_src_clk),
+ "can't get byte_src clock\n");
+
+ msm_host->dsi_pll_byte_clk = devm_clk_get(dev, "dsi_pll_byte");
+ if (IS_ERR(msm_host->dsi_pll_byte_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->dsi_pll_byte_clk),
+ "can't get dsi_pll_byte clock\n");
+
+ msm_host->pixel_src_clk = devm_clk_get(dev, "pixel_src");
+ if (IS_ERR(msm_host->pixel_src_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->pixel_src_clk),
+ "can't get pixel_src clock\n");
+
+ msm_host->dsi_pll_pixel_clk = devm_clk_get(dev, "dsi_pll_pixel");
+ if (IS_ERR(msm_host->dsi_pll_pixel_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->dsi_pll_pixel_clk),
+ "can't get dsi_pll_pixel clock\n");
+
+ return 0;
+}
+
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -370,6 +411,26 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
return 0;
}
+int dsi_link_clk_set_rate_6g_v2_9(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ int ret;
+
+ /*
+ * DSI PHY PLLs have to be enabled to allow reparenting to them, so
+ * cannot use assigned-clock-parents.
+ */
+ ret = clk_set_parent(msm_host->byte_src_clk, msm_host->dsi_pll_byte_clk);
+ if (ret)
+ dev_err(dev, "Failed to parent byte_src -> dsi_pll_byte: %d\n", ret);
+
+ ret = clk_set_parent(msm_host->pixel_src_clk, msm_host->dsi_pll_pixel_clk);
+ if (ret)
+ dev_err(dev, "Failed to parent pixel_src -> dsi_pll_pixel: %d\n", ret);
+
+ return dsi_link_clk_set_rate_6g(msm_host);
+}
+
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
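As the comment in dsi_link_clk_set_rate_6g_v2_9() notes, the DISPCC byte/pixel clocks can only be reparented to the DSI PHY PLL outputs while those PLLs are running, which rules out a static assigned-clock-parents setup in DT. A hypothetical sanity check after the reparenting, reusing the names from the code above (clk_is_match() is used because consumer clk handles are not pointer-comparable):

	if (!clk_is_match(clk_get_parent(msm_host->byte_src_clk),
			  msm_host->dsi_pll_byte_clk))
		dev_warn(dev, "byte_src is still not parented to the DSI PLL\n");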
@@ -1146,10 +1207,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
uint64_t iova;
u8 *data;
- msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
+ msm_host->vm = drm_gpuvm_get(priv->kms->vm);
data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
- msm_host->aspace,
+ msm_host->vm,
&msm_host->tx_gem_obj, &iova);
if (IS_ERR(data)) {
@@ -1193,10 +1254,10 @@ void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
return;
if (msm_host->tx_gem_obj) {
- msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
- msm_gem_address_space_put(msm_host->aspace);
+ msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->vm);
+ drm_gpuvm_put(msm_host->vm);
msm_host->tx_gem_obj = NULL;
- msm_host->aspace = NULL;
+ msm_host->vm = NULL;
}
if (msm_host->tx_buf)
@@ -1327,7 +1388,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
return -EINVAL;
return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, dma_base);
+ priv->kms->vm, dma_base);
}
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
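The tx-buffer hunks above swap msm_gem_address_space for drm_gpuvm but keep the same reference discipline: take a reference on the KMS VM when the buffer is created, drop it when the buffer is freed. Distilled into a sketch, not literal driver code:

	struct drm_gpuvm *vm = drm_gpuvm_get(priv->kms->vm);	/* +1 ref */
	/* ... allocate and pin GEM objects against vm ... */
	drm_gpuvm_put(vm);					/* -1 ref */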
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4fabb01345aa..ca400924d4ee 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -434,12 +434,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dsi_mgr_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge,
+ return drm_bridge_attach(encoder, msm_dsi->next_bridge,
bridge, flags);
}
@@ -461,15 +462,14 @@ int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi,
struct drm_connector *connector;
int ret;
- dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
- sizeof(*dsi_bridge), GFP_KERNEL);
- if (!dsi_bridge)
- return -ENOMEM;
+ dsi_bridge = devm_drm_bridge_alloc(msm_dsi->dev->dev, struct dsi_bridge, base,
+ &dsi_mgr_bridge_funcs);
+ if (IS_ERR(dsi_bridge))
+ return PTR_ERR(dsi_bridge);
dsi_bridge->id = msm_dsi->id;
bridge = &dsi_bridge->base;
- bridge->funcs = &dsi_mgr_bridge_funcs;
ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
if (ret)
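devm_drm_bridge_alloc() replaces the open-coded kzalloc-plus-funcs-assignment pattern: it allocates the embedding structure, initializes the embedded drm_bridge, and sets its funcs pointer in one device-managed step. The general shape, shown on a hypothetical wrapper type:

	struct my_bridge {
		struct drm_bridge base;
		int id;
	};

	my = devm_drm_bridge_alloc(dev, struct my_bridge, base,
				   &my_bridge_funcs);
	if (IS_ERR(my))
		return PTR_ERR(my);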
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index c0bcc6828963..221f12db5f8b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -581,6 +581,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_7nm_cfgs },
{ .compatible = "qcom,dsi-phy-7nm-8150",
.data = &dsi_phy_7nm_8150_cfgs },
+ { .compatible = "qcom,sa8775p-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_8775p_cfgs },
+ { .compatible = "qcom,sar2130p-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_sar2130p_cfgs },
{ .compatible = "qcom,sc7280-dsi-phy-7nm",
.data = &dsi_phy_7nm_7280_cfgs },
{ .compatible = "qcom,sm6375-dsi-phy-7nm",
@@ -593,6 +597,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_4nm_8550_cfgs },
{ .compatible = "qcom,sm8650-dsi-phy-4nm",
.data = &dsi_phy_4nm_8650_cfgs },
+ { .compatible = "qcom,sm8750-dsi-phy-3nm",
+ .data = &dsi_phy_3nm_8750_cfgs },
#endif
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 1925418d9999..c558f8df1684 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -59,8 +59,11 @@ extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_3nm_8750_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_zero;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 9812b4d69197..af2e30f3f842 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -704,6 +704,13 @@ static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
/* TODO: Remove this when we have proper display handover support */
msm_dsi_phy_pll_save_state(phy);
+ /*
+ * Also store the proper vco_current_rate, because its value will be used in
+ * dsi_10nm_pll_restore_state().
+ */
+ if (!dsi_pll_10nm_vco_recalc_rate(&pll_10nm->clk_hw, VCO_REF_CLK_RATE))
+ pll_10nm->vco_current_rate = pll_10nm->phy->cfg->min_pll_rate;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index a92decbee5b5..8c98f91a5930 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -51,6 +51,8 @@
#define DSI_PHY_7NM_QUIRK_V4_3 BIT(3)
/* Hardware is V5.2 */
#define DSI_PHY_7NM_QUIRK_V5_2 BIT(4)
+/* Hardware is V7.0 */
+#define DSI_PHY_7NM_QUIRK_V7_0 BIT(5)
struct dsi_pll_config {
bool enable_ssc;
@@ -129,9 +131,30 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
dec_multiple = div_u64(pll_freq * multiplier, divider);
dec = div_u64_rem(dec_multiple, multiplier, &frac);
- if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)
+ if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) {
config->pll_clock_inverters = 0x28;
- else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ if (pll_freq < 163000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 175000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 325000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 350000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 650000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 700000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 1300000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 4000000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
+ } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (pll_freq <= 1300000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
@@ -250,7 +273,8 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
vco_config_1 = 0x01;
}
- if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
if (pll->vco_current_rate < 1557000000ULL)
vco_config_1 = 0x08;
else
@@ -620,6 +644,7 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ void __iomem *base = phy->base;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_7nm->phy->id);
@@ -629,6 +654,9 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
break;
case MSM_DSI_PHY_MASTER:
pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX];
+ /* v7.0: Enable ATB_EN0 and alternate clock output to external phy */
+ if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)
+ writel(0x07, base + REG_DSI_7nm_PHY_CMN_CTRL_5);
break;
case MSM_DSI_PHY_SLAVE:
data = 0x1; /* external PLL */
@@ -907,7 +935,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Request for REFGEN READY */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
- (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
writel(0x1, phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
udelay(500);
}
@@ -941,7 +970,20 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
lane_ctrl0 = 0x1f;
}
- if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ if (phy->cphy_mode) {
+ /* TODO: different for second phy */
+ vreg_ctrl_0 = 0x57;
+ vreg_ctrl_1 = 0x41;
+ glbl_rescode_top_ctrl = 0x3d;
+ glbl_rescode_bot_ctrl = 0x38;
+ } else {
+ vreg_ctrl_0 = 0x56;
+ vreg_ctrl_1 = 0x19;
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x03;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3c;
+ }
+ } else if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (phy->cphy_mode) {
vreg_ctrl_0 = 0x45;
vreg_ctrl_1 = 0x41;
@@ -1003,6 +1045,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* program CMN_CTRL_4 for minor_ver 2 chipsets*/
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) ||
(readl(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
writel(0x04, base + REG_DSI_7nm_PHY_CMN_CTRL_4);
@@ -1117,7 +1160,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
/* Turn off REFGEN Vote */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
- (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
writel(0x0, base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
wmb();
/* Delay to ensure HW removes vote before PHY shut down */
@@ -1147,6 +1191,10 @@ static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 37550 },
};
+static const struct regulator_bulk_data dsi_phy_7nm_48000uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 48000 },
+};
+
static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 98000 },
};
@@ -1289,6 +1337,52 @@ const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs = {
.quirks = DSI_PHY_7NM_QUIRK_V4_3,
};
+const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_48000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_48000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_97800uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_97800uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
+
const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_98400uA_regulators,
@@ -1334,3 +1428,26 @@ const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs = {
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V5_2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_3nm_8750_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V7_0,
+};
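Because the PHY quirks are a bitmask, every new revision bit has to be OR'ed into each version-gated path, which is why DSI_PHY_7NM_QUIRK_V7_0 appears in four separate conditionals above. A hypothetical helper that would collapse the repeated REFGEN checks, offered as a refactoring sketch rather than part of this patch:

	static bool dsi_phy_has_refgen(const struct msm_dsi_phy_cfg *cfg)
	{
		return cfg->quirks & (DSI_PHY_7NM_QUIRK_V4_3 |
				      DSI_PHY_7NM_QUIRK_V5_2 |
				      DSI_PHY_7NM_QUIRK_V7_0);
	}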
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 248541ff4492..5afac09c0d33 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -8,12 +8,14 @@
#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
#include <drm/display/drm_hdmi_state_helper.h>
+#include "msm_kms.h"
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -199,12 +201,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = msm_hdmi_hpd_enable(hdmi->bridge);
- if (ret < 0) {
- DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- goto fail;
- }
-
return 0;
fail:
@@ -220,28 +216,24 @@ fail:
* The hdmi device:
*/
-#define HDMI_CFG(item, entry) \
- .item ## _names = item ##_names_ ## entry, \
- .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
-
-static const char *hpd_reg_names_8960[] = {"core-vdda"};
-static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
+static const char * const pwr_reg_names_8960[] = {"core-vdda"};
+static const char * const pwr_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static const struct hdmi_platform_config hdmi_tx_8960_config = {
- HDMI_CFG(hpd_reg, 8960),
- HDMI_CFG(hpd_clk, 8960),
+ .pwr_reg_names = pwr_reg_names_8960,
+ .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8960),
+ .pwr_clk_names = pwr_clk_names_8960,
+ .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8960),
};
-static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
-static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
-static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
-static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
+static const char * const pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
+static const char * const pwr_clk_names_8x74[] = {"iface", "core", "mdp_core", "alt_iface"};
static const struct hdmi_platform_config hdmi_tx_8974_config = {
- HDMI_CFG(pwr_reg, 8x74),
- HDMI_CFG(pwr_clk, 8x74),
- HDMI_CFG(hpd_clk, 8x74),
- .hpd_freq = hpd_clk_freq_8x74,
+ .pwr_reg_names = pwr_reg_names_8x74,
+ .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8x74),
+ .pwr_clk_names = pwr_clk_names_8x74,
+ .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8x74),
};
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
@@ -253,7 +245,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
err = msm_hdmi_init(hdmi);
if (err)
return err;
- priv->hdmi = hdmi;
+ priv->kms->hdmi = hdmi;
return 0;
}
@@ -263,12 +255,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
{
struct msm_drm_private *priv = dev_get_drvdata(master);
- if (priv->hdmi) {
- if (priv->hdmi->bridge)
- msm_hdmi_hpd_disable(priv->hdmi);
-
- msm_hdmi_destroy(priv->hdmi);
- priv->hdmi = NULL;
+ if (priv->kms->hdmi) {
+ msm_hdmi_destroy(priv->kms->hdmi);
+ priv->kms->hdmi = NULL;
}
}
@@ -296,6 +285,7 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
hdmi->pdev = pdev;
hdmi->config = config;
spin_lock_init(&hdmi->reg_lock);
+ mutex_init(&hdmi->state_mutex);
ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0, NULL, &hdmi->next_bridge);
if (ret && ret != -ENODEV)
@@ -322,20 +312,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (hdmi->irq < 0)
return hdmi->irq;
- hdmi->hpd_regs = devm_kcalloc(&pdev->dev,
- config->hpd_reg_cnt,
- sizeof(hdmi->hpd_regs[0]),
- GFP_KERNEL);
- if (!hdmi->hpd_regs)
- return -ENOMEM;
-
- for (i = 0; i < config->hpd_reg_cnt; i++)
- hdmi->hpd_regs[i].supply = config->hpd_reg_names[i];
-
- ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret)
- return dev_err_probe(dev, ret, "failed to get hpd regulators\n");
-
hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
config->pwr_reg_cnt,
sizeof(hdmi->pwr_regs[0]),
@@ -350,25 +326,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to get pwr regulators\n");
- hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
- config->hpd_clk_cnt,
- sizeof(hdmi->hpd_clks[0]),
- GFP_KERNEL);
- if (!hdmi->hpd_clks)
- return -ENOMEM;
-
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- struct clk *clk;
-
- clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
- if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk),
- "failed to get hpd clk: %s\n",
- config->hpd_clk_names[i]);
-
- hdmi->hpd_clks[i] = clk;
- }
-
hdmi->pwr_clks = devm_kcalloc(&pdev->dev,
config->pwr_clk_cnt,
sizeof(hdmi->pwr_clks[0]),
@@ -376,17 +333,17 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (!hdmi->pwr_clks)
return -ENOMEM;
- for (i = 0; i < config->pwr_clk_cnt; i++) {
- struct clk *clk;
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ hdmi->pwr_clks[i].id = config->pwr_clk_names[i];
- clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
- if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk),
- "failed to get pwr clk: %s\n",
- config->pwr_clk_names[i]);
+ ret = devm_clk_bulk_get(&pdev->dev, config->pwr_clk_cnt, hdmi->pwr_clks);
+ if (ret)
+ return ret;
- hdmi->pwr_clks[i] = clk;
- }
+ hdmi->extp_clk = devm_clk_get_optional(&pdev->dev, "extp");
+ if (IS_ERR(hdmi->extp_clk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->extp_clk),
+ "failed to get extp clock\n");
hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
/* This will catch e.g. -EPROBE_DEFER */
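The probe conversion above moves from an array of bare struct clk pointers to the clk_bulk API: fill in the .id of each struct clk_bulk_data, then let devm_clk_bulk_get() resolve them all at once. The pattern in isolation, with assumed names:

	static int get_pwr_clks(struct device *dev, struct clk_bulk_data *clks,
				const char * const *names, int count)
	{
		int i;

		for (i = 0; i < count; i++)
			clks[i].id = names[i];	/* devm_clk_bulk_get() matches on .id */

		return devm_clk_bulk_get(dev, count, clks);
	}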
@@ -432,6 +389,48 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev)
msm_hdmi_put_phy(hdmi);
}
+static int msm_hdmi_runtime_suspend(struct device *dev)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ const struct hdmi_platform_config *config = hdmi->config;
+
+ clk_bulk_disable_unprepare(config->pwr_clk_cnt, hdmi->pwr_clks);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
+
+ return 0;
+}
+
+static int msm_hdmi_runtime_resume(struct device *dev)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ const struct hdmi_platform_config *config = hdmi->config;
+ int ret;
+
+ ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ goto fail;
+
+ ret = clk_bulk_prepare_enable(config->pwr_clk_cnt, hdmi->pwr_clks);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ pinctrl_pm_select_sleep_state(dev);
+
+ return ret;
+}
+
+DEFINE_RUNTIME_DEV_PM_OPS(msm_hdmi_pm_ops, msm_hdmi_runtime_suspend, msm_hdmi_runtime_resume, NULL);
+
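With these runtime-PM callbacks in place, the regulators, pinctrl state, and pwr clock bulk are managed by the PM core, and register access elsewhere in the driver only needs to be bracketed by a runtime-PM reference, the same pattern the hdmi_i2c.c and hdmi_hpd.c hunks below adopt:

	ret = pm_runtime_resume_and_get(&hdmi->pdev->dev); /* powers the block up */
	if (ret)
		return ret;

	/* ... HDMI register access ... */

	pm_runtime_put(&hdmi->pdev->dev);	/* allows suspend again later */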
static const struct of_device_id msm_hdmi_dt_match[] = {
{ .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
@@ -449,6 +448,7 @@ static struct platform_driver msm_hdmi_driver = {
.driver = {
.name = "hdmi_msm",
.of_match_table = msm_hdmi_dt_match,
+ .pm = &msm_hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index a5f481c39277..02cfd46df594 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -41,16 +41,17 @@ struct hdmi {
/* video state: */
bool power_on;
+ bool hpd_enabled;
+ struct mutex state_mutex; /* protects power_on and hpd_enabled */
unsigned long int pixclock;
void __iomem *mmio;
void __iomem *qfprom_mmio;
phys_addr_t mmio_phy_addr;
- struct regulator_bulk_data *hpd_regs;
struct regulator_bulk_data *pwr_regs;
- struct clk **hpd_clks;
- struct clk **pwr_clks;
+ struct clk_bulk_data *pwr_clks;
+ struct clk *extp_clk;
struct gpio_desc *hpd_gpiod;
@@ -83,21 +84,12 @@ struct hdmi {
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
- /* regulators that need to be on for hpd: */
- const char **hpd_reg_names;
- int hpd_reg_cnt;
-
/* regulators that need to be on for screen pwr: */
- const char **pwr_reg_names;
+ const char * const *pwr_reg_names;
int pwr_reg_cnt;
- /* clks that need to be on for hpd: */
- const char **hpd_clk_names;
- const long unsigned *hpd_freq;
- int hpd_clk_cnt;
-
- /* clks that need to be on for screen pwr (ie pixel clk): */
- const char **pwr_clk_names;
+ /* clks that need to be on: */
+ const char * const *pwr_clk_names;
int pwr_clk_cnt;
};
@@ -208,12 +200,12 @@ struct hdmi_codec_daifmt;
struct hdmi_codec_params;
int msm_hdmi_audio_update(struct hdmi *hdmi);
-int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params);
-void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge);
+void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/*
* hdmi bridge:
@@ -223,9 +215,9 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
enum drm_connector_status msm_hdmi_bridge_detect(
- struct drm_bridge *bridge);
-int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
-void msm_hdmi_hpd_disable(struct hdmi *hdmi);
+ struct drm_bridge *bridge, struct drm_connector *connector);
+void msm_hdmi_hpd_enable(struct drm_bridge *bridge);
+void msm_hdmi_hpd_disable(struct drm_bridge *bridge);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 8bb975e82c17..d9a8dc9dae8f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <linux/hdmi.h>
@@ -12,71 +13,9 @@
#include "hdmi.h"
-/* Supported HDMI Audio sample rates */
-#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
-#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1
-#define MSM_HDMI_SAMPLE_RATE_48KHZ 2
-#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3
-#define MSM_HDMI_SAMPLE_RATE_96KHZ 4
-#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5
-#define MSM_HDMI_SAMPLE_RATE_192KHZ 6
-#define MSM_HDMI_SAMPLE_RATE_MAX 7
-
-
-struct hdmi_msm_audio_acr {
- uint32_t n; /* N parameter for clock regeneration */
- uint32_t cts; /* CTS parameter for clock regeneration */
-};
-
-struct hdmi_msm_audio_arcs {
- unsigned long int pixclock;
- struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
-};
-
-#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }
-
-/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
-/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
-static const struct hdmi_msm_audio_arcs acr_lut[] = {
- /* 25.200MHz */
- HDMI_MSM_AUDIO_ARCS(25200, {
- {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
- {12288, 25200}, {25088, 28000}, {24576, 25200} }),
- /* 27.000MHz */
- HDMI_MSM_AUDIO_ARCS(27000, {
- {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
- {12288, 27000}, {25088, 30000}, {24576, 27000} }),
- /* 27.027MHz */
- HDMI_MSM_AUDIO_ARCS(27030, {
- {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
- {12288, 27027}, {25088, 30030}, {24576, 27027} }),
- /* 74.250MHz */
- HDMI_MSM_AUDIO_ARCS(74250, {
- {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
- {12288, 74250}, {25088, 82500}, {24576, 74250} }),
- /* 148.500MHz */
- HDMI_MSM_AUDIO_ARCS(148500, {
- {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
- {12288, 148500}, {25088, 165000}, {24576, 148500} }),
-};
-
-static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
- const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
- if (arcs->pixclock == pixclock)
- return arcs;
- }
-
- return NULL;
-}
-
int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
- const struct hdmi_msm_audio_arcs *arcs = NULL;
bool enabled = audio->enabled;
uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
uint32_t audio_config;
@@ -94,15 +33,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
enabled = false;
}
- if (enabled) {
- arcs = get_arcs(hdmi->pixclock);
- if (!arcs) {
- DBG("disabling audio: unsupported pixclock: %lu",
- hdmi->pixclock);
- enabled = false;
- }
- }
-
/* Read first before writing */
acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
@@ -116,15 +46,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
uint32_t n, cts, multiplier;
enum hdmi_acr_cts select;
- n = arcs->lut[audio->rate].n;
- cts = arcs->lut[audio->rate].cts;
+ drm_hdmi_acr_get_n_cts(hdmi->pixclock, audio->rate, &n, &cts);
- if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
+ if (audio->rate == 192000 || audio->rate == 176400) {
multiplier = 4;
n >>= 2; /* divide N by 4 and use multiplier */
- } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
+ } else if (audio->rate == 96000 || audio->rate == 88200) {
multiplier = 2;
n >>= 1; /* divide N by 2 and use multiplier */
} else {
@@ -137,13 +64,11 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);
- if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
+ if (audio->rate == 48000 || audio->rate == 96000 ||
+ audio->rate == 192000)
select = ACR_48;
- else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
+ else if (audio->rate == 44100 || audio->rate == 88200 ||
+ audio->rate == 176400)
select = ACR_44;
else /* default to 32k */
select = ACR_32;
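The removed acr_lut[] hardcoded the HDMI Audio Clock Regeneration relation, 128 * f_s * CTS = f_TMDS * N, for a handful of pixel clocks; drm_hdmi_acr_get_n_cts() now computes the N/CTS pair generically. Restated as code, assuming the standard ACR formula from the HDMI spec:

	/* CTS = f_TMDS * N / (128 * f_s); e.g. 148.5 MHz, 48 kHz, N = 6144
	 * gives CTS = 148500, matching the removed 148.500MHz LUT entry. */
	u32 cts = div_u64((u64)tmds_char_rate * n, 128 * sample_rate);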
@@ -197,14 +122,13 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
return 0;
}
-int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- unsigned int rate;
int ret;
drm_dbg_driver(bridge->dev, "%u Hz, %d bit, %d channels\n",
@@ -214,25 +138,12 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
switch (params->sample_rate) {
case 32000:
- rate = MSM_HDMI_SAMPLE_RATE_32KHZ;
- break;
case 44100:
- rate = MSM_HDMI_SAMPLE_RATE_44_1KHZ;
- break;
case 48000:
- rate = MSM_HDMI_SAMPLE_RATE_48KHZ;
- break;
case 88200:
- rate = MSM_HDMI_SAMPLE_RATE_88_2KHZ;
- break;
case 96000:
- rate = MSM_HDMI_SAMPLE_RATE_96KHZ;
- break;
case 176400:
- rate = MSM_HDMI_SAMPLE_RATE_176_4KHZ;
- break;
case 192000:
- rate = MSM_HDMI_SAMPLE_RATE_192KHZ;
break;
default:
drm_err(bridge->dev, "rate[%d] not supported!\n",
@@ -245,15 +156,15 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
if (ret)
return ret;
- hdmi->audio.rate = rate;
+ hdmi->audio.rate = params->sample_rate;
hdmi->audio.channels = params->cea.channels;
hdmi->audio.enabled = true;
return msm_hdmi_audio_update(hdmi);
}
-void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge)
+void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 1456354c8af4..46fd58646d32 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -18,52 +18,34 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- int i, ret;
-
- pm_runtime_get_sync(&hdmi->pdev->dev);
+ int ret;
- ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret);
+ pm_runtime_resume_and_get(&hdmi->pdev->dev);
- if (config->pwr_clk_cnt > 0) {
+ if (hdmi->extp_clk) {
DBG("pixclock: %lu", hdmi->pixclock);
- ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
- config->pwr_clk_names[0], ret);
- }
- }
+ ret = clk_set_rate(hdmi->extp_clk, hdmi->pixclock);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to set extp clk rate: %d\n", ret);
- for (i = 0; i < config->pwr_clk_cnt; i++) {
- ret = clk_prepare_enable(hdmi->pwr_clks[i]);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
- config->pwr_clk_names[i], ret);
- }
+ ret = clk_prepare_enable(hdmi->extp_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable extp clk: %d\n", ret);
}
}
static void power_off(struct drm_bridge *bridge)
{
- struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- int i, ret;
/* TODO do we need to wait for final vblank somewhere before
* cutting the clocks?
*/
mdelay(16 + 4);
- for (i = 0; i < config->pwr_clk_cnt; i++)
- clk_disable_unprepare(hdmi->pwr_clks[i]);
-
- ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
+ if (hdmi->extp_clk)
+ clk_disable_unprepare(hdmi->extp_clk);
pm_runtime_put(&hdmi->pdev->dev);
}
@@ -320,13 +302,16 @@ static void msm_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
msm_hdmi_set_timings(hdmi, &crtc_state->adjusted_mode);
+ mutex_lock(&hdmi->state_mutex);
if (!hdmi->power_on) {
msm_hdmi_phy_resource_enable(phy);
msm_hdmi_power_on(bridge);
hdmi->power_on = true;
- if (connector->display_info.is_hdmi)
- msm_hdmi_audio_update(hdmi);
}
+ mutex_unlock(&hdmi->state_mutex);
+
+ if (connector->display_info.is_hdmi)
+ msm_hdmi_audio_update(hdmi);
drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
@@ -349,7 +334,10 @@ static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
msm_hdmi_hdcp_off(hdmi->hdcp_ctrl);
DBG("power down");
- msm_hdmi_set_mode(hdmi, false);
+
+ /* Keep the HDMI enabled if the HPD is enabled */
+ mutex_lock(&hdmi->state_mutex);
+ msm_hdmi_set_mode(hdmi, hdmi->hpd_enabled);
msm_hdmi_phy_powerdown(phy);
@@ -360,6 +348,7 @@ static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
msm_hdmi_audio_update(hdmi);
msm_hdmi_phy_resource_disable(phy);
}
+ mutex_unlock(&hdmi->state_mutex);
}
static void msm_hdmi_set_timings(struct hdmi *hdmi,
@@ -411,9 +400,6 @@ static void msm_hdmi_set_timings(struct hdmi *hdmi,
frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
-
- if (hdmi->connector->display_info.is_hdmi)
- msm_hdmi_audio_update(hdmi);
}
static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridge,
@@ -440,7 +426,6 @@ static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct dr
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = bridge->dev->dev_private;
struct msm_kms *kms = priv->kms;
long actual;
@@ -453,8 +438,8 @@ static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct dr
actual = kms->funcs->round_pixclk(kms,
tmds_rate,
hdmi_bridge->hdmi->encoder);
- else if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], tmds_rate);
+ else if (hdmi->extp_clk)
+ actual = clk_round_rate(hdmi->extp_clk, tmds_rate);
else
actual = tmds_rate;
@@ -474,6 +459,8 @@ static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
.atomic_post_disable = msm_hdmi_bridge_atomic_post_disable,
.edid_read = msm_hdmi_bridge_edid_read,
.detect = msm_hdmi_bridge_detect,
+ .hpd_enable = msm_hdmi_hpd_enable,
+ .hpd_disable = msm_hdmi_hpd_disable,
.hdmi_tmds_char_rate_valid = msm_hdmi_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = msm_hdmi_bridge_clear_infoframe,
.hdmi_write_infoframe = msm_hdmi_bridge_write_infoframe,
@@ -488,7 +475,7 @@ msm_hdmi_hotplug_work(struct work_struct *work)
container_of(work, struct hdmi_bridge, hpd_work);
struct drm_bridge *bridge = &hdmi_bridge->base;
- drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge));
+ drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge, hdmi_bridge->hdmi->connector));
}
/* initialize bridge */
@@ -498,16 +485,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
struct hdmi_bridge *hdmi_bridge;
int ret;
- hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
- sizeof(*hdmi_bridge), GFP_KERNEL);
- if (!hdmi_bridge)
- return -ENOMEM;
+ hdmi_bridge = devm_drm_bridge_alloc(hdmi->dev->dev, struct hdmi_bridge, base,
+ &msm_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi_bridge))
+ return PTR_ERR(hdmi_bridge);
hdmi_bridge->hdmi = hdmi;
INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
bridge = &hdmi_bridge->base;
- bridge->funcs = &msm_hdmi_bridge_funcs;
bridge->ddc = hdmi->i2c;
bridge->type = DRM_MODE_CONNECTOR_HDMIA;
bridge->vendor = "Qualcomm";
@@ -515,6 +501,7 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_EDID;
bridge->hdmi_audio_max_i2s_playback_channels = 8;
bridge->hdmi_audio_dev = &hdmi->pdev->dev;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index 9ce0ffa35417..114b0d507700 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -60,68 +60,30 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi)
}
}
-static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
-{
- const struct hdmi_platform_config *config = hdmi->config;
- struct device *dev = &hdmi->pdev->dev;
- int i, ret;
-
- if (enable) {
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- if (config->hpd_freq && config->hpd_freq[i]) {
- ret = clk_set_rate(hdmi->hpd_clks[i],
- config->hpd_freq[i]);
- if (ret)
- dev_warn(dev,
- "failed to set clk %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
-
- ret = clk_prepare_enable(hdmi->hpd_clks[i]);
- if (ret) {
- DRM_DEV_ERROR(dev,
- "failed to enable hpd clk: %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
- }
- } else {
- for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
- clk_disable_unprepare(hdmi->hpd_clks[i]);
- }
-}
-
-int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
+void msm_hdmi_hpd_enable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
int ret;
unsigned long flags;
- ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret);
- goto fail;
- }
-
- ret = pinctrl_pm_select_default_state(dev);
- if (ret) {
- DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
- goto fail;
- }
-
if (hdmi->hpd_gpiod)
gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1);
- pm_runtime_get_sync(dev);
- enable_hpd_clocks(hdmi, true);
+ ret = pm_runtime_resume_and_get(dev);
+ if (WARN_ON(ret))
+ return;
+ mutex_lock(&hdmi->state_mutex);
msm_hdmi_set_mode(hdmi, false);
msm_hdmi_phy_reset(hdmi);
msm_hdmi_set_mode(hdmi, true);
+ hdmi->hpd_enabled = true;
+ mutex_unlock(&hdmi->state_mutex);
+
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
/* enable HPD events: */
@@ -140,34 +102,23 @@ int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
-
- return 0;
-
-fail:
- return ret;
}
-void msm_hdmi_hpd_disable(struct hdmi *hdmi)
+void msm_hdmi_hpd_disable(struct drm_bridge *bridge)
{
- const struct hdmi_platform_config *config = hdmi->config;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
struct device *dev = &hdmi->pdev->dev;
- int ret;
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
- msm_hdmi_set_mode(hdmi, false);
+ mutex_lock(&hdmi->state_mutex);
+ hdmi->hpd_enabled = false;
+ msm_hdmi_set_mode(hdmi, hdmi->power_on);
+ mutex_unlock(&hdmi->state_mutex);
- enable_hpd_clocks(hdmi, false);
pm_runtime_put(dev);
-
- ret = pinctrl_pm_select_sleep_state(dev);
- if (ret)
- dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
-
- ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret)
- dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
}
void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
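After this rework the HDMI controller must stay enabled whenever either video output or HPD sensing is active, so each disable path passes the other flag to msm_hdmi_set_mode() under state_mutex. The invariant, distilled into a sketch rather than literal driver code:

	mutex_lock(&hdmi->state_mutex);
	msm_hdmi_set_mode(hdmi, hdmi->power_on || hdmi->hpd_enabled);
	mutex_unlock(&hdmi->state_mutex);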
@@ -202,14 +153,16 @@ void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
static enum drm_connector_status detect_reg(struct hdmi *hdmi)
{
- uint32_t hpd_int_status;
+ u32 hpd_int_status = 0;
+ int ret;
- pm_runtime_get_sync(&hdmi->pdev->dev);
- enable_hpd_clocks(hdmi, true);
+ ret = pm_runtime_resume_and_get(&hdmi->pdev->dev);
+ if (ret)
+ goto out;
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
- enable_hpd_clocks(hdmi, false);
+out:
pm_runtime_put(&hdmi->pdev->dev);
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
@@ -224,8 +177,8 @@ static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
connector_status_disconnected;
}
-enum drm_connector_status msm_hdmi_bridge_detect(
- struct drm_bridge *bridge)
+enum drm_connector_status
+msm_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index 7aa500d24240..ebefea4fb408 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -107,11 +107,15 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
if (num == 0)
return num;
+ ret = pm_runtime_resume_and_get(&hdmi->pdev->dev);
+ if (ret)
+ return ret;
+
init_ddc(hdmi_i2c);
ret = ddc_clear_irq(hdmi_i2c);
if (ret)
- return ret;
+ goto fail;
for (i = 0; i < num; i++) {
struct i2c_msg *p = &msgs[i];
@@ -169,7 +173,7 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
- return ret;
+ goto fail;
}
ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
@@ -202,7 +206,13 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
}
}
+ pm_runtime_put(&hdmi->pdev->dev);
+
return i;
+
+fail:
+ pm_runtime_put(&hdmi->pdev->dev);
+ return ret;
}
static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 03120c54ced6..667573f1db7c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -58,7 +58,11 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
struct device *dev = &phy->pdev->dev;
int i, ret = 0;
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "runtime resume failed: %d\n", ret);
+ return ret;
+ }
ret = regulator_bulk_enable(cfg->num_regs, phy->regs);
if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 7ab607252d18..bbda865addae 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -117,6 +117,36 @@ static const struct file_operations msm_gpu_fops = {
.release = msm_gpu_release,
};
+#ifdef CONFIG_DRM_MSM_KMS
+static int msm_fb_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+ if (dev->fb_helper && dev->fb_helper->fb) {
+ seq_puts(m, "fbcon ");
+ fbdev_fb = dev->fb_helper->fb;
+ msm_framebuffer_describe(fbdev_fb, m);
+ }
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_puts(m, "user ");
+ msm_framebuffer_describe(fb, m);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list msm_kms_debugfs_list[] = {
+ { "fb", msm_fb_show },
+};
+
/*
* Display Snapshot:
*/
@@ -180,6 +210,27 @@ static const struct file_operations msm_kms_fops = {
.release = msm_kms_release,
};
+static void msm_debugfs_kms_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ drm_debugfs_create_files(msm_kms_debugfs_list,
+ ARRAY_SIZE(msm_kms_debugfs_list),
+ minor->debugfs_root, minor);
+ debugfs_create_file("kms", 0400, minor->debugfs_root,
+ dev, &msm_kms_fops);
+
+ if (priv->kms->funcs->debugfs_init)
+ priv->kms->funcs->debugfs_init(priv->kms, minor);
+}
+#else /* ! CONFIG_DRM_MSM_KMS */
+static void msm_debugfs_kms_init(struct drm_minor *minor)
+{
+}
+#endif
+
/*
* Other debugfs:
*/
@@ -208,6 +259,35 @@ DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
shrink_get, shrink_set,
"0x%08llx\n");
+/*
+ * Return the number of microseconds to wait until stall-on-fault is
+ * re-enabled. If 0 then it is already enabled or will be re-enabled on the
+ * next submit (unless there's a leftover devcoredump). This is useful for
+ * kernel tests that intentionally produce a fault and check the devcoredump to
+ * wait until the cooldown period is over.
+ */
+
+static int
+stall_reenable_time_get(void *data, u64 *val)
+{
+ struct msm_drm_private *priv = data;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+
+ if (priv->stall_enabled)
+ *val = 0;
+ else
+ *val = max(ktime_us_delta(priv->stall_reenable_time, ktime_get()), 0);
+
+ spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(stall_reenable_time_fops,
+ stall_reenable_time_get, NULL,
+ "%lld\n");
static int msm_gem_show(struct seq_file *m, void *arg)
{
@@ -238,47 +318,23 @@ static int msm_mm_show(struct seq_file *m, void *arg)
return 0;
}
-static int msm_fb_show(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_framebuffer *fb, *fbdev_fb = NULL;
-
- if (dev->fb_helper && dev->fb_helper->fb) {
- seq_printf(m, "fbcon ");
- fbdev_fb = dev->fb_helper->fb;
- msm_framebuffer_describe(fbdev_fb, m);
- }
-
- mutex_lock(&dev->mode_config.fb_lock);
- list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
- if (fb == fbdev_fb)
- continue;
-
- seq_printf(m, "user ");
- msm_framebuffer_describe(fb, m);
- }
- mutex_unlock(&dev->mode_config.fb_lock);
-
- return 0;
-}
-
static struct drm_info_list msm_debugfs_list[] = {
{"gem", msm_gem_show},
{ "mm", msm_mm_show },
};
-static struct drm_info_list msm_kms_debugfs_list[] = {
- { "fb", msm_fb_show },
-};
-
static int late_init_minor(struct drm_minor *minor)
{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
int ret;
if (!minor)
return 0;
+ if (!priv->gpu_pdev)
+ return 0;
+
ret = msm_rd_debugfs_init(minor);
if (ret) {
DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
@@ -319,6 +375,9 @@ static void msm_debugfs_gpu_init(struct drm_minor *minor)
debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
&priv->disable_err_irq);
+ debugfs_create_file("stall_reenable_time_us", 0400, minor->debugfs_root,
+ priv, &stall_reenable_time_fops);
+
gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
debugfs_create_bool("idle_clamp",0600, gpu_devfreq,
@@ -343,20 +402,12 @@ void msm_debugfs_init(struct drm_minor *minor)
if (priv->gpu_pdev)
msm_debugfs_gpu_init(minor);
- if (priv->kms) {
- drm_debugfs_create_files(msm_kms_debugfs_list,
- ARRAY_SIZE(msm_kms_debugfs_list),
- minor->debugfs_root, minor);
- debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
- dev, &msm_kms_fops);
- }
+ if (priv->kms)
+ msm_debugfs_kms_init(minor);
debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
dev, &shrink_fops);
- if (priv->kms && priv->kms->funcs->debugfs_init)
- priv->kms->funcs->debugfs_init(priv->kms, minor);
-
fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
&fail_gem_alloc);
fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c3588dc9e537..9dcc7a596a11 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -11,7 +11,6 @@
#include <linux/of_address.h>
#include <linux/uaccess.h>
-#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -41,17 +40,12 @@
* - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
* - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
* - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
+ * - 1.13.0 - Add VM_BIND
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 12
+#define MSM_VERSION_MINOR 13
#define MSM_VERSION_PATCHLEVEL 0
-static void msm_deinit_vram(struct drm_device *ddev);
-
-static char *vram = "16m";
-MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
-module_param(vram, charp, 0);
-
bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
@@ -60,10 +54,19 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
+static bool separate_gpu_kms;
+MODULE_PARM_DESC(separate_gpu_kms, "Use separate DRM device for the GPU (0=single DRM device for both GPU and display (default), 1=two DRM devices)");
+module_param(separate_gpu_kms, bool, 0400);
+
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
-static int msm_drm_uninit(struct device *dev)
+bool msm_gpu_no_components(void)
+{
+ return separate_gpu_kms;
+}
+
+static int msm_drm_uninit(struct device *dev, const struct component_ops *gpu_ops)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
@@ -79,16 +82,9 @@ static int msm_drm_uninit(struct device *dev)
if (ddev->registered) {
drm_dev_unregister(ddev);
if (priv->kms)
- drm_atomic_helper_shutdown(ddev);
+ msm_drm_kms_unregister(dev);
}
- /* We must cancel and cleanup any pending vblank enable/disable
- * work before msm_irq_uninstall() to avoid work re-enabling an
- * irq after uninstall has disabled it.
- */
-
- flush_workqueue(priv->wq);
-
msm_gem_shrinker_cleanup(ddev);
msm_perf_debugfs_cleanup(priv);
@@ -97,120 +93,19 @@ static int msm_drm_uninit(struct device *dev)
if (priv->kms)
msm_drm_kms_uninit(dev);
- msm_deinit_vram(ddev);
-
- component_unbind_all(dev, ddev);
+ if (gpu_ops)
+ gpu_ops->unbind(dev, dev, NULL);
+ else
+ component_unbind_all(dev, ddev);
ddev->dev_private = NULL;
drm_dev_put(ddev);
- destroy_workqueue(priv->wq);
-
return 0;
}
-bool msm_use_mmu(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
-
- /*
- * a2xx comes with its own MMU
- * On other platforms IOMMU can be declared specified either for the
- * MDP/DPU device or for its parent, MDSS device.
- */
- return priv->is_a2xx ||
- device_iommu_mapped(dev->dev) ||
- device_iommu_mapped(dev->dev->parent);
-}
-
-static int msm_init_vram(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct device_node *node;
- unsigned long size = 0;
- int ret = 0;
-
- /* In the device-tree world, we could have a 'memory-region'
- * phandle, which gives us a link to our "vram". Allocating
- * is all nicely abstracted behind the dma api, but we need
- * to know the entire size to allocate it all in one go. There
- * are two cases:
- * 1) device with no IOMMU, in which case we need exclusive
- * access to a VRAM carveout big enough for all gpu
- * buffers
- * 2) device with IOMMU, but where the bootloader puts up
- * a splash screen. In this case, the VRAM carveout
- * need only be large enough for fbdev fb. But we need
- * exclusive access to the buffer to avoid the kernel
- * using those pages for other purposes (which appears
- * as corruption on screen before we have a chance to
- * load and do initial modeset)
- */
-
- node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
- if (node) {
- struct resource r;
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret)
- return ret;
- size = r.end - r.start + 1;
- DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
-
- /* if we have no IOMMU, then we need to use carveout allocator.
- * Grab the entire DMA chunk carved out in early startup in
- * mach-msm:
- */
- } else if (!msm_use_mmu(dev)) {
- DRM_INFO("using %s VRAM carveout\n", vram);
- size = memparse(vram, NULL);
- }
-
- if (size) {
- unsigned long attrs = 0;
- void *p;
-
- priv->vram.size = size;
-
- drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
- spin_lock_init(&priv->vram.lock);
-
- attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
- attrs |= DMA_ATTR_WRITE_COMBINE;
-
- /* note that for no-kernel-mapping, the vaddr returned
- * is bogus, but non-null if allocation succeeded:
- */
- p = dma_alloc_attrs(dev->dev, size,
- &priv->vram.paddr, GFP_KERNEL, attrs);
- if (!p) {
- DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
- priv->vram.paddr = 0;
- return -ENOMEM;
- }
-
- DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
- (uint32_t)priv->vram.paddr,
- (uint32_t)(priv->vram.paddr + size));
- }
-
- return ret;
-}
-
-static void msm_deinit_vram(struct drm_device *ddev)
-{
- struct msm_drm_private *priv = ddev->dev_private;
- unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
-
- if (!priv->vram.paddr)
- return;
-
- drm_mm_takedown(&priv->vram.mm);
- dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
- attrs);
-}
-
-static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+static int msm_drm_init(struct device *dev, const struct drm_driver *drv,
+ const struct component_ops *gpu_ops)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
@@ -227,12 +122,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ddev->dev_private = priv;
priv->dev = ddev;
- priv->wq = alloc_ordered_workqueue("msm", 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto err_put_dev;
- }
-
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
@@ -245,6 +134,10 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
+ /* Initialize stall-on-fault */
+ spin_lock_init(&priv->fault_stall_lock);
+ priv->stall_enabled = true;
+
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&priv->lru.lock);
@@ -253,19 +146,18 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (priv->kms_init) {
ret = drmm_mode_config_init(ddev);
if (ret)
- goto err_destroy_wq;
+ goto err_put_dev;
}
- ret = msm_init_vram(ddev);
- if (ret)
- goto err_destroy_wq;
-
dma_set_max_seg_size(dev, UINT_MAX);
/* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ if (gpu_ops)
+ ret = gpu_ops->bind(dev, dev, NULL);
+ else
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_deinit_vram;
+ goto err_put_dev;
ret = msm_gem_shrinker_init(ddev);
if (ret)
@@ -275,11 +167,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ret = msm_drm_kms_init(dev, drv);
if (ret)
goto err_msm_uninit;
- } else {
- /* valid only for the dummy headless case, where of_node=NULL */
- WARN_ON(dev->of_node);
- ddev->driver_features &= ~DRIVER_MODESET;
- ddev->driver_features &= ~DRIVER_ATOMIC;
}
ret = drm_dev_register(ddev, 0);
@@ -290,22 +177,16 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
- if (priv->kms_init) {
- drm_kms_helper_poll_init(ddev);
- drm_client_setup(ddev, NULL);
- }
+ if (priv->kms_init)
+ msm_drm_kms_post_init(dev);
return 0;
err_msm_uninit:
- msm_drm_uninit(dev);
+ msm_drm_uninit(dev, gpu_ops);
return ret;
-err_deinit_vram:
- msm_deinit_vram(ddev);
-err_destroy_wq:
- destroy_workqueue(priv->wq);
err_put_dev:
drm_dev_put(ddev);
@@ -329,11 +210,42 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock);
}
+/**
+ * msm_context_vm - lazily create the context's VM
+ *
+ * @dev: the drm device
+ * @ctx: the context
+ *
+ * The VM is lazily created, so that userspace has a chance to opt-in to having
+ * a userspace managed VM before the VM is created.
+ *
+ * Note that this does not return a reference to the VM. Once the VM is created,
+ * it exists for the lifetime of the context.
+ */
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
+{
+ static DEFINE_MUTEX(init_lock);
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* Once ctx->vm is created it is valid for the lifetime of the context: */
+ if (ctx->vm)
+ return ctx->vm;
+
+ mutex_lock(&init_lock);
+ if (!ctx->vm) {
+ ctx->vm = msm_gpu_create_private_vm(
+ priv->gpu, current, !ctx->userspace_managed_vm);
+
+ }
+ mutex_unlock(&init_lock);
+
+ return ctx->vm;
+}
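
The helper above is the classic check/lock/re-check lazy-init shape: a
lockless fast path, then a second check under a mutex so only one racer
creates the VM. A self-contained sketch of the same idiom (struct thing and
create_thing() are hypothetical stand-ins):

#include <linux/mutex.h>

struct thing;					/* hypothetical payload */
struct ctx { struct thing *thing; };		/* hypothetical context */
struct thing *create_thing(struct ctx *c);

static struct thing *get_or_create(struct ctx *c)
{
	static DEFINE_MUTEX(init_lock);

	/* Fast path: once created, valid for the lifetime of the ctx */
	if (c->thing)
		return c->thing;

	mutex_lock(&init_lock);
	/* Re-check under the lock; another thread may have won the race */
	if (!c->thing)
		c->thing = create_thing(c);
	mutex_unlock(&init_lock);

	return c->thing;
}
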
+
static int context_init(struct drm_device *dev, struct drm_file *file)
{
static atomic_t ident = ATOMIC_INIT(0);
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx;
+ struct msm_context *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -345,7 +257,6 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
kref_init(&ctx->ref);
msm_submitqueue_init(dev, ctx);
- ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
file->driver_priv = ctx;
ctx->seqno = atomic_inc_return(&ident);
@@ -363,23 +274,24 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
return context_init(dev, file);
}
-static void context_close(struct msm_file_private *ctx)
+static void context_close(struct msm_context *ctx)
{
+ ctx->closed = true;
msm_submitqueue_close(ctx);
- msm_file_private_put(ctx);
+ msm_context_put(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
/*
* It is not possible to set sysprof param to non-zero if gpu
* is not initialized:
*/
if (priv->gpu)
- msm_file_private_set_sysprof(ctx, priv->gpu, 0);
+ msm_context_set_sysprof(ctx, priv->gpu, 0);
context_close(ctx);
}
@@ -511,11 +423,14 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
uint64_t *iova)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
if (!priv->gpu)
return -EINVAL;
+ if (msm_context_is_vmbind(ctx))
+ return UERR(EINVAL, dev, "VM_BIND is enabled");
+
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
@@ -523,7 +438,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
* Don't pin the memory here - just get an address so that userspace can
* be productive
*/
- return msm_gem_get_iova(obj, ctx->aspace, iova);
+ return msm_gem_get_iova(obj, msm_context_vm(dev, ctx), iova);
}
static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
@@ -531,19 +446,23 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
uint64_t iova)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_gpuvm *vm = msm_context_vm(dev, ctx);
if (!priv->gpu)
return -EINVAL;
+ if (msm_context_is_vmbind(ctx))
+ return UERR(EINVAL, dev, "VM_BIND is enabled");
+
/* Only supported if per-process address space is supported: */
- if (priv->gpu->aspace == ctx->aspace)
+ if (priv->gpu->vm == vm)
return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
- return msm_gem_set_iova(obj, ctx->aspace, iova);
+ return msm_gem_set_iova(obj, vm, iova);
}
static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
@@ -551,6 +470,7 @@ static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
u32 metadata_size)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ void *new_metadata;
void *buf;
int ret;
@@ -568,8 +488,14 @@ static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
if (ret)
goto out;
- msm_obj->metadata =
+ new_metadata =
krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
+ if (!new_metadata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msm_obj->metadata = new_metadata;
msm_obj->metadata_size = metadata_size;
memcpy(msm_obj->metadata, buf, metadata_size);
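
The hunk above fixes the textbook krealloc() pitfall: assigning the result
straight back to the only pointer loses (and here, leaves dangling) the old
buffer when reallocation fails. The safe idiom in isolation (resize_buf is a
hypothetical helper, not part of this patch):

#include <linux/slab.h>

static int resize_buf(void **bufp, size_t new_size)
{
	void *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is unchanged and still owned */

	*bufp = tmp;
	return 0;
}
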
@@ -671,7 +597,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
break;
case MSM_INFO_GET_FLAGS:
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = -EINVAL;
break;
}
@@ -868,6 +794,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_VM_BIND, msm_ioctl_vm_bind, DRM_RENDER_ALLOW),
};
static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
@@ -889,17 +816,27 @@ static const struct file_operations fops = {
.show_fdinfo = drm_show_fdinfo,
};
+#define DRIVER_FEATURES_GPU ( \
+ DRIVER_GEM | \
+ DRIVER_GEM_GPUVA | \
+ DRIVER_RENDER | \
+ DRIVER_SYNCOBJ | \
+ DRIVER_SYNCOBJ_TIMELINE | \
+ 0 )
+
+#define DRIVER_FEATURES_KMS ( \
+ DRIVER_GEM | \
+ DRIVER_ATOMIC | \
+ DRIVER_MODESET | \
+ 0 )
+
static const struct drm_driver msm_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_RENDER |
- DRIVER_ATOMIC |
- DRIVER_MODESET |
- DRIVER_SYNCOBJ_TIMELINE |
- DRIVER_SYNCOBJ,
+ .driver_features = DRIVER_FEATURES_GPU | DRIVER_FEATURES_KMS,
.open = msm_open,
.postclose = msm_postclose,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
+ .gem_prime_import = msm_gem_prime_import,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
@@ -916,6 +853,45 @@ static const struct drm_driver msm_driver = {
.patchlevel = MSM_VERSION_PATCHLEVEL,
};
+static const struct drm_driver msm_kms_driver = {
+ .driver_features = DRIVER_FEATURES_KMS,
+ .open = msm_open,
+ .postclose = msm_postclose,
+ .dumb_create = msm_gem_dumb_create,
+ .dumb_map_offset = msm_gem_dumb_map_offset,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+#endif
+ MSM_FBDEV_DRIVER_OPS,
+ .show_fdinfo = msm_show_fdinfo,
+ .fops = &fops,
+ .name = "msm-kms",
+ .desc = "MSM Snapdragon DRM",
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
+};
+
+static const struct drm_driver msm_gpu_driver = {
+ .driver_features = DRIVER_FEATURES_GPU,
+ .open = msm_open,
+ .postclose = msm_postclose,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+#endif
+ .show_fdinfo = msm_show_fdinfo,
+ .ioctls = msm_ioctls,
+ .num_ioctls = ARRAY_SIZE(msm_ioctls),
+ .fops = &fops,
+ .name = "msm",
+ .desc = "MSM Snapdragon DRM",
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
+};
+
/*
* Componentized driver support:
*/
@@ -926,7 +902,7 @@ static const struct drm_driver msm_driver = {
* is no external component that we need to add since LVDS is within MDP4
* itself.
*/
-static int add_components_mdp(struct device *master_dev,
+static int add_mdp_components(struct device *master_dev,
struct component_match **matchptr)
{
struct device_node *np = master_dev->of_node;
@@ -1030,7 +1006,7 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- if (of_device_is_available(np))
+ if (of_device_is_available(np) && adreno_has_gpu(np))
drm_of_component_match_add(dev, matchptr, component_compare_of, np);
of_node_put(np);
@@ -1040,12 +1016,16 @@ static int add_gpu_components(struct device *dev,
static int msm_drm_bind(struct device *dev)
{
- return msm_drm_init(dev, &msm_driver);
+ return msm_drm_init(dev,
+ msm_gpu_no_components() ?
+ &msm_kms_driver :
+ &msm_driver,
+ NULL);
}
static void msm_drm_unbind(struct device *dev)
{
- msm_drm_uninit(dev);
+ msm_drm_uninit(dev, NULL);
}
const struct component_master_ops msm_drm_ops = {
@@ -1071,14 +1051,16 @@ int msm_drv_probe(struct device *master_dev,
/* Add mdp components if we have KMS. */
if (kms_init) {
- ret = add_components_mdp(master_dev, &match);
+ ret = add_mdp_components(master_dev, &match);
if (ret)
return ret;
}
- ret = add_gpu_components(master_dev, &match);
- if (ret)
- return ret;
+ if (!msm_gpu_no_components()) {
+ ret = add_gpu_components(master_dev, &match);
+ if (ret)
+ return ret;
+ }
/* on all devices that I am aware of, iommu's which can map
* any address the cpu can see are used:
@@ -1094,29 +1076,34 @@ int msm_drv_probe(struct device *master_dev,
return 0;
}
-/*
- * Platform driver:
- * Used only for headlesss GPU instances
- */
-
-static int msm_pdev_probe(struct platform_device *pdev)
+int msm_gpu_probe(struct platform_device *pdev,
+ const struct component_ops *ops)
{
- return msm_drv_probe(&pdev->dev, NULL, NULL);
+ struct msm_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ /* on all devices that I am aware of, IOMMUs which can map
+ * any address the CPU can see are used:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+ return ret;
+
+ return msm_drm_init(&pdev->dev, &msm_gpu_driver, ops);
}
-static void msm_pdev_remove(struct platform_device *pdev)
+void msm_gpu_remove(struct platform_device *pdev,
+ const struct component_ops *ops)
{
- component_master_del(&pdev->dev, &msm_drm_ops);
+ msm_drm_uninit(&pdev->dev, ops);
}
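
With the headless platform driver gone, a GPU driver is expected to call
msm_gpu_probe()/msm_gpu_remove() from its own probe/remove paths, passing the
component_ops that msm_drm_init() will invoke in place of
component_bind_all(). A hedged sketch of such a caller (gpu_bind/gpu_unbind
and the function names are hypothetical, not taken from this patch):

#include <linux/component.h>
#include <linux/platform_device.h>
#include "msm_drv.h"

/* Hypothetical bind/unbind callbacks supplied by the real GPU driver */
static int gpu_bind(struct device *dev, struct device *master, void *data);
static void gpu_unbind(struct device *dev, struct device *master, void *data);

static const struct component_ops gpu_ops = {
	.bind = gpu_bind,
	.unbind = gpu_unbind,
};

static int gpu_pdev_probe(struct platform_device *pdev)
{
	/* allocates priv, sets the DMA mask, then drives msm_drm_init() */
	return msm_gpu_probe(pdev, &gpu_ops);
}

static void gpu_pdev_remove(struct platform_device *pdev)
{
	msm_gpu_remove(pdev, &gpu_ops);
}
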
-static struct platform_driver msm_platform_driver = {
- .probe = msm_pdev_probe,
- .remove = msm_pdev_remove,
- .driver = {
- .name = "msm",
- },
-};
-
static int __init msm_drm_register(void)
{
if (!modeset)
@@ -1131,13 +1118,13 @@ static int __init msm_drm_register(void)
adreno_register();
msm_mdp4_register();
msm_mdss_register();
- return platform_driver_register(&msm_platform_driver);
+
+ return 0;
}
static void __exit msm_drm_unregister(void)
{
DBG("fini");
- platform_driver_unregister(&msm_platform_driver);
msm_mdss_unregister();
msm_mdp4_unregister();
msm_dp_unregister();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a65077855201..985db9febd98 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -48,8 +48,6 @@ struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
-struct msm_gem_address_space;
-struct msm_gem_vma;
struct msm_disp_state;
#define MAX_CRTCS 8
@@ -72,12 +70,6 @@ enum msm_dsi_controller {
#define MSM_GPU_MAX_RINGS 4
-/* Commit/Event thread specific structure */
-struct msm_drm_thread {
- struct drm_device *dev;
- struct kthread_worker *worker;
-};
-
struct msm_drm_private {
struct drm_device *dev;
@@ -88,16 +80,6 @@ struct msm_drm_private {
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
- /* possibly this should be in the kms component, but it is
- * shared by both mdp4 and mdp5..
- */
- struct hdmi *hdmi;
-
- /* DSI is shared by mdp4 and mdp5 */
- struct msm_dsi *dsi[MSM_DSI_CONTROLLER_COUNT];
-
- struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
-
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
@@ -177,23 +159,6 @@ struct msm_drm_private {
struct mutex lock;
} lru;
- struct workqueue_struct *wq;
-
- unsigned int num_crtcs;
-
- struct msm_drm_thread event_thread[MAX_CRTCS];
-
- /* VRAM carveout, used when no IOMMU: */
- struct {
- unsigned long size;
- dma_addr_t paddr;
- /* NOTE: mm managed at the page level, size is in # of pages
- * and position mm_node->start is in # of pages:
- */
- struct drm_mm mm;
- spinlock_t lock; /* Protects drm_mm node allocation/removal */
- } vram;
-
struct notifier_block vmap_notifier;
struct shrinker *shrinker;
@@ -222,6 +187,29 @@ struct msm_drm_private {
* the sw hangcheck mechanism.
*/
bool disable_err_irq;
+
+ /**
+ * @fault_stall_lock:
+ *
+ * Serialize changes to stall-on-fault state.
+ */
+ spinlock_t fault_stall_lock;
+
+ /**
+ * @stall_reenable_time:
+ *
+ * If stall_enabled is false, when to reenable stall-on-fault.
+ * Protected by @fault_stall_lock.
+ */
+ ktime_t stall_reenable_time;
+
+ /**
+ * @stall_enabled:
+ *
+ * Whether stall-on-fault is currently enabled. Protected by
+ * @fault_stall_lock.
+ */
+ bool stall_enabled;
};
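
As a rough illustration of how these three fields cooperate (an assumption
about a consumer, not code from this patch): a fault handler would take
@fault_stall_lock, clear @stall_enabled, and record in @stall_reenable_time
when stalling may be turned back on.

/* Hypothetical consumer of the stall-on-fault state declared above */
static void fault_disable_stall(struct msm_drm_private *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->fault_stall_lock, flags);
	priv->stall_enabled = false;
	/* arbitrary back-off; re-enable stalling 500ms after the fault */
	priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
	spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
}
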
const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
@@ -241,11 +229,13 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev);
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
- struct drm_file *file);
+ struct drm_file *file);
+int msm_ioctl_vm_bind(struct drm_device *dev, void *data,
+ struct drm_file *file);
#ifdef CONFIG_DEBUG_FS
unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
@@ -257,25 +247,25 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, struct dma_buf *buf);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
+struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, bool needs_dirtyfb);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, bool needed_dirtyfb);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
int w, int h, int p, uint32_t format);
-#ifdef CONFIG_DRM_FBDEV_EMULATION
+#ifdef CONFIG_DRM_MSM_KMS_FBDEV
int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
#define MSM_FBDEV_DRIVER_OPS \
@@ -360,6 +350,7 @@ static inline const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi)
}
#endif
+struct msm_dp;
#ifdef CONFIG_DRM_MSM_DP
int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
@@ -553,6 +544,10 @@ extern const struct component_master_ops msm_drm_ops;
int msm_kms_pm_prepare(struct device *dev);
void msm_kms_pm_complete(struct device *dev);
+int msm_gpu_probe(struct platform_device *pdev,
+ const struct component_ops *ops);
+void msm_gpu_remove(struct platform_device *pdev,
+ const struct component_ops *ops);
int msm_drv_probe(struct device *dev,
int (*kms_init)(struct drm_device *dev),
struct msm_kms *kms);
@@ -560,4 +555,6 @@ void msm_kms_shutdown(struct platform_device *pdev);
bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver);
+bool msm_gpu_no_components(void);
+
#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 09268e416843..1eff615ff9bf 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -30,6 +30,7 @@ struct msm_framebuffer {
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
static int msm_framebuffer_dirtyfb(struct drm_framebuffer *fb,
@@ -75,20 +76,22 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
/* prepare/pin all the fb's bo's for scanout.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace,
- bool needs_dirtyfb)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb)
{
+ struct msm_drm_private *priv = fb->dev->dev_private;
+ struct drm_gpuvm *vm = priv->kms->vm;
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
if (needs_dirtyfb)
refcount_inc(&msm_fb->dirtyfb);
- atomic_inc(&msm_fb->prepare_count);
+ if (atomic_inc_return(&msm_fb->prepare_count) > 1)
+ return 0;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
+ msm_gem_vma_get(fb->obj[i]);
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], vm, &msm_fb->iova[i]);
drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
@@ -98,25 +101,28 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace,
- bool needed_dirtyfb)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb)
{
+ struct msm_drm_private *priv = fb->dev->dev_private;
+ struct drm_gpuvm *vm = priv->kms->vm;
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
if (needed_dirtyfb)
refcount_dec(&msm_fb->dirtyfb);
- for (i = 0; i < n; i++)
- msm_gem_unpin_iova(fb->obj[i], aspace);
+ if (atomic_dec_return(&msm_fb->prepare_count))
+ return;
+
+ memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
- if (!atomic_dec_return(&msm_fb->prepare_count))
- memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
+ for (i = 0; i < n; i++) {
+ msm_gem_unpin_iova(fb->obj[i], vm);
+ msm_gem_vma_put(fb->obj[i]);
+ }
}
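
The rework above gates the actual pin/unpin on the first and last user: only
the 0->1 transition of prepare_count pins the planes, and only the transition
back to 0 unpins them. The gating idiom in isolation (struct res, do_setup()
and do_teardown() are hypothetical):

#include <linux/atomic.h>

struct res { atomic_t users; };		/* hypothetical shared resource */
int do_setup(struct res *r);		/* hypothetical, expensive */
void do_teardown(struct res *r);

static int res_get(struct res *r)
{
	/* only the first user performs the expensive setup */
	if (atomic_inc_return(&r->users) > 1)
		return 0;
	return do_setup(r);
}

static void res_put(struct res *r)
{
	/* only the last user tears down */
	if (atomic_dec_return(&r->users))
		return;
	do_teardown(r);
}
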
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
return msm_fb->iova[plane] + fb->offsets[plane];
@@ -134,10 +140,9 @@ const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
}
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
struct drm_gem_object *bos[4] = {0};
struct drm_framebuffer *fb;
int ret, i, n = info->num_planes;
@@ -150,7 +155,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
}
}
- fb = msm_framebuffer_init(dev, mode_cmd, bos);
+ fb = msm_framebuffer_init(dev, info, mode_cmd, bos);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unref;
@@ -165,10 +170,9 @@ out_unref:
}
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_framebuffer *msm_fb = NULL;
@@ -222,7 +226,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->base.obj[i] = bos[i];
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
@@ -271,7 +275,10 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
msm_gem_object_set_name(bo, "stolenfb");
- fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
+ fb = msm_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &bo);
if (IS_ERR(fb)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c62249b1ab3d..b5969374d53f 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -122,7 +122,7 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_and_pin_iova(bo, priv->kms->vm, &paddr);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ebc9ba66efb8..7ff994d4f91a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -9,7 +9,6 @@
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
#include <drm/drm_prime.h>
#include <drm/drm_file.h>
@@ -17,24 +16,9 @@
#include <trace/events/gpu_mem.h>
#include "msm_drv.h"
-#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
-#include "msm_mmu.h"
-
-static dma_addr_t physaddr(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
- priv->vram.paddr;
-}
-
-static bool use_pages(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- return !msm_obj->vram_node;
-}
+#include "msm_kms.h"
static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
{
@@ -44,7 +28,7 @@ static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
static void update_ctx_mem(struct drm_file *file, ssize_t size)
{
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
rcu_read_lock(); /* Locks file->pid! */
@@ -55,13 +39,73 @@ static void update_ctx_mem(struct drm_file *file, ssize_t size)
static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{
+ msm_gem_vma_get(obj);
update_ctx_mem(file, obj->size);
return 0;
}
+static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason);
+
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_exec exec;
+
update_ctx_mem(file, -obj->size);
+ msm_gem_vma_put(obj);
+
+ /*
+ * If the VM isn't created yet, there is nothing to clean up. And in
+ * fact, calling put_iova_spaces() with vm=NULL would be bad, in that
+ * it would tear down the mappings of shared buffers in other contexts.
+ */
+ if (!ctx->vm)
+ return;
+
+ /*
+ * VM_BIND does not depend on implicit teardown of VMAs on handle
+ * close, but instead on implicit teardown of the VM when the device
+ * is closed (see msm_gem_vm_close())
+ */
+ if (msm_context_is_vmbind(ctx))
+ return;
+
+ /*
+ * TODO we might need to kick this to a queue to avoid blocking
+ * in CLOSE ioctl
+ */
+ dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
+
+ msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
+ put_iova_spaces(obj, ctx->vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
+}
+
+/*
+ * Get/put for kms->vm VMA
+ */
+
+void msm_gem_vma_get(struct drm_gem_object *obj)
+{
+ atomic_inc(&to_msm_bo(obj)->vma_ref);
+}
+
+void msm_gem_vma_put(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct drm_exec exec;
+
+ if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
+ return;
+
+ if (!priv->kms)
+ return;
+
+ msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
+ put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
+ drm_exec_fini(&exec); /* drop locks */
}
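
msm_gem_vma_get()/msm_gem_vma_put() behave as a lazy-teardown count on the
kms->vm mapping: the final put tears down the VMA rather than freeing the
object. A hedged sketch of a paired caller, modeled on the framebuffer
prepare/cleanup paths later in this patch (scanout_pin/scanout_unpin are
hypothetical names):

static int scanout_pin(struct drm_gem_object *obj,
		       struct msm_drm_private *priv, uint64_t *iova)
{
	int ret;

	msm_gem_vma_get(obj);
	ret = msm_gem_get_and_pin_iova(obj, priv->kms->vm, iova);
	if (ret)
		msm_gem_vma_put(obj);
	return ret;
}

static void scanout_unpin(struct drm_gem_object *obj,
			  struct msm_drm_private *priv)
{
	msm_gem_unpin_iova(obj, priv->kms->vm);
	msm_gem_vma_put(obj);	/* last put tears down the kms->vm VMA */
}
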
/*
@@ -135,36 +179,6 @@ static void update_lru(struct drm_gem_object *obj)
mutex_unlock(&priv->lru.lock);
}
-/* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- dma_addr_t paddr;
- struct page **p;
- int ret, i;
-
- p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- spin_lock(&priv->vram.lock);
- ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
- spin_unlock(&priv->vram.lock);
- if (ret) {
- kvfree(p);
- return ERR_PTR(ret);
- }
-
- paddr = physaddr(obj);
- for (i = 0; i < npages; i++) {
- p[i] = pfn_to_page(__phys_to_pfn(paddr));
- paddr += PAGE_SIZE;
- }
-
- return p;
-}
-
static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -176,10 +190,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
struct page **p;
int npages = obj->size >> PAGE_SHIFT;
- if (use_pages(obj))
- p = drm_gem_get_pages(obj);
- else
- p = get_pages_vram(obj, npages);
+ p = drm_gem_get_pages(obj);
if (IS_ERR(p)) {
DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
@@ -212,22 +223,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
return msm_obj->pages;
}
-static void put_pages_vram(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
-
- spin_lock(&priv->vram.lock);
- drm_mm_remove_node(msm_obj->vram_node);
- spin_unlock(&priv->vram.lock);
-
- kvfree(msm_obj->pages);
-}
-
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ /*
+ * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
+ * See explanation in msm_gem_assert_locked()
+ */
+ if (kref_read(&obj->refcount))
+ drm_gpuvm_bo_gem_evict(obj, true);
+
if (msm_obj->pages) {
if (msm_obj->sgt) {
/* For non-cached buffers, ensure the new
@@ -244,18 +250,14 @@ static void put_pages(struct drm_gem_object *obj)
update_device_mem(obj->dev->dev_private, -obj->size);
- if (use_pages(obj))
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else
- put_pages_vram(obj);
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
msm_obj->pages = NULL;
update_lru(obj);
}
}
-static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
- unsigned madv)
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -397,48 +399,31 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
-static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuvm_bo *vm_bo;
msm_gem_assert_locked(obj);
- vma = msm_gem_vma_new(aspace);
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- list_add_tail(&vma->list, &msm_obj->vmas);
-
- return vma;
-}
-
-static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
- msm_gem_assert_locked(obj);
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ if (vma->vm == vm) {
+ /* lookup_vma() should only be used in paths
+ * with at most one vma per vm
+ */
+ GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace == aspace)
- return vma;
+ return vma;
+ }
+ }
}
return NULL;
}
-static void del_vma(struct msm_gem_vma *vma)
-{
- if (!vma)
- return;
-
- list_del(&vma->list);
- kfree(vma);
-}
-
/*
* If close is true, this also closes the VMA (releasing the allocated
* iova range) in addition to removing the iommu mapping. In the eviction
@@ -446,71 +431,54 @@ static void del_vma(struct msm_gem_vma *vma)
* mapping.
*/
static void
-put_iova_spaces(struct drm_gem_object *obj, bool close)
+put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuvm_bo *vm_bo, *tmp;
msm_gem_assert_locked(obj);
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace) {
- msm_gem_vma_purge(vma);
+ drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
+ struct drm_gpuva *vma, *vmatmp;
+
+ if (vm && vm_bo->vm != vm)
+ continue;
+
+ drm_gpuvm_bo_get(vm_bo);
+
+ drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
+ msm_gem_vma_unmap(vma, reason);
if (close)
msm_gem_vma_close(vma);
}
- }
-}
-/* Called with msm_obj locked */
-static void
-put_iova_vmas(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma, *tmp;
-
- msm_gem_assert_locked(obj);
-
- list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- del_vma(vma);
+ drm_gpuvm_bo_put(vm_bo);
}
}
-static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace,
- u64 range_start, u64 range_end)
+static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm, u64 range_start,
+ u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
msm_gem_assert_locked(obj);
- vma = lookup_vma(obj, aspace);
+ vma = lookup_vma(obj, vm);
if (!vma) {
- int ret;
-
- vma = add_vma(obj, aspace);
- if (IS_ERR(vma))
- return vma;
-
- ret = msm_gem_vma_init(vma, obj->size,
- range_start, range_end);
- if (ret) {
- del_vma(vma);
- return ERR_PTR(ret);
- }
+ vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
} else {
- GEM_WARN_ON(vma->iova < range_start);
- GEM_WARN_ON((vma->iova + obj->size) > range_end);
+ GEM_WARN_ON(vma->va.addr < range_start);
+ GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
}
return vma;
}
-int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+int msm_gem_prot(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **pages;
int prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
@@ -522,13 +490,22 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
+ return prot;
+}
+
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ int prot = msm_gem_prot(obj);
+
msm_gem_assert_locked(obj);
pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
- return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
+ return msm_gem_vma_map(vma, prot, msm_obj->sgt);
}
void msm_gem_unpin_locked(struct drm_gem_object *obj)
@@ -560,28 +537,31 @@ void msm_gem_unpin_active(struct drm_gem_object *obj)
update_lru_active(obj);
}
-struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
- return get_vma_locked(obj, aspace, 0, U64_MAX);
+ return get_vma_locked(obj, vm, 0, U64_MAX);
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
int ret;
msm_gem_assert_locked(obj);
- vma = get_vma_locked(obj, aspace, range_start, range_end);
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
+
+ vma = get_vma_locked(obj, vm, range_start, range_end);
if (IS_ERR(vma))
return PTR_ERR(vma);
ret = msm_gem_pin_vma_locked(obj, vma);
if (!ret) {
- *iova = vma->iova;
+ *iova = vma->va.addr;
pin_obj_locked(obj);
}
@@ -593,58 +573,59 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
* limits iova to specified range (in pages)
*/
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
+ struct drm_exec exec;
int ret;
- msm_gem_lock(obj);
- ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
- msm_gem_unlock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+ return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
}
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
*/
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
- vma = get_vma_locked(obj, aspace, 0, U64_MAX);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = get_vma_locked(obj, vm, 0, U64_MAX);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
- *iova = vma->iova;
+ *iova = vma->va.addr;
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
static int clear_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+ struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma = lookup_vma(obj, aspace);
+ struct drm_gpuva *vma = lookup_vma(obj, vm);
if (!vma)
return 0;
- msm_gem_vma_purge(vma);
+ msm_gem_vma_unmap(vma, NULL);
msm_gem_vma_close(vma);
- del_vma(vma);
return 0;
}
@@ -657,44 +638,54 @@ static int clear_iova(struct drm_gem_object *obj,
* Setting an iova of zero will clear the vma.
*/
int msm_gem_set_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t iova)
+ struct drm_gpuvm *vm, uint64_t iova)
{
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
if (!iova) {
- ret = clear_iova(obj, aspace);
+ ret = clear_iova(obj, vm);
} else {
- struct msm_gem_vma *vma;
- vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
+ struct drm_gpuva *vma;
+ vma = get_vma_locked(obj, vm, iova, iova + obj->size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- } else if (GEM_WARN_ON(vma->iova != iova)) {
- clear_iova(obj, aspace);
+ } else if (GEM_WARN_ON(vma->va.addr != iova)) {
+ clear_iova(obj, vm);
ret = -EBUSY;
}
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
+static bool is_kms_vm(struct drm_gpuvm *vm)
+{
+ struct msm_drm_private *priv = vm->drm->dev_private;
+
+ return priv->kms && (priv->kms->vm == vm);
+}
+
/*
* Unpin a iova by updating the reference counts. The memory isn't actually
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
* to get rid of it
*/
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
- msm_gem_lock(obj);
- vma = lookup_vma(obj, aspace);
- if (!GEM_WARN_ON(!vma)) {
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = lookup_vma(obj, vm);
+ if (vma) {
msm_gem_unpin_locked(obj);
}
- msm_gem_unlock(obj);
+ if (!is_kms_vm(vm))
+ put_iova_spaces(obj, vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
}
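
Several paths above share the msm_gem_lock_vm_and_obj()/drm_exec_fini()
bracket, which must lock both the VM's reservation object and the GEM object
(the two may share a single resv for MSM_BO_NO_SHARE buffers). The helper's
body is not part of this hunk; a plausible shape, stated as an assumption:

/* Assumed shape of msm_gem_lock_vm_and_obj(); the real definition lives
 * elsewhere in msm_gem.h and may differ in detail.
 */
static void lock_vm_and_obj(struct drm_exec *exec,
			    struct drm_gem_object *obj,
			    struct drm_gpuvm *vm)
{
	drm_exec_init(exec, 0, 2);
	drm_exec_until_all_locked (exec) {
		drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(vm));
		drm_exec_retry_on_contention(exec);
		if (obj->resv != drm_gpuvm_resv(vm)) {
			drm_exec_lock_obj(exec, obj);
			drm_exec_retry_on_contention(exec);
		}
	}
}
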
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -735,7 +726,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
msm_gem_assert_locked(obj);
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return ERR_PTR(-ENODEV);
pages = msm_gem_get_pages_locked(obj, madv);
@@ -853,7 +844,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, true);
+ put_iova_spaces(obj, NULL, false, "purge");
msm_gem_vunmap(obj);
@@ -861,8 +852,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_pages(obj);
- put_iova_vmas(obj);
-
mutex_lock(&priv->lru.lock);
/* A one-way transition: */
msm_obj->madv = __MSM_MADV_PURGED;
@@ -893,7 +882,7 @@ void msm_gem_evict(struct drm_gem_object *obj)
GEM_WARN_ON(is_unevictable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, false);
+ put_iova_spaces(obj, NULL, false, "evict");
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
@@ -920,7 +909,7 @@ bool msm_gem_active(struct drm_gem_object *obj)
if (to_msm_bo(obj)->pin_count)
return true;
- return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
+ return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -959,11 +948,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
- struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
- msm_gem_lock(obj);
+ if (!msm_gem_trylock(obj))
+ return;
stats->all.count++;
stats->all.size += obj->size;
@@ -1002,31 +991,33 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
- if (!list_empty(&msm_obj->vmas)) {
+ if (!list_empty(&obj->gpuva.list)) {
+ struct drm_gpuvm_bo *vm_bo;
seq_puts(m, " vmas:");
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- const char *name, *comm;
- if (vma->aspace) {
- struct msm_gem_address_space *aspace = vma->aspace;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
+
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ const char *name, *comm;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
struct task_struct *task =
- get_pid_task(aspace->pid, PIDTYPE_PID);
+ get_pid_task(vm->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
put_task_struct(task);
} else {
comm = NULL;
}
- name = aspace->name;
- } else {
- name = comm = NULL;
+ name = vm->base.name;
+
+ seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
+ name, comm ? ":" : "", comm ? comm : "",
+ vma->vm, vma->va.addr,
+ to_msm_vma(vma)->mapped ? "" : "un");
+ kfree(comm);
}
- seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
- name, comm ? ":" : "", comm ? comm : "",
- vma->aspace, vma->iova,
- vma->mapped ? "mapped" : "unmapped");
- kfree(comm);
}
seq_puts(m, "\n");
@@ -1067,14 +1058,48 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct drm_exec exec;
mutex_lock(&priv->obj_lock);
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
- put_iova_spaces(obj, true);
+ /*
+ * We need to lock any VMs the object is still attached to, but not
+ * the object itself (see explanation in msm_gem_assert_locked()),
+ * so just open-code this special case.
+ *
+ * Note that we skip the dance if we aren't attached to any VM. This
+ * is load bearing. The driver needs to support two usage models:
+ *
+ * 1. Legacy kernel managed VM: Userspace expects the VMAs to be
+ * implicitly torn down when the object is freed; the VMAs do
+ * not hold a hard reference to the BO.
+ *
+ * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
+ * BO. This can be dropped when the VM is closed and its associated
+ * VMAs are torn down. (See msm_gem_vm_close()).
+ *
+ * In the latter case the last reference to a BO can be dropped while
+ * we already have the VM locked. It would have already been removed
+ * from the gpuva list, but lockdep doesn't know that, nor does it
+ * understand the differences between the two usage models.
+ */
+ if (!list_empty(&obj->gpuva.list)) {
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked (&exec) {
+ struct drm_gpuvm_bo *vm_bo;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ drm_exec_lock_obj(&exec,
+ drm_gpuvm_resv_obj(vm_bo->vm));
+ drm_exec_retry_on_contention(&exec);
+ }
+ }
+ put_iova_spaces(obj, NULL, true, "free");
+ drm_exec_fini(&exec); /* drop locks */
+ }
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
@@ -1082,13 +1107,18 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
*/
kvfree(msm_obj->pages);
- put_iova_vmas(obj);
-
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
- put_iova_vmas(obj);
+ }
+
+ if (msm_obj->flags & MSM_BO_NO_SHARE) {
+ struct drm_gem_object *r_obj =
+ container_of(obj->resv, struct drm_gem_object, _resv);
+
+ /* Drop reference we hold to shared resv obj: */
+ drm_gem_object_put(r_obj);
}
drm_gem_object_release(obj);
@@ -1123,6 +1153,15 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
if (name)
msm_gem_object_set_name(obj, "%s", name);
+ if (flags & MSM_BO_NO_SHARE) {
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
+
+ drm_gem_object_get(r_obj);
+
+ obj->resv = r_obj->resv;
+ }
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
@@ -1155,6 +1194,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.open = msm_gem_open,
.close = msm_gem_close,
+ .export = msm_gem_prime_export,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
@@ -1194,7 +1234,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->madv = MSM_MADV_WILLNEED;
INIT_LIST_HEAD(&msm_obj->node);
- INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;
@@ -1207,19 +1246,10 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
- bool use_vram = false;
int ret;
size = PAGE_ALIGN(size);
- if (!msm_use_mmu(dev))
- use_vram = true;
- else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
- use_vram = true;
-
- if (GEM_WARN_ON(use_vram && !priv->vram.size))
- return ERR_PTR(-EINVAL);
-
/* Disallow zero sized objects as they make the underlying
* infrastructure grumpy
*/
@@ -1232,44 +1262,16 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
msm_obj = to_msm_bo(obj);
- if (use_vram) {
- struct msm_gem_vma *vma;
- struct page **pages;
-
- drm_gem_private_object_init(dev, obj, size);
-
- msm_gem_lock(obj);
-
- vma = add_vma(obj, NULL);
- msm_gem_unlock(obj);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto fail;
- }
-
- to_msm_bo(obj)->vram_node = &vma->node;
-
- msm_gem_lock(obj);
- pages = get_pages(obj);
- msm_gem_unlock(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
-
- vma->iova = physaddr(obj);
- } else {
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto fail;
- /*
- * Our buffers are kept pinned, so allocating them from the
- * MOVABLE zone is a really bad idea, and conflicts with CMA.
- * See comments above new_inode() why this is required _and_
- * expected if you're going to pin these pages.
- */
- mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
- }
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+ * See the comments above new_inode() for why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
@@ -1297,12 +1299,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size;
int ret, npages;
- /* if we don't have IOMMU, don't bother pretending we can import: */
- if (!msm_use_mmu(dev)) {
- DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
- return ERR_PTR(-EINVAL);
- }
-
size = PAGE_ALIGN(dmabuf->size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
@@ -1348,9 +1344,9 @@ fail:
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova)
{
void *vaddr;
struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
@@ -1360,14 +1356,14 @@ void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return ERR_CAST(obj);
if (iova) {
- ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ ret = msm_gem_get_and_pin_iova(obj, vm, iova);
if (ret)
goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_unpin_iova(obj, aspace);
+ msm_gem_unpin_iova(obj, vm);
ret = PTR_ERR(vaddr);
goto err;
}
@@ -1383,14 +1379,13 @@ err:
}
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace)
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
{
if (IS_ERR_OR_NULL(bo))
return;
msm_gem_put_vaddr(bo);
- msm_gem_unpin_iova(bo, aspace);
+ msm_gem_unpin_iova(bo, vm);
drm_gem_object_put(bo);
}
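
Taken together, msm_gem_kernel_new() and msm_gem_kernel_put() bracket the
whole lifetime of a kernel-internal pinned and vmap'd BO under the new
drm_gpuvm-based signatures. A short usage sketch (size and flags are
illustrative only; demo_kernel_bo is a hypothetical name):

#include <linux/sizes.h>

static int demo_kernel_bo(struct drm_device *dev, struct drm_gpuvm *vm)
{
	struct drm_gem_object *bo;
	uint64_t iova;
	void *vaddr;

	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, vm, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... vaddr for CPU access, iova for GPU access ... */

	msm_gem_kernel_put(bo, vm);	/* vunmap, unpin, drop the BO */
	return 0;
}
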
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 85f0257e83da..88239da1cd72 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -7,9 +7,11 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
+#include "msm_mmu.h"
#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/drm_exec.h"
+#include "drm/drm_gpuvm.h"
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"
@@ -22,56 +24,173 @@
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */
-struct msm_gem_address_space {
- const char *name;
- /* NOTE: mm managed at the page level, size is in # of pages
- * and position mm_node->start is in # of pages:
+/**
+ * struct msm_gem_vm_log_entry - An entry in the VM log
+ *
+ * For userspace managed VMs, a log of recent VM updates is tracked and
+ * captured in GPU devcore dumps, to aid debugging issues caused by (for
+ * example) incorrectly synchronized VM updates
+ */
+struct msm_gem_vm_log_entry {
+ const char *op;
+ uint64_t iova;
+ uint64_t range;
+ int queue_id;
+};
+
+/**
+ * struct msm_gem_vm - VM object
+ *
+ * A VM object representing a GPU (or display or GMU or ...) virtual address
+ * space.
+ *
+ * In the case of GPU, if per-process address spaces are supported, the address
+ * space is split into two VMs, which map to TTBR0 and TTBR1 in the SMMU. TTBR0
+ * is used for userspace objects, and is unique per msm_context/drm_file, while
+ * TTBR1 is the same for all processes. (The kernel controlled ringbuffer and
+ * a few other kernel controlled buffers live in TTBR1.)
+ *
+ * The GPU TTBR0 vm can be managed by userspace or by the kernel, depending on
+ * whether userspace supports VM_BIND. All other vm's are managed by the kernel.
+ * (Managed by kernel means the kernel is responsible for VA allocation.)
+ *
+ * Note that because VM_BIND allows a given BO to be mapped multiple times in
+ * a VM, and therefore have multiple VMA's in a VM, there is an extra object
+ * provided by the drm_gpuvm infrastructure: the drm_gpuvm_bo, which is not
+ * embedded in any larger driver structure. The GEM object holds a list of
+ * drm_gpuvm_bo, which in turn holds a list of msm_gem_vma. A linked vma
+ * holds a reference to the vm_bo, and drops it when the vma is unlinked.
+ * So we just need to call drm_gpuvm_bo_obtain() to return a ref to an
+ * existing vm_bo, or create a new one. Once the vma is linked, the ref
+ * to the vm_bo can be dropped (since the vma is holding one).
+ */
+struct msm_gem_vm {
+ /** @base: Inherit from drm_gpuvm. */
+ struct drm_gpuvm base;
+
+ /**
+ * @sched: Scheduler used for asynchronous VM_BIND request.
+ *
+ * Unused for kernel managed VMs (where all operations are synchronous).
+ */
+ struct drm_gpu_scheduler sched;
+
+ /**
+ * @prealloc_throttle: Used to throttle VM_BIND ops if too much pre-
+ * allocated memory is in flight.
+ *
+ * Because we have to pre-allocate pgtable pages for the worst case
+ * (ie. new mappings do not share any PTEs with existing mappings)
+ * we could end up consuming a lot of resources transiently. The
+ * prealloc_throttle puts an upper bound on that.
+ */
+ struct {
+ /** @wait: Notified when preallocated resources are released */
+ wait_queue_head_t wait;
+
+ /**
+ * @in_flight: The # of preallocated pgtable pages in-flight
+ * for queued VM_BIND jobs.
+ */
+ atomic_t in_flight;
+ } prealloc_throttle;
+
+ /**
+ * @mm: Memory management for kernel managed VA allocations
+ *
+ * Only used for kernel managed VMs, unused for user managed VMs.
+ *
+ * Protected by @mm_lock.
*/
struct drm_mm mm;
- spinlock_t lock; /* Protects drm_mm node allocation/removal */
+
+ /** @mmu: The mmu object which manages the pgtables */
struct msm_mmu *mmu;
- struct kref kref;
- /* For address spaces associated with a specific process, this
+ /** @mmu_lock: Protects access to the mmu */
+ struct mutex mmu_lock;
+
+ /**
+ * @pid: For address spaces associated with a specific process, this
* will be non-NULL:
*/
struct pid *pid;
- /* @faults: the number of GPU hangs associated with this address space */
+ /** @last_fence: Fence for last pending work scheduled on the VM */
+ struct dma_fence *last_fence;
+
+ /** @log: A log of recent VM updates */
+ struct msm_gem_vm_log_entry *log;
+
+ /** @log_shift: length of @log is (1 << @log_shift) */
+ uint32_t log_shift;
+
+ /** @log_idx: index of next @log entry to write */
+ uint32_t log_idx;
+
+ /** @faults: the number of GPU hangs associated with this address space */
int faults;
- /** @va_start: lowest possible address to allocate */
- uint64_t va_start;
+ /** @managed: is this a kernel managed VM? */
+ bool managed;
- /** @va_size: the size of the address space (in bytes) */
- uint64_t va_size;
+ /**
+ * @unusable: True if the VM has turned unusable because something
+ * bad happened during an asynchronous request.
+ *
+ * We don't try to recover from such failures, because this implies
+ * informing userspace about the specific operation that failed, and
+ * hoping the userspace driver can replay things from there. This all
+ * sounds very complicated for little gain.
+ *
+ * Instead, we should just flag the VM as unusable, and fail any
+ * further request targeting this VM.
+ *
+ * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
+ * situation, where the logical device needs to be re-created.
+ */
+ bool unusable;
};
+#define to_msm_vm(x) container_of(x, struct msm_gem_vm, base)
-struct msm_gem_address_space *
-msm_gem_address_space_get(struct msm_gem_address_space *aspace);
-
-void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+struct drm_gpuvm *
+msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 va_size, bool managed);
-struct msm_gem_address_space *
-msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
- u64 va_start, u64 size);
+void msm_gem_vm_close(struct drm_gpuvm *gpuvm);
+void msm_gem_vm_unusable(struct drm_gpuvm *gpuvm);
struct msm_fence_context;
+#define MSM_VMA_DUMP (DRM_GPUVA_USERBITS << 0)
+
+/**
+ * struct msm_gem_vma - a VMA mapping
+ *
+ * Represents a combination of a GEM object plus a VM.
+ */
struct msm_gem_vma {
+ /** @base: inherit from drm_gpuva */
+ struct drm_gpuva base;
+
+ /**
+ * @node: mm node for VA allocation
+ *
+ * Only used by kernel managed VMs
+ */
struct drm_mm_node node;
- uint64_t iova;
- struct msm_gem_address_space *aspace;
- struct list_head list; /* node in msm_gem_object::vmas */
+
+ /** @mapped: Is this VMA mapped? */
bool mapped;
};
+#define to_msm_vma(x) container_of(x, struct msm_gem_vma, base)
-struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace);
-int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
- u64 range_start, u64 range_end);
-void msm_gem_vma_purge(struct msm_gem_vma *vma);
-int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size);
-void msm_gem_vma_close(struct msm_gem_vma *vma);
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj,
+ u64 offset, u64 range_start, u64 range_end);
+void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason);
+int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt);
+void msm_gem_vma_close(struct drm_gpuva *vma);
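
For orientation, the new entry points compose into a simple
create/map/unmap/close lifecycle for kernel managed VMs. A hedged sketch
(locking and the exact error convention of msm_gem_vma_new() are elided;
demo_map is a hypothetical name):

static int demo_map(struct drm_gpuvm *vm, struct drm_gem_object *obj,
		    struct sg_table *sgt)
{
	struct drm_gpuva *vma;
	int ret;

	/* range 0..U64_MAX lets the kernel managed VM pick the VA */
	vma = msm_gem_vma_new(vm, obj, 0, 0, U64_MAX);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_vma_map(vma, msm_gem_prot(obj), sgt);
	if (ret) {
		msm_gem_vma_close(vma);
		return ret;
	}

	/* ... later ... */
	msm_gem_vma_unmap(vma, "demo");
	msm_gem_vma_close(vma);
	return 0;
}
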
struct msm_gem_object {
struct drm_gem_object base;
@@ -100,13 +219,6 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct list_head vmas; /* list of msm_gem_vma */
-
- /* For physically contiguous buffers. Used when we don't have
- * an IOMMU. Also used for stolen/splashscreen buffer.
- */
- struct drm_mm_node *vram_node;
-
char name[32]; /* Identifier to print for the debugfs files */
/* userspace metadata backchannel */
@@ -119,27 +231,56 @@ struct msm_gem_object {
* Protected by LRU lock.
*/
int pin_count;
+
+ /**
+ * @vma_ref: Reference count of VMA users.
+ *
+ * With the vm_bo/vma holding a reference to the GEM object, we'd
+ * otherwise have to actively tear down a VMA when, for example,
+ * a buffer is unpinned for scanout, vs. the pre-drm_gpuvm approach
+ * where a VMA did not hold a reference to the BO, but instead was
+ * implicitly torn down when the BO was freed.
+ *
+ * To regain the lazy VMA teardown, we use the @vma_ref. It is
+ * incremented for any of the following:
+ *
+ * 1) the BO is exported as a dma_buf
+ * 2) the BO has an open userspace handle
+ *
+ * All of those conditions will hold a reference to the BO,
+ * preventing it from being freed. So lazily keeping around the
+ * VMA will not prevent the BO from being freed. (Or rather, the
+ * reference loop is harmless in this case.)
+ *
+ * When the @vma_ref drops to zero, the kms->vm VMA will be
+ * torn down.
+ */
+ atomic_t vma_ref;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+void msm_gem_vma_get(struct drm_gem_object *obj);
+void msm_gem_vma_put(struct drm_gem_object *obj);
+
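The pairing is what makes the lazy teardown work: the dma_buf export and handle paths take a vma_ref, and the matching release paths drop it. A minimal sketch of the put side, assuming plain atomic_t semantics (hypothetical helper name; the real msm_gem_vma_put() is implemented elsewhere in the series):

	/* Sketch only, not the patch's implementation. */
	static inline void example_vma_put(struct drm_gem_object *obj)
	{
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		if (atomic_dec_return(&msm_obj->vma_ref) == 0) {
			/* Last user gone: the kms->vm VMA may now be torn down. */
		}
	}
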
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+int msm_gem_prot(struct drm_gem_object *obj);
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma);
void msm_gem_unpin_locked(struct drm_gem_object *obj);
void msm_gem_unpin_active(struct drm_gem_object *obj);
-struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova);
-int msm_gem_set_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t iova);
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm);
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova);
+int msm_gem_set_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end);
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova);
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova);
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm);
void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv);
struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -159,11 +300,10 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova);
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
@@ -188,6 +328,12 @@ msm_gem_lock(struct drm_gem_object *obj)
dma_resv_lock(obj->resv, NULL);
}
+static inline bool __must_check
+msm_gem_trylock(struct drm_gem_object *obj)
+{
+ return dma_resv_trylock(obj->resv);
+}
+
static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
@@ -200,6 +346,37 @@ msm_gem_unlock(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
+/**
+ * msm_gem_lock_vm_and_obj() - Helper to lock an obj + VM
+ * @exec: the exec context helper which will be initalized
+ * @obj: the GEM object to lock
+ * @vm: the VM to lock
+ *
+ * Operations which modify a VM frequently need to lock both the VM and
+ * the object being mapped/unmapped/etc. This helper uses drm_exec to
+ * acquire both locks, dealing with potential deadlock/backoff scenarios
+ * which arise when multiple locks are involved.
+ */
+static inline int
+msm_gem_lock_vm_and_obj(struct drm_exec *exec,
+ struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
+{
+ int ret = 0;
+
+ drm_exec_init(exec, 0, 2);
+ drm_exec_until_all_locked (exec) {
+ ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(vm));
+ if (!ret && (obj->resv != drm_gpuvm_resv(vm)))
+ ret = drm_exec_lock_obj(exec, obj);
+ drm_exec_retry_on_contention(exec);
+ if (GEM_WARN_ON(ret))
+ break;
+ }
+
+ return ret;
+}
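Note that the helper calls drm_exec_init() but leaves drm_exec_fini() to the caller, so every caller must unlock explicitly. A hypothetical caller sketch, not taken from the patch:

	static int example_with_vm_and_obj(struct drm_gem_object *obj,
					   struct drm_gpuvm *vm)
	{
		struct drm_exec exec;
		int ret;

		ret = msm_gem_lock_vm_and_obj(&exec, obj, vm);
		if (!ret) {
			/* Both the VM resv and the obj resv are held here. */
		}
		drm_exec_fini(&exec);	/* drops whatever got locked */
		return ret;
	}
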
+
static inline void
msm_gem_assert_locked(struct drm_gem_object *obj)
{
@@ -224,7 +401,7 @@ msm_gem_assert_locked(struct drm_gem_object *obj)
/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
- return msm_obj->base.import_attach || msm_obj->pin_count;
+ return drm_gem_is_imported(&msm_obj->base) || msm_obj->pin_count;
}
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
@@ -257,7 +434,7 @@ struct msm_gem_submit {
struct kref ref;
struct drm_device *dev;
struct msm_gpu *gpu;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
struct list_head node; /* node in ring submit list */
struct drm_exec exec;
uint32_t seqno; /* Sequence number of the submit on the ring */
@@ -297,6 +474,7 @@ struct msm_gem_submit {
struct drm_gem_object *obj;
uint32_t handle;
};
+ struct drm_gpuvm_bo *vm_bo;
uint64_t iova;
} bos[];
};
@@ -320,14 +498,4 @@ static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
void msm_submit_retire(struct msm_gem_submit *submit);
-/* helper to determine of a buffer in submit should be dumped, used for both
- * devcoredump and debugfs cmdstream dumping:
- */
-static inline bool
-should_dump(struct msm_gem_submit *submit, int idx)
-{
- extern bool rd_full;
- return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
-}
-
#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index ee267490c935..c0a33ac839cb 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -6,6 +6,7 @@
#include <linux/dma-buf.h>
+#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include "msm_drv.h"
@@ -16,6 +17,9 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int npages = obj->size >> PAGE_SHIFT;
+ if (msm_obj->flags & MSM_BO_NO_SHARE)
+ return ERR_PTR(-EINVAL);
+
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return ERR_PTR(-ENOMEM);
@@ -39,20 +43,81 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
msm_gem_put_vaddr_locked(obj);
}
+static void msm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ msm_gem_vma_put(obj);
+ drm_gem_dmabuf_release(dma_buf);
+}
+
+static const struct dma_buf_ops msm_gem_prime_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = msm_gem_dmabuf_release,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buf)
+{
+ if (buf->ops == &msm_gem_prime_dmabuf_ops) {
+ struct drm_gem_object *obj = buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_get(obj);
+ return obj;
+ }
+ }
+
+ return drm_gem_prime_import(dev, buf);
+}
+
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
return msm_gem_import(dev, attach->dmabuf, sg);
}
+struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags)
+{
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return ERR_PTR(-EPERM);
+
+ msm_gem_vma_get(obj);
+
+ struct drm_device *dev = obj->dev;
+ struct dma_buf_export_info exp_info = {
+ .exp_name = KBUILD_MODNAME, /* white lie for debug */
+ .owner = dev->driver->fops->owner,
+ .ops = &msm_gem_prime_dmabuf_ops,
+ .size = obj->size,
+ .flags = flags,
+ .priv = obj,
+ .resv = obj->resv,
+ };
+
+ return drm_gem_dmabuf_export(dev, &exp_info);
+}
+
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
struct page **pages;
int ret = 0;
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return 0;
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
+
pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
ret = PTR_ERR(pages);
@@ -62,7 +127,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return;
msm_gem_unpin_pages_locked(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 07ca4ddfe4e3..1039e3c0a47b 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -44,7 +44,76 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
}
static bool
-purge(struct drm_gem_object *obj)
+with_vm_locks(struct ww_acquire_ctx *ticket,
+ void (*fn)(struct drm_gem_object *obj),
+ struct drm_gem_object *obj)
+{
+ /*
+ * Track the last locked entry for unwinding locks in error and
+ * success paths.
+ */
+ struct drm_gpuvm_bo *vm_bo, *last_locked = NULL;
+ int ret = 0;
+
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);
+
+ if (resv == obj->resv)
+ continue;
+
+ ret = dma_resv_lock(resv, ticket);
+
+ /*
+ * Since we already skip the case when the VM and obj
+ * share a resv (ie. _NO_SHARE objs), we don't expect
+ * to hit a double-locking scenario... which the lock
+ * unwinding cannot really cope with.
+ */
+ WARN_ON(ret == -EALREADY);
+
+ /*
+ * Don't bother with slow-lock / backoff / retry sequence,
+ * if we can't get the lock just give up and move on to
+ * the next object.
+ */
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * Hold a ref to prevent the vm_bo from being freed
+ * and removed from the obj's gpuva list, as that would
+ * result in missing the unlock below.
+ */
+ drm_gpuvm_bo_get(vm_bo);
+
+ last_locked = vm_bo;
+ }
+
+ fn(obj);
+
+out_unlock:
+ if (last_locked) {
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);
+
+ if (resv == obj->resv)
+ continue;
+
+ dma_resv_unlock(resv);
+
+ /* Drop the ref taken while locking: */
+ drm_gpuvm_bo_put(vm_bo);
+
+ if (last_locked == vm_bo)
+ break;
+ }
+ }
+
+ return ret == 0;
+}
+
+static bool
+purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!is_purgeable(to_msm_bo(obj)))
return false;
@@ -52,13 +121,11 @@ purge(struct drm_gem_object *obj)
if (msm_gem_active(obj))
return false;
- msm_gem_purge(obj);
-
- return true;
+ return with_vm_locks(ticket, msm_gem_purge, obj);
}
static bool
-evict(struct drm_gem_object *obj)
+evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (is_unevictable(to_msm_bo(obj)))
return false;
@@ -66,43 +133,42 @@ evict(struct drm_gem_object *obj)
if (msm_gem_active(obj))
return false;
- msm_gem_evict(obj);
-
- return true;
+ return with_vm_locks(ticket, msm_gem_evict, obj);
}
static bool
wait_for_idle(struct drm_gem_object *obj)
{
- enum dma_resv_usage usage = dma_resv_usage_rw(true);
+ enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;
return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}
static bool
-active_purge(struct drm_gem_object *obj)
+active_purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!wait_for_idle(obj))
return false;
- return purge(obj);
+ return purge(obj, ticket);
}
static bool
-active_evict(struct drm_gem_object *obj)
+active_evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!wait_for_idle(obj))
return false;
- return evict(obj);
+ return evict(obj, ticket);
}
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv = shrinker->private_data;
+ struct ww_acquire_ctx ticket;
struct {
struct drm_gem_lru *lru;
- bool (*shrink)(struct drm_gem_object *obj);
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket);
bool cond;
unsigned long freed;
unsigned long remaining;
@@ -122,8 +188,9 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
continue;
stages[i].freed =
drm_gem_lru_scan(stages[i].lru, nr,
- &stages[i].remaining,
- stages[i].shrink);
+ &stages[i].remaining,
+ stages[i].shrink,
+ &ticket);
nr -= stages[i].freed;
freed += stages[i].freed;
remaining += stages[i].remaining;
@@ -164,7 +231,7 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
static const int vmap_shrink_limit = 15;
static bool
-vmap_shrink(struct drm_gem_object *obj)
+vmap_shrink(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!is_vunmapable(to_msm_bo(obj)))
return false;
@@ -192,7 +259,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
unmapped += drm_gem_lru_scan(lrus[idx],
vmap_shrink_limit - unmapped,
&remaining,
- vmap_shrink);
+ vmap_shrink,
+ NULL);
}
*(unsigned long *)ptr += unmapped;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3e9aa2cc38ef..5f8e939a5906 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -16,6 +17,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
+#include "msm_syncobj.h"
/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
* error msgs for debugging, but we don't spam dmesg by default
@@ -30,7 +32,7 @@
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
- uint32_t nr_cmds)
+ uint32_t nr_cmds, u64 drm_client_id)
{
static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
@@ -54,7 +56,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue);
+ ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue,
+ drm_client_id);
if (ret) {
kfree(submit->hw_fence);
kfree(submit);
@@ -63,7 +66,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
kref_init(&submit->ref);
submit->dev = dev;
- submit->aspace = queue->ctx->aspace;
+ submit->vm = msm_context_vm(dev, queue->ctx);
submit->gpu = gpu;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
@@ -85,6 +88,15 @@ void __msm_gem_submit_destroy(struct kref *kref)
container_of(kref, struct msm_gem_submit, ref);
unsigned i;
+ /*
+ * In error paths, we could unref the submit without calling
+ * drm_sched_entity_push_job(), so msm_job_free() will never
+ * get called. Since drm_sched_job_cleanup() will NULL out
+ * s_fence, we can use that to detect this case.
+ */
+ if (submit->base.s_fence)
+ drm_sched_job_cleanup(&submit->base);
+
if (submit->fence_id) {
spin_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id);
@@ -182,6 +194,7 @@ out:
static int submit_lookup_cmds(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
+ struct msm_context *ctx = file->driver_priv;
unsigned i;
size_t sz;
int ret = 0;
@@ -213,6 +226,20 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
goto out;
}
+ if (msm_context_is_vmbind(ctx)) {
+ if (submit_cmd.nr_relocs) {
+ ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero");
+ goto out;
+ }
+
+ if (submit_cmd.submit_idx || submit_cmd.submit_offset) {
+ ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero");
+ goto out;
+ }
+
+ submit->cmd[i].iova = submit_cmd.iova;
+ }
+
submit->cmd[i].type = submit_cmd.type;
submit->cmd[i].size = submit_cmd.size / 4;
submit->cmd[i].offset = submit_cmd.submit_offset / 4;
@@ -247,24 +274,48 @@ out:
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ struct drm_exec *exec = &submit->exec;
int ret;
- drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos);
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ flags |= DRM_EXEC_IGNORE_DUPLICATES;
+
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
+
+ drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+
+ ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_exec_lock_obj(&submit->exec,
+ drm_gpuvm_resv_obj(submit->vm));
+ drm_exec_retry_on_contention(&submit->exec);
+ if (ret)
+ return ret;
for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
drm_exec_retry_on_contention(&submit->exec);
if (ret)
- goto error;
+ return ret;
}
}
return 0;
-
-error:
- return ret;
}
static int submit_fence_sync(struct msm_gem_submit *submit)
@@ -299,10 +350,10 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
/* if locking succeeded, pin bo: */
- vma = msm_gem_get_vma_locked(obj, submit->aspace);
+ vma = msm_gem_get_vma_locked(obj, submit->vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
break;
@@ -312,7 +363,8 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
if (ret)
break;
- submit->bos[i].iova = vma->iova;
+ submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo);
+ submit->bos[i].iova = vma->va.addr;
}
/*
@@ -349,9 +401,18 @@ static void submit_unpin_objects(struct msm_gem_submit *submit)
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
- int i;
+ struct msm_gem_vm *vm = to_msm_vm(submit->vm);
+ struct dma_fence *last_fence;
+
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ drm_gpuvm_resv_add_fence(submit->vm, &submit->exec,
+ submit->user_fence,
+ DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_BOOKKEEP);
+ return;
+ }
- for (i = 0; i < submit->nr_bos; i++) {
+ for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -361,6 +422,10 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
dma_resv_add_fence(obj->resv, submit->user_fence,
DMA_RESV_USAGE_READ);
}
+
+ last_fence = vm->last_fence;
+ vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+ dma_fence_put(last_fence);
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
@@ -449,14 +514,14 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
+ if (submit->exec.objects)
+ drm_exec_fini(&submit->exec);
+
if (error) {
submit_unpin_objects(submit);
/* job wasn't enqueued to scheduler, so early retirement: */
msm_submit_retire(submit);
}
-
- if (submit->exec.objects)
- drm_exec_fini(&submit->exec);
}
void msm_submit_retire(struct msm_gem_submit *submit)
@@ -465,190 +530,29 @@ void msm_submit_retire(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
+ struct drm_gpuvm_bo *vm_bo = submit->bos[i].vm_bo;
+ msm_gem_lock(obj);
+ drm_gpuvm_bo_put(vm_bo);
+ msm_gem_unlock(obj);
drm_gem_object_put(obj);
}
}
-struct msm_submit_post_dep {
- struct drm_syncobj *syncobj;
- uint64_t point;
- struct dma_fence_chain *chain;
-};
-
-static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
- struct drm_file *file,
- uint64_t in_syncobjs_addr,
- uint32_t nr_in_syncobjs,
- size_t syncobj_stride)
-{
- struct drm_syncobj **syncobjs = NULL;
- struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
- int ret = 0;
- uint32_t i, j;
-
- syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!syncobjs)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < nr_in_syncobjs; ++i) {
- uint64_t address = in_syncobjs_addr + i * syncobj_stride;
-
- if (copy_from_user(&syncobj_desc,
- u64_to_user_ptr(address),
- min(syncobj_stride, sizeof(syncobj_desc)))) {
- ret = -EFAULT;
- break;
- }
-
- if (syncobj_desc.point &&
- !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
- ret = SUBMIT_ERROR(EOPNOTSUPP, submit, "syncobj timeline unsupported");
- break;
- }
-
- if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
- break;
- }
-
- ret = drm_sched_job_add_syncobj_dependency(&submit->base, file,
- syncobj_desc.handle, syncobj_desc.point);
- if (ret)
- break;
-
- if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
- syncobjs[i] =
- drm_syncobj_find(file, syncobj_desc.handle);
- if (!syncobjs[i]) {
- ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj handle: %u", i);
- break;
- }
- }
- }
-
- if (ret) {
- for (j = 0; j <= i; ++j) {
- if (syncobjs[j])
- drm_syncobj_put(syncobjs[j]);
- }
- kfree(syncobjs);
- return ERR_PTR(ret);
- }
- return syncobjs;
-}
-
-static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
- uint32_t nr_syncobjs)
-{
- uint32_t i;
-
- for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
- if (syncobjs[i])
- drm_syncobj_replace_fence(syncobjs[i], NULL);
- }
-}
-
-static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
- struct drm_file *file,
- uint64_t syncobjs_addr,
- uint32_t nr_syncobjs,
- size_t syncobj_stride)
-{
- struct msm_submit_post_dep *post_deps;
- struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
- int ret = 0;
- uint32_t i, j;
-
- post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!post_deps)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < nr_syncobjs; ++i) {
- uint64_t address = syncobjs_addr + i * syncobj_stride;
-
- if (copy_from_user(&syncobj_desc,
- u64_to_user_ptr(address),
- min(syncobj_stride, sizeof(syncobj_desc)))) {
- ret = -EFAULT;
- break;
- }
-
- post_deps[i].point = syncobj_desc.point;
-
- if (syncobj_desc.flags) {
- ret = UERR(EINVAL, dev, "invalid syncobj flags");
- break;
- }
-
- if (syncobj_desc.point) {
- if (!drm_core_check_feature(dev,
- DRIVER_SYNCOBJ_TIMELINE)) {
- ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
- break;
- }
-
- post_deps[i].chain = dma_fence_chain_alloc();
- if (!post_deps[i].chain) {
- ret = -ENOMEM;
- break;
- }
- }
-
- post_deps[i].syncobj =
- drm_syncobj_find(file, syncobj_desc.handle);
- if (!post_deps[i].syncobj) {
- ret = UERR(EINVAL, dev, "invalid syncobj handle");
- break;
- }
- }
-
- if (ret) {
- for (j = 0; j <= i; ++j) {
- dma_fence_chain_free(post_deps[j].chain);
- if (post_deps[j].syncobj)
- drm_syncobj_put(post_deps[j].syncobj);
- }
-
- kfree(post_deps);
- return ERR_PTR(ret);
- }
-
- return post_deps;
-}
-
-static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
- uint32_t count, struct dma_fence *fence)
-{
- uint32_t i;
-
- for (i = 0; post_deps && i < count; ++i) {
- if (post_deps[i].chain) {
- drm_syncobj_add_point(post_deps[i].syncobj,
- post_deps[i].chain,
- fence, post_deps[i].point);
- post_deps[i].chain = NULL;
- } else {
- drm_syncobj_replace_fence(post_deps[i].syncobj,
- fence);
- }
- }
-}
-
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
struct msm_gem_submit *submit = NULL;
struct msm_gpu *gpu = priv->gpu;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
- struct msm_submit_post_dep *post_deps = NULL;
+ struct msm_syncobj_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
+ struct sync_file *sync_file = NULL;
+ unsigned cmds_to_parse;
int out_fence_fd = -1;
unsigned i;
int ret;
@@ -659,10 +563,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
- DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
- return -EPERM;
- }
+ if (to_msm_vm(ctx->vm)->unusable)
+ return UERR(EPIPE, dev, "context is unusable");
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
@@ -683,6 +585,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
+ if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) {
+ ret = UERR(EINVAL, dev, "Invalid queue type");
+ goto out_post_unlock;
+ }
+
ring = gpu->rb[queue->ring_nr];
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
@@ -693,7 +600,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds,
+ file->client_id);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
goto out_post_unlock;
@@ -725,10 +633,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
- syncobjs_to_reset = msm_parse_deps(submit, file,
- args->in_syncobjs,
- args->nr_in_syncobjs,
- args->syncobj_stride);
+ syncobjs_to_reset = msm_syncobj_parse_deps(dev, &submit->base,
+ file, args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride);
if (IS_ERR(syncobjs_to_reset)) {
ret = PTR_ERR(syncobjs_to_reset);
goto out_unlock;
@@ -736,10 +644,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
- post_deps = msm_parse_post_deps(dev, file,
- args->out_syncobjs,
- args->nr_out_syncobjs,
- args->syncobj_stride);
+ post_deps = msm_syncobj_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
if (IS_ERR(post_deps)) {
ret = PTR_ERR(post_deps);
goto out_unlock;
@@ -769,7 +677,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- for (i = 0; i < args->nr_cmds; i++) {
+ cmds_to_parse = msm_context_is_vmbind(ctx) ? 0 : args->nr_cmds;
+
+ for (i = 0; i < cmds_to_parse; i++) {
struct drm_gem_object *obj;
uint64_t iova;
@@ -800,7 +710,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- submit->nr_cmds = i;
+ submit->nr_cmds = args->nr_cmds;
idr_preload(GFP_KERNEL);
@@ -858,7 +768,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
- struct sync_file *sync_file = sync_file_create(submit->user_fence);
+ sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
ret = -ENOMEM;
} else {
@@ -872,6 +782,18 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit_attach_object_fences(submit);
+ if (msm_context_is_vmbind(ctx)) {
+ /*
+ * If we are not using VM_BIND, submit_pin_objects() will validate
+ * just the BOs attached to the submit. In that case we don't
+ * need to validate the _entire_ vm, because userspace tracked
+ * what BOs are associated with the submit.
+ */
+ ret = drm_gpuvm_validate(submit->vm, &submit->exec);
+ if (ret)
+ goto out;
+ }
+
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
@@ -882,18 +804,19 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence = submit->fence_id;
queue->last_fence = submit->fence_id;
- msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
- msm_process_post_deps(post_deps, args->nr_out_syncobjs,
- submit->user_fence);
-
+ msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, submit->user_fence);
out:
submit_cleanup(submit, !!ret);
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
- if (ret && (out_fence_fd >= 0))
+ if (ret && (out_fence_fd >= 0)) {
put_unused_fd(out_fence_fd);
+ if (sync_file)
+ fput(sync_file->file);
+ }
if (!IS_ERR_OR_NULL(submit)) {
msm_gem_submit_put(submit);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 11e842dda73c..3cd8562a5109 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -4,73 +4,319 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include "drm/drm_file.h"
+#include "drm/msm_drm.h"
+#include "linux/file.h"
+#include "linux/sync_file.h"
+
#include "msm_drv.h"
-#include "msm_fence.h"
#include "msm_gem.h"
+#include "msm_gpu.h"
#include "msm_mmu.h"
+#include "msm_syncobj.h"
+
+#define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+static uint vm_log_shift = 0;
+MODULE_PARM_DESC(vm_log_shift, "Length of VM op log");
+module_param_named(vm_log_shift, vm_log_shift, uint, 0600);
+
+/**
+ * struct msm_vm_map_op - create new pgtable mapping
+ */
+struct msm_vm_map_op {
+ /** @iova: start address for mapping */
+ uint64_t iova;
+ /** @range: size of the region to map */
+ uint64_t range;
+ /** @offset: offset into @sgt to map */
+ uint64_t offset;
+ /** @sgt: pages to map, or NULL for a PRR mapping */
+ struct sg_table *sgt;
+ /** @prot: the mapping protection flags */
+ int prot;
+
+ /**
+ * @queue_id: The id of the submitqueue the operation is performed
+ * on, or zero for (in particular) UNMAP ops triggered outside of
+ * a submitqueue (ie. process cleanup)
+ */
+ int queue_id;
+};
+
+/**
+ * struct msm_vm_unmap_op - unmap a range of pages from pgtable
+ */
+struct msm_vm_unmap_op {
+ /** @iova: start address for unmap */
+ uint64_t iova;
+ /** @range: size of region to unmap */
+ uint64_t range;
+
+ /** @reason: The reason for the unmap */
+ const char *reason;
+
+ /**
+ * @queue_id: The id of the submitqueue the operation is performed
+ * on, or zero for (in particular) UNMAP ops triggered outside of
+ * a submitqueue (ie. process cleanup)
+ */
+ int queue_id;
+};
+
+/**
+ * struct msm_vm_op - A MAP or UNMAP operation
+ */
+struct msm_vm_op {
+ /** @op: The operation type */
+ enum {
+ MSM_VM_OP_MAP = 1,
+ MSM_VM_OP_UNMAP,
+ } op;
+ union {
+ /** @map: Parameters used if op == MSM_VM_OP_MAP */
+ struct msm_vm_map_op map;
+ /** @unmap: Parameters used if op == MSM_VM_OP_UNMAP */
+ struct msm_vm_unmap_op unmap;
+ };
+ /** @node: list head in msm_vm_bind_job::vm_ops */
+ struct list_head node;
+
+ /**
+ * @obj: backing object for pages to be mapped/unmapped
+ *
+ * Async unmap ops, in particular, must hold a reference to the
+ * original GEM object backing the mapping that will be unmapped.
+ * But the same can be required in the map path, for example if
+ * there is no corresponding unmap op, such as at process exit.
+ *
+ * This ensures that the pages backing the mapping are not freed
+ * before the mapping is torn down.
+ */
+ struct drm_gem_object *obj;
+};
+
+/**
+ * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl
+ *
+ * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL)
+ * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP)
+ * which are applied to the pgtables asynchronously. For example a userspace
+ * requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP
+ * to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping.
+ */
+struct msm_vm_bind_job {
+ /** @base: base class for drm_sched jobs */
+ struct drm_sched_job base;
+ /** @vm: The VM being operated on */
+ struct drm_gpuvm *vm;
+ /** @fence: The fence that is signaled when job completes */
+ struct dma_fence *fence;
+ /** @queue: The queue that the job runs on */
+ struct msm_gpu_submitqueue *queue;
+ /** @prealloc: Tracking for pre-allocated MMU pgtable pages */
+ struct msm_mmu_prealloc prealloc;
+ /** @vm_ops: a list of struct msm_vm_op */
+ struct list_head vm_ops;
+ /** @bos_pinned: are the GEM objects being bound pinned? */
+ bool bos_pinned;
+ /** @nr_ops: the number of userspace requested ops */
+ unsigned int nr_ops;
+ /**
+ * @ops: the userspace requested ops
+ *
+ * The userspace requested ops are copied/parsed and validated
+ * before we start applying the updates to try to do as much up-
+ * front error checking as possible, to avoid the VM being in an
+ * undefined state due to partially executed VM_BIND.
+ *
+ * This table also serves to hold a reference to the backing GEM
+ * objects.
+ */
+ struct msm_vm_bind_op {
+ uint32_t op;
+ uint32_t flags;
+ union {
+ struct drm_gem_object *obj;
+ uint32_t handle;
+ };
+ uint64_t obj_offset;
+ uint64_t iova;
+ uint64_t range;
+ } ops[];
+};
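A worked illustration of the kerneldoc note above that one MSM_VM_BIND_OP_MAP can expand into both an unmap and a map (addresses are hypothetical):

	/*
	 * Existing mapping: [0x2000, 0x4000). Userspace requests
	 * MSM_VM_BIND_OP_MAP over [0x1000, 0x3000). The drm_gpuvm
	 * split/merge logic then generates:
	 *
	 *   MSM_VM_OP_UNMAP  iova=0x2000 range=0x1000  (overlapped part)
	 *   MSM_VM_OP_MAP    iova=0x1000 range=0x2000  (new mapping)
	 *
	 * while the surviving tail [0x3000, 0x4000) is re-created as a
	 * new VMA without touching its pgtable entries.
	 */
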
+
+#define job_foreach_bo(obj, _job) \
+ for (unsigned i = 0; i < (_job)->nr_ops; i++) \
+ if ((obj = (_job)->ops[i].obj))
+
+static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
+{
+ return container_of(job, struct msm_vm_bind_job, base);
+}
static void
-msm_gem_address_space_destroy(struct kref *kref)
+msm_gem_vm_free(struct drm_gpuvm *gpuvm)
{
- struct msm_gem_address_space *aspace = container_of(kref,
- struct msm_gem_address_space, kref);
+ struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);
- drm_mm_takedown(&aspace->mm);
- if (aspace->mmu)
- aspace->mmu->funcs->destroy(aspace->mmu);
- put_pid(aspace->pid);
- kfree(aspace);
+ drm_mm_takedown(&vm->mm);
+ if (vm->mmu)
+ vm->mmu->funcs->destroy(vm->mmu);
+ dma_fence_put(vm->last_fence);
+ put_pid(vm->pid);
+ kfree(vm->log);
+ kfree(vm);
}
+/**
+ * msm_gem_vm_unusable() - Mark a VM as unusable
+ * @gpuvm: the VM to mark unusable
+ */
+void
+msm_gem_vm_unusable(struct drm_gpuvm *gpuvm)
+{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+ uint32_t vm_log_len = (1 << vm->log_shift);
+ uint32_t vm_log_mask = vm_log_len - 1;
+ uint32_t nr_vm_logs;
+ int first;
+
+ vm->unusable = true;
+
+ /* Bail if no log, or empty log: */
+ if (!vm->log || !vm->log[0].op)
+ return;
+
+ mutex_lock(&vm->mmu_lock);
+
+ /*
+ * log_idx is the next entry to overwrite, meaning it is the oldest, or
+ * first, entry (other than the special case handled below where the
+ * log hasn't wrapped around yet)
+ */
+ first = vm->log_idx;
+
+ if (!vm->log[first].op) {
+ /*
+ * If the next log entry has not been written yet, then only
+ * entries 0 to idx-1 are valid (ie. we haven't wrapped around
+ * yet)
+ */
+ nr_vm_logs = first;
+ first = 0;
+ } else {
+ nr_vm_logs = vm_log_len;
+ }
+
+ pr_err("vm-log:\n");
+ for (int i = 0; i < nr_vm_logs; i++) {
+ int idx = (i + first) & vm_log_mask;
+ struct msm_gem_vm_log_entry *e = &vm->log[idx];
+ pr_err(" - %s:%d: 0x%016llx-0x%016llx\n",
+ e->op, e->queue_id, e->iova,
+ e->iova + e->range);
+ }
+
+ mutex_unlock(&vm->mmu_lock);
+}
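A worked example of the ring indexing above, with hypothetical numbers:

	/*
	 * With vm_log_shift = 4: vm_log_len = 16, vm_log_mask = 0xf.
	 *
	 * Not yet wrapped: log_idx = 5 and log[5].op == NULL, so entries
	 * 0..4 are valid; first = 0 and they already print oldest-first.
	 *
	 * Wrapped: log[log_idx].op != NULL, so all 16 entries are valid,
	 * log_idx is the oldest, and printing walks (i + first) & 0xf.
	 */
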
-void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+static void
+vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
{
- if (aspace)
- kref_put(&aspace->kref, msm_gem_address_space_destroy);
+ int idx;
+
+ if (!vm->managed)
+ lockdep_assert_held(&vm->mmu_lock);
+
+ vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);
+
+ if (!vm->log)
+ return;
+
+ idx = vm->log_idx;
+ vm->log[idx].op = op;
+ vm->log[idx].iova = iova;
+ vm->log[idx].range = range;
+ vm->log[idx].queue_id = queue_id;
+ vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
+}
+
+static void
+vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
+{
+ const char *reason = op->reason;
+
+ if (!reason)
+ reason = "unmap";
+
+ vm_log(vm, reason, op->iova, op->range, op->queue_id);
+
+ vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
}
-struct msm_gem_address_space *
-msm_gem_address_space_get(struct msm_gem_address_space *aspace)
+static int
+vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
{
- if (!IS_ERR_OR_NULL(aspace))
- kref_get(&aspace->kref);
+ vm_log(vm, "map", op->iova, op->range, op->queue_id);
- return aspace;
+ return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
+ op->range, op->prot);
}
/* Actually unmap memory for the vma */
-void msm_gem_vma_purge(struct msm_gem_vma *vma)
+void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason)
{
- struct msm_gem_address_space *aspace = vma->aspace;
- unsigned size = vma->node.size;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
/* Don't do anything if the memory isn't mapped */
- if (!vma->mapped)
+ if (!msm_vma->mapped)
return;
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+ /*
+ * The mmu_lock is only needed when preallocation is used. But
+ * in that case we don't need to worry about recursion into the
+ * shrinker.
+ */
+ if (!vm->managed)
+ mutex_lock(&vm->mmu_lock);
- vma->mapped = false;
+ vm_unmap_op(vm, &(struct msm_vm_unmap_op){
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .reason = reason,
+ });
+
+ if (!vm->managed)
+ mutex_unlock(&vm->mmu_lock);
+
+ msm_vma->mapped = false;
}
/* Map and pin vma: */
int
-msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
- struct sg_table *sgt, int size)
+msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
{
- struct msm_gem_address_space *aspace = vma->aspace;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
int ret;
- if (GEM_WARN_ON(!vma->iova))
+ if (GEM_WARN_ON(!vma->va.addr))
return -EINVAL;
- if (vma->mapped)
+ if (msm_vma->mapped)
return 0;
- vma->mapped = true;
+ msm_vma->mapped = true;
- if (!aspace)
- return 0;
+ /*
+ * The mmu_lock is only needed when preallocation is used. But
+ * in that case we don't need to worry about recursion into the
+ * shrinker.
+ */
+ if (!vm->managed)
+ mutex_lock(&vm->mmu_lock);
/*
* NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
@@ -81,97 +327,1205 @@ msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
* Revisit this if we can come up with a scheme to pre-alloc pages
* for the pgtable in map/unmap ops.
*/
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);
+ ret = vm_map_op(vm, &(struct msm_vm_map_op){
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .offset = vma->gem.offset,
+ .sgt = sgt,
+ .prot = prot,
+ });
- if (ret) {
- vma->mapped = false;
- }
+ if (!vm->managed)
+ mutex_unlock(&vm->mmu_lock);
+
+ if (ret)
+ msm_vma->mapped = false;
return ret;
}
/* Close an iova. Warn if it is still in use */
-void msm_gem_vma_close(struct msm_gem_vma *vma)
+void msm_gem_vma_close(struct drm_gpuva *vma)
{
- struct msm_gem_address_space *aspace = vma->aspace;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
- GEM_WARN_ON(vma->mapped);
+ GEM_WARN_ON(msm_vma->mapped);
- spin_lock(&aspace->lock);
- if (vma->iova)
- drm_mm_remove_node(&vma->node);
- spin_unlock(&aspace->lock);
+ drm_gpuvm_resv_assert_held(&vm->base);
+
+ if (vma->gem.obj)
+ msm_gem_assert_locked(vma->gem.obj);
- vma->iova = 0;
+ if (vma->va.addr && vm->managed)
+ drm_mm_remove_node(&msm_vma->node);
- msm_gem_address_space_put(aspace);
+ drm_gpuva_remove(vma);
+ drm_gpuva_unlink(vma);
+
+ kfree(vma);
}
-struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
+/* Create a new vma and allocate an iova for it */
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
+ u64 offset, u64 range_start, u64 range_end)
{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+ struct drm_gpuvm_bo *vm_bo;
struct msm_gem_vma *vma;
+ int ret;
+
+ drm_gpuvm_resv_assert_held(&vm->base);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
+ if (vm->managed) {
+ BUG_ON(offset != 0);
+ BUG_ON(!obj); /* NULL mappings not valid for kernel managed VM */
+ ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
+ obj->size, PAGE_SIZE, 0,
+ range_start, range_end, 0);
+
+ if (ret)
+ goto err_free_vma;
+
+ range_start = vma->node.start;
+ range_end = range_start + obj->size;
+ }
+
+ if (obj)
+ GEM_WARN_ON((range_end - range_start) > obj->size);
+
+ drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
+ vma->mapped = false;
+
+ ret = drm_gpuva_insert(&vm->base, &vma->base);
+ if (ret)
+ goto err_free_range;
- vma->aspace = aspace;
+ if (!obj)
+ return &vma->base;
- return vma;
+ vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj);
+ if (IS_ERR(vm_bo)) {
+ ret = PTR_ERR(vm_bo);
+ goto err_va_remove;
+ }
+
+ drm_gpuvm_bo_extobj_add(vm_bo);
+ drm_gpuva_link(&vma->base, vm_bo);
+ GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
+
+ return &vma->base;
+
+err_va_remove:
+ drm_gpuva_remove(&vma->base);
+err_free_range:
+ if (vm->managed)
+ drm_mm_remove_node(&vma->node);
+err_free_vma:
+ kfree(vma);
+ return ERR_PTR(ret);
}
-/* Initialize a new vma and allocate an iova for it */
-int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
- u64 range_start, u64 range_end)
+static int
+msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
- struct msm_gem_address_space *aspace = vma->aspace;
+ struct drm_gem_object *obj = vm_bo->obj;
+ struct drm_gpuva *vma;
int ret;
- if (GEM_WARN_ON(!aspace))
- return -EINVAL;
+ vm_dbg("validate: %p", obj);
+
+ msm_gem_assert_locked(obj);
- if (GEM_WARN_ON(vma->iova))
- return -EBUSY;
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ ret = msm_gem_pin_vma_locked(obj, vma);
+ if (ret)
+ return ret;
+ }
- spin_lock(&aspace->lock);
- ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
- size, PAGE_SIZE, 0,
- range_start, range_end, 0);
- spin_unlock(&aspace->lock);
+ return 0;
+}
- if (ret)
- return ret;
+struct op_arg {
+ unsigned flags;
+ struct msm_vm_bind_job *job;
+};
- vma->iova = vma->node.start;
- vma->mapped = false;
+static void
+vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op)
+{
+ struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL);
+ *op = _op;
+ list_add_tail(&op->node, &arg->job->vm_ops);
+
+ if (op->obj)
+ drm_gem_object_get(op->obj);
+}
+
+static struct drm_gpuva *
+vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
+{
+ return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset,
+ op->va.addr, op->va.addr + op->va.range);
+}
+
+static int
+msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
+{
+ struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct drm_gem_object *obj = op->map.gem.obj;
+ struct drm_gpuva *vma;
+ struct sg_table *sgt;
+ unsigned prot;
+
+ vma = vma_from_op(arg, &op->map);
+ if (WARN_ON(IS_ERR(vma)))
+ return PTR_ERR(vma);
+
+ vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
+ vma->va.addr, vma->va.range);
+
+ vma->flags = ((struct op_arg *)arg)->flags;
+
+ if (obj) {
+ sgt = to_msm_bo(obj)->sgt;
+ prot = msm_gem_prot(obj);
+ } else {
+ sgt = NULL;
+ prot = IOMMU_READ | IOMMU_WRITE;
+ }
- kref_get(&aspace->kref);
+ vm_op_enqueue(arg, (struct msm_vm_op){
+ .op = MSM_VM_OP_MAP,
+ .map = {
+ .sgt = sgt,
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .offset = vma->gem.offset,
+ .prot = prot,
+ .queue_id = job->queue->id,
+ },
+ .obj = vma->gem.obj,
+ });
+
+ to_msm_vma(vma)->mapped = true;
+
+ return 0;
+}
+
+static int
+msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
+{
+ struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct drm_gpuvm *vm = job->vm;
+ struct drm_gpuva *orig_vma = op->remap.unmap->va;
+ struct drm_gpuva *prev_vma = NULL, *next_vma = NULL;
+ struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo;
+ bool mapped = to_msm_vma(orig_vma)->mapped;
+ unsigned flags;
+
+ vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma,
+ orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range);
+
+ if (mapped) {
+ uint64_t unmap_start, unmap_range;
+
+ drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
+
+ vm_op_enqueue(arg, (struct msm_vm_op){
+ .op = MSM_VM_OP_UNMAP,
+ .unmap = {
+ .iova = unmap_start,
+ .range = unmap_range,
+ .queue_id = job->queue->id,
+ },
+ .obj = orig_vma->gem.obj,
+ });
+
+ /*
+ * Part of this GEM obj is still mapped, but we're going to kill the
+ * existing VMA and replace it with one or two new ones (ie. two if
+ * the unmapped range is in the middle of the existing (unmap) VMA).
+ * So just set the state to unmapped:
+ */
+ to_msm_vma(orig_vma)->mapped = false;
+ }
+
+ /*
+ * Hold a ref to the vm_bo between the msm_gem_vma_close() and the
+ * creation of the new prev/next VMAs, in case the vm_bo is tracked
+ * in the VM's evict list:
+ */
+ if (vm_bo)
+ drm_gpuvm_bo_get(vm_bo);
+
+ /*
+ * The prev_vma and/or next_vma are replacing the unmapped vma, and
+ * therefore should preserve its flags:
+ */
+ flags = orig_vma->flags;
+
+ msm_gem_vma_close(orig_vma);
+
+ if (op->remap.prev) {
+ prev_vma = vma_from_op(arg, op->remap.prev);
+ if (WARN_ON(IS_ERR(prev_vma)))
+ return PTR_ERR(prev_vma);
+
+ vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range);
+ to_msm_vma(prev_vma)->mapped = mapped;
+ prev_vma->flags = flags;
+ }
+
+ if (op->remap.next) {
+ next_vma = vma_from_op(arg, op->remap.next);
+ if (WARN_ON(IS_ERR(next_vma)))
+ return PTR_ERR(next_vma);
+
+ vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range);
+ to_msm_vma(next_vma)->mapped = mapped;
+ next_vma->flags = flags;
+ }
+
+ if (!mapped)
+ drm_gpuvm_bo_evict(vm_bo, true);
+
+ /* Drop the previous ref: */
+ drm_gpuvm_bo_put(vm_bo);
return 0;
}
-struct msm_gem_address_space *
-msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
- u64 va_start, u64 size)
+static int
+msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
+{
+ struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct drm_gpuva *vma = op->unmap.va;
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
+
+ vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
+ vma->va.addr, vma->va.range);
+
+ if (!msm_vma->mapped)
+ goto out_close;
+
+ vm_op_enqueue(arg, (struct msm_vm_op){
+ .op = MSM_VM_OP_UNMAP,
+ .unmap = {
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .queue_id = job->queue->id,
+ },
+ .obj = vma->gem.obj,
+ });
+
+ msm_vma->mapped = false;
+
+out_close:
+ msm_gem_vma_close(vma);
+
+ return 0;
+}
+
+static const struct drm_gpuvm_ops msm_gpuvm_ops = {
+ .vm_free = msm_gem_vm_free,
+ .vm_bo_validate = msm_gem_vm_bo_validate,
+ .sm_step_map = msm_gem_vm_sm_step_map,
+ .sm_step_remap = msm_gem_vm_sm_step_remap,
+ .sm_step_unmap = msm_gem_vm_sm_step_unmap,
+};
+
+static struct dma_fence *
+msm_vma_job_run(struct drm_sched_job *_job)
+{
+ struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ struct drm_gem_object *obj;
+ int ret = vm->unusable ? -EINVAL : 0;
+
+ vm_dbg("");
+
+ mutex_lock(&vm->mmu_lock);
+ vm->mmu->prealloc = &job->prealloc;
+
+ while (!list_empty(&job->vm_ops)) {
+ struct msm_vm_op *op =
+ list_first_entry(&job->vm_ops, struct msm_vm_op, node);
+
+ switch (op->op) {
+ case MSM_VM_OP_MAP:
+ /*
+ * On error, stop trying to map new things.. but we
+ * still want to process the unmaps (or in particular,
+ * the drm_gem_object_put()s)
+ */
+ if (!ret)
+ ret = vm_map_op(vm, &op->map);
+ break;
+ case MSM_VM_OP_UNMAP:
+ vm_unmap_op(vm, &op->unmap);
+ break;
+ }
+ drm_gem_object_put(op->obj);
+ list_del(&op->node);
+ kfree(op);
+ }
+
+ vm->mmu->prealloc = NULL;
+ mutex_unlock(&vm->mmu_lock);
+
+ /*
+ * We failed to perform at least _some_ of the pgtable updates, so
+ * now the VM is in an undefined state. Game over!
+ */
+ if (ret)
+ msm_gem_vm_unusable(job->vm);
+
+ job_foreach_bo (obj, job) {
+ msm_gem_lock(obj);
+ msm_gem_unpin_locked(obj);
+ msm_gem_unlock(obj);
+ }
+
+ /* VM_BIND ops are synchronous, so no fence to wait on: */
+ return NULL;
+}
+
+static void
+msm_vma_job_free(struct drm_sched_job *_job)
{
- struct msm_gem_address_space *aspace;
+ struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ struct drm_gem_object *obj;
+
+ vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);
+
+ atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
+
+ drm_sched_job_cleanup(_job);
+
+ job_foreach_bo (obj, job)
+ drm_gem_object_put(obj);
+
+ msm_submitqueue_put(job->queue);
+ dma_fence_put(job->fence);
+
+ /* In error paths, we could have unexecuted ops: */
+ while (!list_empty(&job->vm_ops)) {
+ struct msm_vm_op *op =
+ list_first_entry(&job->vm_ops, struct msm_vm_op, node);
+ list_del(&op->node);
+ kfree(op);
+ }
+
+ wake_up(&vm->prealloc_throttle.wait);
+
+ kfree(job);
+}
+
+static const struct drm_sched_backend_ops msm_vm_bind_ops = {
+ .run_job = msm_vma_job_run,
+ .free_job = msm_vma_job_free
+};
+
+/**
+ * msm_gem_vm_create() - Create and initialize a &msm_gem_vm
+ * @drm: the drm device
+ * @mmu: the backing MMU objects handling mapping/unmapping
+ * @name: the name of the VM
+ * @va_start: the start offset of the VA space
+ * @va_size: the size of the VA space
+ * @managed: is it a kernel managed VM?
+ *
+ * In a kernel managed VM, the kernel handles address allocation, and only
+ * synchronous operations are supported. In a user managed VM, userspace
+ * handles virtual address allocation, and both async and sync operations
+ * are supported.
+ */
+struct drm_gpuvm *
+msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 va_size, bool managed)
+{
+ /*
+ * We mostly want to use DRM_GPUVM_RESV_PROTECTED, except that
+ * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we lose
+ * tracking that an extobj is evicted) :facepalm:
+ */
+ enum drm_gpuvm_flags flags = 0;
+ struct msm_gem_vm *vm;
+ struct drm_gem_object *dummy_gem;
+ int ret = 0;
if (IS_ERR(mmu))
return ERR_CAST(mmu);
- aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
- if (!aspace)
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&aspace->lock);
- aspace->name = name;
- aspace->mmu = mmu;
- aspace->va_start = va_start;
- aspace->va_size = size;
+ dummy_gem = drm_gpuvm_resv_object_alloc(drm);
+ if (!dummy_gem) {
+ ret = -ENOMEM;
+ goto err_free_vm;
+ }
+
+ if (!managed) {
+ struct drm_sched_init_args args = {
+ .ops = &msm_vm_bind_ops,
+ .num_rqs = 1,
+ .credit_limit = 1,
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .name = "msm-vm-bind",
+ .dev = drm->dev,
+ };
+
+ ret = drm_sched_init(&vm->sched, &args);
+ if (ret)
+ goto err_free_dummy;
+
+ init_waitqueue_head(&vm->prealloc_throttle.wait);
+ }
+
+ drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
+ va_start, va_size, 0, 0, &msm_gpuvm_ops);
+ drm_gem_object_put(dummy_gem);
+
+ vm->mmu = mmu;
+ mutex_init(&vm->mmu_lock);
+ vm->managed = managed;
+
+ drm_mm_init(&vm->mm, va_start, va_size);
+
+ /*
+ * We don't really need vm log for kernel managed VMs, as the kernel
+ * is responsible for ensuring that GEM objs are mapped if they are
+ * used by a submit. Furthermore we piggyback on mmu_lock to serialize
+ * access to the log.
+ *
+ * Limit the max log_shift to 8 to prevent userspace from asking us
+ * for an unreasonable log size.
+ */
+ if (!managed)
+ vm->log_shift = MIN(vm_log_shift, 8);
+
+ if (vm->log_shift) {
+ vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]),
+ GFP_KERNEL | __GFP_ZERO);
+ }
+
+ return &vm->base;
+
+err_free_dummy:
+ drm_gem_object_put(dummy_gem);
+
+err_free_vm:
+ kfree(vm);
+ return ERR_PTR(ret);
+}
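A hypothetical caller sketch; the VM name and VA range are illustrative, not values from this patch:

	struct drm_gpuvm *example_create_kernel_vm(struct drm_device *drm,
						   struct msm_mmu *mmu)
	{
		/* Kernel-managed: the kernel allocates VAs, sync ops only. */
		return msm_gem_vm_create(drm, mmu, "example-vm",
					 SZ_16M, SZ_4G - SZ_16M, true);
	}
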
+
+/**
+ * msm_gem_vm_close() - Close a VM
+ * @gpuvm: The VM to close
+ *
+ * Called when the drm device file is closed, to tear down VM related resources
+ * (which will drop refcounts to GEM objects that were still mapped into the
+ * VM at the time).
+ */
+void
+msm_gem_vm_close(struct drm_gpuvm *gpuvm)
+{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+ struct drm_gpuva *vma, *tmp;
+ struct drm_exec exec;
+
+ /*
+ * For kernel managed VMs, the VMAs are torn down when the handle is
+ * closed, so nothing more to do.
+ */
+ if (vm->managed)
+ return;
- drm_mm_init(&aspace->mm, va_start, size);
+ if (vm->last_fence)
+ dma_fence_wait(vm->last_fence, false);
+
+ /* Kill the scheduler now, so we aren't racing with it for cleanup: */
+ drm_sched_stop(&vm->sched, NULL);
+ drm_sched_fini(&vm->sched);
+
+ /* Tear down any remaining mappings: */
+ drm_exec_init(&exec, 0, 2);
+ drm_exec_until_all_locked (&exec) {
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm));
+ drm_exec_retry_on_contention(&exec);
+
+ drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) {
+ struct drm_gem_object *obj = vma->gem.obj;
+
+ /*
+ * MSM_BO_NO_SHARE objects share the same resv as the
+ * VM, in which case the obj is already locked:
+ */
+ if (obj && (obj->resv == drm_gpuvm_resv(gpuvm)))
+ obj = NULL;
+
+ if (obj) {
+ drm_exec_lock_obj(&exec, obj);
+ drm_exec_retry_on_contention(&exec);
+ }
+
+ msm_gem_vma_unmap(vma, "close");
+ msm_gem_vma_close(vma);
+
+ if (obj) {
+ drm_exec_unlock_obj(&exec, obj);
+ }
+ }
+ }
+ drm_exec_fini(&exec);
+}
+
+
+static struct msm_vm_bind_job *
+vm_bind_job_create(struct drm_device *dev, struct drm_file *file,
+ struct msm_gpu_submitqueue *queue, uint32_t nr_ops)
+{
+ struct msm_vm_bind_job *job;
+ uint64_t sz;
+ int ret;
- kref_init(&aspace->kref);
+ sz = struct_size(job, ops, nr_ops);
- return aspace;
+ if (sz > SIZE_MAX)
+ return ERR_PTR(-ENOMEM);
+
+ job = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_sched_job_init(&job->base, queue->entity, 1, queue,
+ file->client_id);
+ if (ret) {
+ kfree(job);
+ return ERR_PTR(ret);
+ }
+
+ job->vm = msm_context_vm(dev, queue->ctx);
+ job->queue = queue;
+ INIT_LIST_HEAD(&job->vm_ops);
+
+ return job;
+}
+
+static bool invalid_alignment(uint64_t addr)
+{
+ /*
+ * Technically this is about GPU alignment, not CPU alignment. But
+ * I've not seen any qcom SoC where the SMMU does not support the
+ * CPU's smallest page size.
+ */
+ return !PAGE_ALIGNED(addr);
+}
+
+static int
+lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
+{
+ struct drm_device *dev = job->vm->drm;
+ int i = job->nr_ops++;
+ int ret = 0;
+
+ job->ops[i].op = op->op;
+ job->ops[i].handle = op->handle;
+ job->ops[i].obj_offset = op->obj_offset;
+ job->ops[i].iova = op->iova;
+ job->ops[i].range = op->range;
+ job->ops[i].flags = op->flags;
+
+ if (op->flags & ~MSM_VM_BIND_OP_FLAGS)
+ ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags);
+
+ if (invalid_alignment(op->iova))
+ ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova);
+
+ if (invalid_alignment(op->obj_offset))
+ ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset);
+
+ if (invalid_alignment(op->range))
+ ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range);
+
+ if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range))
+ ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range);
+
+ /*
+ * MAP must specify a valid handle. But the handle MBZ for
+ * UNMAP or MAP_NULL.
+ */
+ if (op->op == MSM_VM_BIND_OP_MAP) {
+ if (!op->handle)
+ ret = UERR(EINVAL, dev, "invalid handle\n");
+ } else if (op->handle) {
+ ret = UERR(EINVAL, dev, "handle must be zero\n");
+ }
+
+ switch (op->op) {
+ case MSM_VM_BIND_OP_MAP:
+ case MSM_VM_BIND_OP_MAP_NULL:
+ case MSM_VM_BIND_OP_UNMAP:
+ break;
+ default:
+ ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op);
+ break;
+ }
+
+ return ret;
+}
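For reference, a single MAP op that passes every check above might be filled in like this (hypothetical values; iova, range, and obj_offset must all be PAGE_SIZE-aligned):

	struct drm_msm_vm_bind_op example_op = {
		.op         = MSM_VM_BIND_OP_MAP,
		.flags      = 0,
		.handle     = 42,	/* non-zero GEM handle; MBZ for UNMAP/MAP_NULL */
		.obj_offset = 0,
		.iova       = 0x100000000,
		.range      = SZ_64K,
	};
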
+
+/*
+ * ioctl parsing, parameter validation, and GEM handle lookup
+ */
+static int
+vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args,
+ struct drm_file *file, int *nr_bos)
+{
+ struct drm_device *dev = job->vm->drm;
+ int ret = 0;
+ int cnt = 0;
+
+ if (args->nr_ops == 1) {
+ /* Single op case, the op is inlined: */
+ ret = lookup_op(job, &args->op);
+ } else {
+ for (unsigned i = 0; i < args->nr_ops; i++) {
+ struct drm_msm_vm_bind_op op;
+ void __user *userptr =
+ u64_to_user_ptr(args->ops + (i * sizeof(op)));
+
+ /* make sure we don't have garbage flags, in case we hit
+ * an error path before flags is initialized:
+ */
+ job->ops[i].flags = 0;
+
+ if (copy_from_user(&op, userptr, sizeof(op))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = lookup_op(job, &op);
+ if (ret)
+ break;
+ }
+ }
+
+ if (ret) {
+ job->nr_ops = 0;
+ goto out;
+ }
+
+ spin_lock(&file->table_lock);
+
+ for (unsigned i = 0; i < args->nr_ops; i++) {
+ struct drm_gem_object *obj;
+
+ if (!job->ops[i].handle) {
+ job->ops[i].obj = NULL;
+ continue;
+ }
+
+ /*
+ * normally use drm_gem_object_lookup(), but for bulk lookup
+ * all under single table_lock just hit object_idr directly:
+ */
+ obj = idr_find(&file->object_idr, job->ops[i].handle);
+ if (!obj) {
+ ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i);
+ goto out_unlock;
+ }
+
+ drm_gem_object_get(obj);
+
+ job->ops[i].obj = obj;
+ cnt++;
+ }
+
+ *nr_bos = cnt;
+
+out_unlock:
+ spin_unlock(&file->table_lock);
+
+out:
+ return ret;
+}
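
The bulk lookup above deliberately bypasses drm_gem_object_lookup(), which takes and drops table_lock once per handle, and instead resolves every handle inside a single critical section. A minimal sketch of the same pattern, assuming only the standard drm_file object_idr/table_lock pair (the bulk_lookup name and its handles[]/objs[] parameters are illustrative, not from this patch):

    /*
     * Sketch: resolve many GEM handles under one table_lock, taking a
     * reference on each object so it stays valid after the lock drops.
     */
    static int bulk_lookup(struct drm_file *file, const u32 *handles,
                           struct drm_gem_object **objs, unsigned int n)
    {
            int ret = 0;

            spin_lock(&file->table_lock);
            for (unsigned int i = 0; i < n; i++) {
                    struct drm_gem_object *obj =
                            idr_find(&file->object_idr, handles[i]);

                    if (!obj) {
                            ret = -EINVAL;
                            break;
                    }
                    drm_gem_object_get(obj); /* ref held past unlock */
                    objs[i] = obj;
            }
            spin_unlock(&file->table_lock);

            return ret;
    }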
+
+static void
+prealloc_count(struct msm_vm_bind_job *job,
+ struct msm_vm_bind_op *first,
+ struct msm_vm_bind_op *last)
+{
+ struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu;
+
+ if (!first)
+ return;
+
+ uint64_t start_iova = first->iova;
+ uint64_t end_iova = last->iova + last->range;
+
+ mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova);
+}
+
+static bool
+ops_are_same_pte(struct msm_vm_bind_op *first, struct msm_vm_bind_op *next)
+{
+ /*
+ * A last-level page table covers 2MB, so from the PoV of figuring
+ * out how many pgtable pages to pre-allocate, two ops should be
+ * merged if they land in the same 2MB range:
+ */
+ uint64_t pte_mask = ~(SZ_2M - 1);
+ return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask);
+}
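
A worked example of the mask comparison (values are illustrative): with 4K pages, a last-level table spans one 2MB-aligned window, so two ops whose boundary falls inside the same window mask to the same value and are counted as one mapping:

    /* Standalone check of the ops_are_same_pte() arithmetic. */
    #include <assert.h>
    #include <stdint.h>

    #define SZ_2M (2u * 1024 * 1024)

    int main(void)
    {
            uint64_t pte_mask = ~(uint64_t)(SZ_2M - 1);
            /* op A maps [0x100000, 0x180000), op B starts at 0x180000 */
            uint64_t a_end = 0x100000 + 0x80000;
            uint64_t b_iova = 0x180000;

            /* both land in the 2MB window at 0x0 -> count as one mapping */
            assert((a_end & pte_mask) == (b_iova & pte_mask));
            return 0;
    }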
+
+/*
+ * Determine the amount of memory to prealloc for pgtables. For sparse images,
+ * in particular, userspace plays some tricks with the order of page mappings
+ * to get the desired swizzle pattern, resulting in a large # of tiny MAP ops.
+ * So detect when multiple MAP operations are physically contiguous, and count
+ * them as a single mapping. Otherwise the prealloc_count() will not realize
+ * they can share pagetable pages and vastly overcount.
+ */
+static int
+vm_bind_prealloc_count(struct msm_vm_bind_job *job)
+{
+ struct msm_vm_bind_op *first = NULL, *last = NULL;
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ int ret;
+
+ for (int i = 0; i < job->nr_ops; i++) {
+ struct msm_vm_bind_op *op = &job->ops[i];
+
+ /* We only care about MAP/MAP_NULL: */
+ if (op->op == MSM_VM_BIND_OP_UNMAP)
+ continue;
+
+ /*
+ * If op is contiguous with last in the current range, then
+ * it becomes the new last in the range and we continue
+ * looping:
+ */
+ if (last && ops_are_same_pte(last, op)) {
+ last = op;
+ continue;
+ }
+
+ /*
+ * If op is not contiguous with the current range, flush
+ * the current range and start anew:
+ */
+ prealloc_count(job, first, last);
+ first = last = op;
+ }
+
+ /* Flush the remaining range: */
+ prealloc_count(job, first, last);
+
+ /*
+ * Now that we know the needed amount to pre-alloc, throttle on pending
+ * VM_BIND jobs if we already have too much pre-alloc memory in flight
+ */
+ ret = wait_event_interruptible(
+ vm->prealloc_throttle.wait,
+ atomic_read(&vm->prealloc_throttle.in_flight) <= 1024);
+ if (ret)
+ return ret;
+
+ atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight);
+
+ return 0;
+}
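
The throttle pairs a waitqueue with an atomic in-flight counter; the release side (decrement plus wake) runs when the preallocated pages are consumed or returned, and is not part of this hunk. A sketch of both halves under that assumption (THROTTLE_LIMIT and the helper names are illustrative):

    /* Sketch of the prealloc throttle pairing; names are illustrative. */
    #define THROTTLE_LIMIT 1024

    static int throttle_acquire(struct msm_gem_vm *vm, u32 count)
    {
            int ret = wait_event_interruptible(vm->prealloc_throttle.wait,
                    atomic_read(&vm->prealloc_throttle.in_flight) <= THROTTLE_LIMIT);
            if (ret)
                    return ret; /* interrupted by a signal */

            atomic_add(count, &vm->prealloc_throttle.in_flight);
            return 0;
    }

    static void throttle_release(struct msm_gem_vm *vm, u32 count)
    {
            atomic_sub(count, &vm->prealloc_throttle.in_flight);
            wake_up(&vm->prealloc_throttle.wait);
    }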
+
+/*
+ * Lock VM and GEM objects
+ */
+static int
+vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
+{
+ int ret;
+
+ /* Lock VM and objects: */
+ drm_exec_until_all_locked (exec) {
+ ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm));
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+
+ for (unsigned i = 0; i < job->nr_ops; i++) {
+ const struct msm_vm_bind_op *op = &job->ops[i];
+
+ switch (op->op) {
+ case MSM_VM_BIND_OP_UNMAP:
+ ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec,
+ op->iova,
+ op->obj_offset);
+ break;
+ case MSM_VM_BIND_OP_MAP:
+ case MSM_VM_BIND_OP_MAP_NULL:
+ ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
+ op->iova, op->range,
+ op->obj, op->obj_offset);
+ break;
+ default:
+ /*
+ * lookup_op() should have already thrown an error for
+ * invalid ops
+ */
+ WARN_ON("unreachable");
+ }
+
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Pin GEM objects, ensuring that we have backing pages. Pinning will move
+ * the object to the pinned LRU so that the shrinker knows to first consider
+ * other objects for evicting.
+ */
+static int
+vm_bind_job_pin_objects(struct msm_vm_bind_job *job)
+{
+ struct drm_gem_object *obj;
+
+ /*
+ * First loop, before holding the LRU lock, avoids holding the
+ * LRU lock while calling msm_gem_pin_vma_locked (which could
+ * trigger get_pages())
+ */
+ job_foreach_bo (obj, job) {
+ struct page **pages;
+
+ pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+ }
+
+ struct msm_drm_private *priv = job->vm->drm->dev_private;
+
+ /*
+ * A second loop while holding the LRU lock (a) avoids acquiring/dropping
+ * the LRU lock for each individual bo, and (b) avoids holding the
+ * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger
+ * get_pages(), which could trigger reclaim... and if we held the LRU
+ * lock, that could deadlock with the shrinker).
+ */
+ mutex_lock(&priv->lru.lock);
+ job_foreach_bo (obj, job)
+ msm_gem_pin_obj_locked(obj);
+ mutex_unlock(&priv->lru.lock);
+
+ job->bos_pinned = true;
+
+ return 0;
+}
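
The two-pass shape here is the usual way to keep allocation out of a lock that reclaim can also take: pass one may sleep and allocate, pass two only flips bookkeeping under the lock. Schematically (prepare_backing and publish_pinned are hypothetical stand-ins for the get_pages/pin calls above):

    /* Hypothetical helpers standing in for get_pages()/pin: */
    int prepare_backing(struct drm_gem_object *obj);  /* may sleep/alloc */
    void publish_pinned(struct drm_gem_object *obj);  /* bookkeeping only */

    static int two_pass_pin(struct mutex *lru_lock,
                            struct drm_gem_object **objs, unsigned int n)
    {
            /* pass 1: allocation allowed, lock not held */
            for (unsigned int i = 0; i < n; i++) {
                    int ret = prepare_backing(objs[i]);
                    if (ret)
                            return ret;
            }

            /* pass 2: one lock round-trip, no allocation inside */
            mutex_lock(lru_lock);
            for (unsigned int i = 0; i < n; i++)
                    publish_pinned(objs[i]);
            mutex_unlock(lru_lock);

            return 0;
    }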
+
+/*
+ * Unpin GEM objects. Normally this is done after the bind job is run.
+ */
+static void
+vm_bind_job_unpin_objects(struct msm_vm_bind_job *job)
+{
+ struct drm_gem_object *obj;
+
+ if (!job->bos_pinned)
+ return;
+
+ job_foreach_bo (obj, job)
+ msm_gem_unpin_locked(obj);
+
+ job->bos_pinned = false;
+}
+
+/*
+ * Pre-allocate pgtable memory, and translate the VM bind requests into a
+ * sequence of pgtable updates to be applied asynchronously.
+ */
+static int
+vm_bind_job_prepare(struct msm_vm_bind_job *job)
+{
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ struct msm_mmu *mmu = vm->mmu;
+ int ret;
+
+ ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc);
+ if (ret)
+ return ret;
+
+ for (unsigned i = 0; i < job->nr_ops; i++) {
+ const struct msm_vm_bind_op *op = &job->ops[i];
+ struct op_arg arg = {
+ .job = job,
+ };
+
+ switch (op->op) {
+ case MSM_VM_BIND_OP_UNMAP:
+ ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova,
+ op->range);
+ break;
+ case MSM_VM_BIND_OP_MAP:
+ if (op->flags & MSM_VM_BIND_OP_DUMP)
+ arg.flags |= MSM_VMA_DUMP;
+ fallthrough;
+ case MSM_VM_BIND_OP_MAP_NULL:
+ ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
+ op->range, op->obj, op->obj_offset);
+ break;
+ default:
+ /*
+ * lookup_op() should have already thrown an error for
+ * invalid ops
+ */
+ BUG_ON("unreachable");
+ }
+
+ if (ret) {
+ /*
+ * If we've already started modifying the vm, we can't
+ * adequately describe to userspace the intermediate
+ * state the vm is in. So throw up our hands!
+ */
+ if (i > 0)
+ msm_gem_vm_unusable(job->vm);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Attach fences to the GEM objects being bound. This will signify to
+ * the shrinker that they are busy even after dropping the locks (ie.
+ * drm_exec_fini())
+ */
+static void
+vm_bind_job_attach_fences(struct msm_vm_bind_job *job)
+{
+ for (unsigned i = 0; i < job->nr_ops; i++) {
+ struct drm_gem_object *obj = job->ops[i].obj;
+
+ if (!obj)
+ continue;
+
+ dma_resv_add_fence(obj->resv, job->fence,
+ DMA_RESV_USAGE_KERNEL);
+ }
+}
+
+int
+msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_vm_bind *args = data;
+ struct msm_context *ctx = file->driver_priv;
+ struct msm_vm_bind_job *job = NULL;
+ struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_submitqueue *queue;
+ struct msm_syncobj_post_dep *post_deps = NULL;
+ struct drm_syncobj **syncobjs_to_reset = NULL;
+ struct sync_file *sync_file = NULL;
+ struct dma_fence *fence;
+ int out_fence_fd = -1;
+ int ret, nr_bos = 0;
+ unsigned i;
+
+ if (!gpu)
+ return -ENXIO;
+
+ /*
+ * Maybe we could allow just UNMAP ops? OTOH userspace should just
+ * immediately close the device file and all will be torn down.
+ */
+ if (to_msm_vm(ctx->vm)->unusable)
+ return UERR(EPIPE, dev, "context is unusable");
+
+ /*
+ * Technically, you cannot create a VM_BIND submitqueue in the first
+ * place, if you haven't opted in to VM_BIND context. But it is
+ * cleaner / less confusing, to check this case directly.
+ */
+ if (!msm_context_is_vmbind(ctx))
+ return UERR(EINVAL, dev, "context does not support vmbind");
+
+ if (args->flags & ~MSM_VM_BIND_FLAGS)
+ return UERR(EINVAL, dev, "invalid flags");
+
+ queue = msm_submitqueue_get(ctx, args->queue_id);
+ if (!queue)
+ return -ENOENT;
+
+ if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) {
+ ret = UERR(EINVAL, dev, "Invalid queue type");
+ goto out_post_unlock;
+ }
+
+ if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto out_post_unlock;
+ }
+ }
+
+ job = vm_bind_job_create(dev, file, queue, args->nr_ops);
+ if (IS_ERR(job)) {
+ ret = PTR_ERR(job);
+ goto out_post_unlock;
+ }
+
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ goto out_post_unlock;
+
+ if (args->flags & MSM_VM_BIND_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
+ in_fence = sync_file_get_fence(args->fence_fd);
+
+ if (!in_fence) {
+ ret = UERR(EINVAL, dev, "invalid in-fence");
+ goto out_unlock;
+ }
+
+ ret = drm_sched_job_add_dependency(&job->base, in_fence);
+ if (ret)
+ goto out_unlock;
+ }
+
+ if (args->in_syncobjs > 0) {
+ syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base,
+ file, args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(syncobjs_to_reset)) {
+ ret = PTR_ERR(syncobjs_to_reset);
+ goto out_unlock;
+ }
+ }
+
+ if (args->out_syncobjs > 0) {
+ post_deps = msm_syncobj_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(post_deps)) {
+ ret = PTR_ERR(post_deps);
+ goto out_unlock;
+ }
+ }
+
+ ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos);
+ if (ret)
+ goto out_unlock;
+
+ ret = vm_bind_prealloc_count(job);
+ if (ret)
+ goto out_unlock;
+
+ struct drm_exec exec;
+ unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT;
+ drm_exec_init(&exec, flags, nr_bos + 1);
+
+ ret = vm_bind_job_lock_objects(job, &exec);
+ if (ret)
+ goto out;
+
+ ret = vm_bind_job_pin_objects(job);
+ if (ret)
+ goto out;
+
+ ret = vm_bind_job_prepare(job);
+ if (ret)
+ goto out;
+
+ drm_sched_job_arm(&job->base);
+
+ job->fence = dma_fence_get(&job->base.s_fence->finished);
+
+ if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
+ sync_file = sync_file_create(job->fence);
+ if (!sync_file) {
+ ret = -ENOMEM;
+ } else {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
+ }
+ }
+
+ if (ret)
+ goto out;
+
+ vm_bind_job_attach_fences(job);
+
+ /*
+ * The job can be free'd (and fence unref'd) at any point after
+ * drm_sched_entity_push_job(), so we need to hold our own ref
+ */
+ fence = dma_fence_get(job->fence);
+
+ drm_sched_entity_push_job(&job->base);
+
+ msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence);
+
+ dma_fence_put(fence);
+
+out:
+ if (ret)
+ vm_bind_job_unpin_objects(job);
+
+ drm_exec_fini(&exec);
+out_unlock:
+ mutex_unlock(&queue->lock);
+out_post_unlock:
+ if (ret && (out_fence_fd >= 0)) {
+ put_unused_fd(out_fence_fd);
+ if (sync_file)
+ fput(sync_file->file);
+ }
+
+ if (!IS_ERR_OR_NULL(job)) {
+ if (ret)
+ msm_vma_job_free(&job->base);
+ } else {
+ /*
+ * If the submit hasn't yet taken ownership of the queue
+ * then we need to drop the reference ourself:
+ */
+ msm_submitqueue_put(queue);
+ }
+
+ if (!IS_ERR_OR_NULL(post_deps)) {
+ for (i = 0; i < args->nr_out_syncobjs; ++i) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+ kfree(post_deps);
+ }
+
+ if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
+ for (i = 0; i < args->nr_in_syncobjs; ++i) {
+ if (syncobjs_to_reset[i])
+ drm_syncobj_put(syncobjs_to_reset[i]);
+ }
+ kfree(syncobjs_to_reset);
+ }
+
+ return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c380d9d9f5af..c317b25a8162 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -148,7 +148,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}
-void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
struct drm_printer *p)
{
drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
@@ -219,13 +219,14 @@ static void msm_gpu_devcoredump_free(void *data)
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
- struct drm_gem_object *obj, u64 iova, bool full)
+ struct drm_gem_object *obj, u64 iova,
+ bool full, size_t offset, size_t size)
{
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
struct msm_gem_object *msm_obj = to_msm_bo(obj);
/* Don't record write only objects */
- state_bo->size = obj->size;
+ state_bo->size = size;
state_bo->flags = msm_obj->flags;
state_bo->iova = iova;
@@ -236,28 +237,129 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
if (full) {
void *ptr;
- state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
+ state_bo->data = kvmalloc(size, GFP_KERNEL);
if (!state_bo->data)
goto out;
- msm_gem_lock(obj);
ptr = msm_gem_get_vaddr_active(obj);
- msm_gem_unlock(obj);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
state_bo->data = NULL;
goto out;
}
- memcpy(state_bo->data, ptr, obj->size);
- msm_gem_put_vaddr(obj);
+ memcpy(state_bo->data, ptr + offset, size);
+ msm_gem_put_vaddr_locked(obj);
}
out:
state->nr_bos++;
}
+static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
+{
+ extern bool rd_full;
+
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ struct drm_exec exec;
+ struct drm_gpuva *vma;
+ unsigned cnt = 0;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ cnt = 0;
+
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
+ drm_exec_retry_on_contention(&exec);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ if (!vma->gem.obj)
+ continue;
+
+ cnt++;
+ drm_exec_lock_obj(&exec, vma->gem.obj);
+ drm_exec_retry_on_contention(&exec);
+ }
+
+ }
+
+ drm_gpuvm_for_each_va (vma, submit->vm)
+ cnt++;
+
+ state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
+
+ /* Skip MAP_NULL/PRR VMAs: */
+ if (!vma->gem.obj)
+ continue;
+
+ msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
+ dump, vma->gem.offset, vma->va.range);
+ }
+
+ drm_exec_fini(&exec);
+ } else {
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (int i = 0; state->bos && i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
+
+ msm_gem_lock(obj);
+ msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
+ dump, 0, obj->size);
+ msm_gem_unlock(obj);
+ }
+ }
+}
+
+static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm)
+{
+ uint32_t vm_log_len = (1 << vm->log_shift);
+ uint32_t vm_log_mask = vm_log_len - 1;
+ int first;
+
+ /* Bail if no log, or empty log: */
+ if (!vm->log || !vm->log[0].op)
+ return;
+
+ mutex_lock(&vm->mmu_lock);
+
+ /*
+ * log_idx is the next entry to overwrite, meaning it is the oldest, or
+ * first, entry (other than the special case handled below where the
+ * log hasn't wrapped around yet)
+ */
+ first = vm->log_idx;
+
+ if (!vm->log[first].op) {
+ /*
+ * If the next log entry has not been written yet, then only
+ * entries 0 to idx-1 are valid (ie. we haven't wrapped around
+ * yet)
+ */
+ state->nr_vm_logs = first;
+ first = 0;
+ } else {
+ state->nr_vm_logs = vm_log_len;
+ }
+
+ state->vm_logs = kmalloc_array(
+ state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL);
+ for (int i = 0; i < state->nr_vm_logs; i++) {
+ int idx = (i + first) & vm_log_mask;
+
+ state->vm_logs[i] = vm->log[idx];
+ }
+
+ mutex_unlock(&vm->mmu_lock);
+}
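
The index handling is the standard snapshot of a power-of-two ring buffer whose write index doubles as the oldest-entry pointer once the log has wrapped. A self-contained version of the same copy-out (struct entry and the function name are illustrative):

    /* Standalone sketch of the vm_log snapshot logic. */
    #include <stdint.h>

    struct entry { uint32_t op; uint64_t iova; };

    static unsigned snapshot_ring(const struct entry *log, unsigned log_shift,
                                  unsigned write_idx, struct entry *out)
    {
            unsigned len = 1u << log_shift, mask = len - 1;
            unsigned first = write_idx, nr;

            if (!log[first].op) {   /* not wrapped: entries 0..first-1 valid */
                    nr = first;
                    first = 0;
            } else {                /* wrapped: all valid, oldest at first */
                    nr = len;
            }

            for (unsigned i = 0; i < nr; i++)
                    out[i] = log[(first + i) & mask];

            return nr;
    }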
+
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
struct msm_gpu_state *state;
@@ -276,28 +378,21 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
/* Fill in the additional crash state information */
state->comm = kstrdup(comm, GFP_KERNEL);
state->cmd = kstrdup(cmd, GFP_KERNEL);
- state->fault_info = gpu->fault_info;
-
- if (submit) {
- int i;
-
- if (state->fault_info.ttbr0) {
- struct msm_gpu_fault_info *info = &state->fault_info;
- struct msm_mmu *mmu = submit->aspace->mmu;
+ if (fault_info)
+ state->fault_info = *fault_info;
- msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
- &info->asid);
- msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
- }
+ if (submit && state->fault_info.ttbr0) {
+ struct msm_gpu_fault_info *info = &state->fault_info;
+ struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;
- state->bos = kcalloc(submit->nr_bos,
- sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+ msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
+ &info->asid);
+ msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
+ }
- for (i = 0; state->bos && i < submit->nr_bos; i++) {
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova,
- should_dump(submit, i));
- }
+ if (submit) {
+ crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
+ crashstate_get_bos(state, submit);
}
/* Set the active crash state to be dumped on failure */
@@ -308,7 +403,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
}
#endif
@@ -339,7 +435,7 @@ static void retire_submits(struct msm_gpu *gpu);
static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
- struct msm_file_private *ctx = submit->queue->ctx;
+ struct msm_context *ctx = submit->queue->ctx;
struct task_struct *task;
WARN_ON(!mutex_is_locked(&submit->gpu->lock));
@@ -386,8 +482,20 @@ static void recover_worker(struct kthread_work *work)
/* Increment the fault counts */
submit->queue->faults++;
- if (submit->aspace)
- submit->aspace->faults++;
+ if (submit->vm) {
+ struct msm_gem_vm *vm = to_msm_vm(submit->vm);
+
+ vm->faults++;
+
+ /*
+ * If userspace has opted-in to VM_BIND (and therefore userspace
+ * management of the VM), faults mark the VM as unusable. This
+ * matches vulkan expectations (vulkan is the main target for
+ * VM_BIND)
+ */
+ if (!vm->managed)
+ msm_gem_vm_unusable(submit->vm);
+ }
get_comm_cmdline(submit, &comm, &cmd);
@@ -405,7 +513,7 @@ static void recover_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);
kfree(cmd);
kfree(comm);
@@ -459,9 +567,8 @@ out_unlock:
msm_gpu_retire(gpu);
}
-static void fault_worker(struct kthread_work *work)
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
@@ -484,16 +591,13 @@ static void fault_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
resume_smmu:
- memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
- gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
-
mutex_unlock(&gpu->lock);
}
@@ -521,7 +625,7 @@ static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static void hangcheck_handler(struct timer_list *t)
{
- struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
+ struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
uint32_t fence = ring->memptrs->fence;
@@ -829,10 +933,12 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
}
/* Return a new address space for a msm_drm_private instance */
-struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
+struct drm_gpuvm *
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+ bool kernel_managed)
{
- struct msm_gem_address_space *aspace = NULL;
+ struct drm_gpuvm *vm = NULL;
+
if (!gpu)
return NULL;
@@ -840,16 +946,16 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *ta
* If the target doesn't support private address spaces then return
* the global one
*/
- if (gpu->funcs->create_private_address_space) {
- aspace = gpu->funcs->create_private_address_space(gpu);
- if (!IS_ERR(aspace))
- aspace->pid = get_pid(task_pid(task));
+ if (gpu->funcs->create_private_vm) {
+ vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
+ if (!IS_ERR(vm))
+ to_msm_vm(vm)->pid = get_pid(task_pid(task));
}
- if (IS_ERR_OR_NULL(aspace))
- aspace = msm_gem_address_space_get(gpu->aspace);
+ if (IS_ERR_OR_NULL(vm))
+ vm = drm_gpuvm_get(gpu->vm);
- return aspace;
+ return vm;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -882,7 +988,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
init_waitqueue_head(&gpu->retire_event);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
- kthread_init_work(&gpu->fault_work, fault_worker);
priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
@@ -944,19 +1049,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
msm_devfreq_init(gpu);
-
- gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
-
- if (gpu->aspace == NULL)
- DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
- else if (IS_ERR(gpu->aspace)) {
- ret = PTR_ERR(gpu->aspace);
+ gpu->vm = gpu->funcs->create_vm(gpu, pdev);
+ if (IS_ERR(gpu->vm)) {
+ ret = PTR_ERR(gpu->vm);
goto fail;
}
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
- check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
+ check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
@@ -1000,7 +1101,7 @@ fail:
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -1017,11 +1118,12 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
- if (!IS_ERR_OR_NULL(gpu->aspace)) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
- msm_gem_address_space_put(gpu->aspace);
+ if (!IS_ERR_OR_NULL(gpu->vm)) {
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gpu->vm);
}
if (gpu->worker) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index e25009150579..b2a96544f92a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -20,9 +20,10 @@
#include "msm_gem.h"
struct msm_gem_submit;
+struct msm_gem_vm_log_entry;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
-struct msm_file_private;
+struct msm_context;
struct msm_gpu_config {
const char *ioname;
@@ -44,9 +45,9 @@ struct msm_gpu_config {
* + z180_gpu
*/
struct msm_gpu_funcs {
- int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
- int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t value, uint32_t len);
int (*hw_init)(struct msm_gpu *gpu);
@@ -78,10 +79,8 @@ struct msm_gpu_funcs {
/* note: gpu_set_freq() can assume that we have been pm_resumed */
void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended);
- struct msm_gem_address_space *(*create_address_space)
- (struct msm_gpu *gpu, struct platform_device *pdev);
- struct msm_gem_address_space *(*create_private_address_space)
- (struct msm_gpu *gpu);
+ struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
+ struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu, bool kernel_managed);
uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
/**
@@ -236,7 +235,7 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
@@ -253,12 +252,6 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
struct timer_list hangcheck_timer;
- /* Fault info for most recent iova fault: */
- struct msm_gpu_fault_info fault_info;
-
- /* work for handling GPU ioval faults: */
- struct kthread_work fault_work;
-
/* work for handling GPU recovery: */
struct kthread_work recover_work;
@@ -347,26 +340,61 @@ struct msm_gpu_perfcntr {
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
/**
- * struct msm_file_private - per-drm_file context
- *
- * @queuelock: synchronizes access to submitqueues list
- * @submitqueues: list of &msm_gpu_submitqueue created by userspace
- * @queueid: counter incremented each time a submitqueue is created,
- * used to assign &msm_gpu_submitqueue.id
- * @aspace: the per-process GPU address-space
- * @ref: reference count
- * @seqno: unique per process seqno
+ * struct msm_context - per-drm_file context
*/
-struct msm_file_private {
+struct msm_context {
+ /** @queuelock: synchronizes access to submitqueues list */
rwlock_t queuelock;
+
+ /** @submitqueues: list of &msm_gpu_submitqueue created by userspace */
struct list_head submitqueues;
+
+ /**
+ * @queueid:
+ *
+ * Counter incremented each time a submitqueue is created, used to
+ * assign &msm_gpu_submitqueue.id
+ */
int queueid;
- struct msm_gem_address_space *aspace;
+
+ /**
+ * @closed: The device file associated with this context has been closed.
+ *
+ * Once the device is closed, any submits that have not been written
+ * to the ring buffer are no-op'd.
+ */
+ bool closed;
+
+ /**
+ * @userspace_managed_vm:
+ *
+ * Has userspace opted-in to userspace managed VM (ie. VM_BIND) via
+ * MSM_PARAM_EN_VM_BIND?
+ */
+ bool userspace_managed_vm;
+
+ /**
+ * @vm:
+ *
+ * The per-process GPU address-space. Do not access directly, use
+ * msm_context_vm().
+ */
+ struct drm_gpuvm *vm;
+
+ /** @kref: the reference count */
struct kref ref;
+
+ /**
+ * @seqno:
+ *
+ * A unique per-process sequence number. Used to detect context
+ * switches, without relying on keeping a, potentially dangling,
+ * pointer to the previous context.
+ */
int seqno;
/**
- * sysprof:
+ * @sysprof:
*
* The value of MSM_PARAM_SYSPROF set by userspace. This is
* intended to be used by system profiling tools like Mesa's
@@ -384,21 +412,21 @@ struct msm_file_private {
int sysprof;
/**
- * comm: Overridden task comm, see MSM_PARAM_COMM
+ * @comm: Overridden task comm, see MSM_PARAM_COMM
*
* Accessed under msm_gpu::lock
*/
char *comm;
/**
- * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+ * @cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
*
* Accessed under msm_gpu::lock
*/
char *cmdline;
/**
- * elapsed:
+ * @elapsed:
*
* The total (cumulative) elapsed time GPU was busy with rendering
* from this context in ns.
@@ -406,7 +434,7 @@ struct msm_file_private {
uint64_t elapsed_ns;
/**
- * cycles:
+ * @cycles:
*
* The total (cumulative) GPU cycles elapsed attributed to this
* context.
@@ -414,7 +442,7 @@ struct msm_file_private {
uint64_t cycles;
/**
- * entities:
+ * @entities:
*
* Table of per-priority-level sched entities used by submitqueues
* associated with this &drm_file. Because some userspace apps
@@ -427,7 +455,7 @@ struct msm_file_private {
struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
/**
- * ctx_mem:
+ * @ctx_mem:
*
* Total amount of memory of GEM buffers with handles attached for
* this context.
@@ -435,6 +463,24 @@ struct msm_file_private {
atomic64_t ctx_mem;
};
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
+
+/**
+ * msm_context_is_vmbind() - has userspace opted in to VM_BIND?
+ *
+ * @ctx: the drm_file context
+ *
+ * See MSM_PARAM_EN_VM_BIND. If userspace is managing the VM, it can
+ * do sparse binding including having multiple, potentially partial,
+ * mappings in the VM. Therefore certain legacy uabi (ie. GET_IOVA,
+ * SET_IOVA) are rejected because they don't have a sensible meaning.
+ */
+static inline bool
+msm_context_is_vmbind(struct msm_context *ctx)
+{
+ return ctx->userspace_managed_vm;
+}
+
/**
* msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
*
@@ -512,13 +558,16 @@ struct msm_gpu_submitqueue {
u32 ring_nr;
int faults;
uint32_t last_fence;
- struct msm_file_private *ctx;
+ struct msm_context *ctx;
struct list_head node;
struct idr fence_idr;
struct spinlock idr_lock;
struct mutex lock;
struct kref ref;
struct drm_sched_entity *entity;
+
+ /** @_vm_bind_entity: used for @entity pointer for VM_BIND queues */
+ struct drm_sched_entity _vm_bind_entity[0];
};
struct msm_gpu_state_bo {
@@ -555,6 +604,9 @@ struct msm_gpu_state {
struct msm_gpu_fault_info fault_info;
+ int nr_vm_logs;
+ struct msm_gem_vm_log_entry *vm_logs;
+
int nr_bos;
struct msm_gpu_state_bo *bos;
};
@@ -608,33 +660,32 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
-void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
struct drm_printer *p);
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+int msm_submitqueue_init(struct drm_device *drm, struct msm_context *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm,
- struct msm_file_private *ctx,
+ struct msm_context *ctx,
u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_query(struct drm_device *drm, struct msm_context *ctx,
struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
+int msm_submitqueue_remove(struct msm_context *ctx, u32 id);
+void msm_submitqueue_close(struct msm_context *ctx);
void msm_submitqueue_destroy(struct kref *kref);
-int msm_file_private_set_sysprof(struct msm_file_private *ctx,
- struct msm_gpu *gpu, int sysprof);
-void __msm_file_private_destroy(struct kref *kref);
+int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof);
+void __msm_context_destroy(struct kref *kref);
-static inline void msm_file_private_put(struct msm_file_private *ctx)
+static inline void msm_context_put(struct msm_context *ctx)
{
- kref_put(&ctx->ref, __msm_file_private_destroy);
+ kref_put(&ctx->ref, __msm_context_destroy);
}
-static inline struct msm_file_private *msm_file_private_get(
- struct msm_file_private *ctx)
+static inline struct msm_context *msm_context_get(
+ struct msm_context *ctx)
{
kref_get(&ctx->ref);
return ctx;
@@ -662,12 +713,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config);
-struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
+struct drm_gpuvm *
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+ bool kernel_managed);
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
+bool adreno_has_gpu(struct device_node *node);
void __init adreno_register(void);
void __exit adreno_unregister(void);
@@ -705,6 +758,8 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
mutex_unlock(&gpu->lock);
}
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info);
+
/*
* Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
* support expanded privileges
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 6970b0f7f457..2e1d5c343272 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -156,6 +156,7 @@ void msm_devfreq_init(struct msm_gpu *gpu)
priv->gpu_devfreq_config.downdifferential = 10;
mutex_init(&df->lock);
+ df->suspended = true;
ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
DEV_PM_QOS_MIN_FREQUENCY, 0);
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 7f863282db0d..781bbe5540bd 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -205,6 +205,20 @@ TRACE_EVENT(msm_gpu_preemption_irq,
TP_printk("preempted to %u", __entry->ring_id)
);
+TRACE_EVENT(msm_mmu_prealloc_cleanup,
+ TP_PROTO(u32 count, u32 remaining),
+ TP_ARGS(count, remaining),
+ TP_STRUCT__entry(
+ __field(u32, count)
+ __field(u32, remaining)
+ ),
+ TP_fast_assign(
+ __entry->count = count;
+ __entry->remaining = remaining;
+ ),
+ TP_printk("count=%u, remaining=%u", __entry->count, __entry->remaining)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index fd73dcd3f30e..55c29f49b788 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -6,13 +6,18 @@
#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
+#include <linux/kmemleak.h>
#include "msm_drv.h"
+#include "msm_gpu_trace.h"
#include "msm_mmu.h"
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
atomic_t pagetables;
+ struct page *prr_page;
+
+ struct kmem_cache *pt_cache;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
@@ -26,6 +31,9 @@ struct msm_iommu_pagetable {
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
+
+ /** @root_page_table: Stores the root page table pointer. */
+ void *root_page_table;
};
static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
@@ -93,15 +101,24 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ int ret = 0;
while (size) {
- size_t unmapped, pgsize, count;
+ size_t pgsize, count;
+ ssize_t unmapped;
pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
- if (!unmapped)
- break;
+ if (unmapped <= 0) {
+ ret = -EINVAL;
+ /*
+ * Continue attempting to unmap the remainder of the
+ * range, so we don't end up with some dangling
+ * mapped pages
+ */
+ unmapped = PAGE_SIZE;
+ }
iova += unmapped;
size -= unmapped;
@@ -109,11 +126,42 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
- return (size == 0) ? 0 : -EINVAL;
+ return ret;
+}
+
+static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+ phys_addr_t phys = page_to_phys(iommu->prr_page);
+ u64 addr = iova;
+
+ while (len) {
+ size_t mapped = 0;
+ size_t size = PAGE_SIZE;
+ int ret;
+
+ ret = ops->map_pages(ops, addr, phys, size, 1, prot, GFP_KERNEL, &mapped);
+
+ /* map_pages could fail after mapping some of the pages,
+ * so update the counters before error handling.
+ */
+ addr += mapped;
+ len -= mapped;
+
+ if (ret) {
+ msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
@@ -121,10 +169,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
u64 addr = iova;
unsigned int i;
+ if (!sgt)
+ return msm_iommu_pagetable_map_prr(mmu, iova, len, prot);
+
for_each_sgtable_sg(sgt, sg, i) {
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
+ if (!len)
+ break;
+
+ if (size <= off) {
+ off -= size;
+ continue;
+ }
+
+ phys += off;
+ size -= off;
+ size = min_t(size_t, size, len);
+ off = 0;
+
while (size) {
size_t pgsize, count, mapped = 0;
int ret;
@@ -140,6 +204,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
phys += mapped;
addr += mapped;
size -= mapped;
+ len -= mapped;
if (ret) {
msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
@@ -162,9 +227,16 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
* If this is the last attached pagetable for the parent,
* disable TTBR0 in the arm-smmu driver
*/
- if (atomic_dec_return(&iommu->pagetables) == 0)
+ if (atomic_dec_return(&iommu->pagetables) == 0) {
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
+ if (adreno_smmu->set_prr_bit) {
+ adreno_smmu->set_prr_bit(adreno_smmu->cookie, false);
+ __free_page(iommu->prr_page);
+ iommu->prr_page = NULL;
+ }
+ }
+
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
}
@@ -217,7 +289,148 @@ msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[
return 0;
}
+static void
+msm_iommu_pagetable_prealloc_count(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+ uint64_t iova, size_t len)
+{
+ u64 pt_count;
+
+ /*
+ * L1, L2 and L3 page tables.
+ *
+ * We could optimize L3 allocation by iterating over the sgt and merging
+ * 2M contiguous blocks, but it's simpler to over-provision and return
+ * the pages if they're not used.
+ *
+ * With 4K granules (v8 / v7-lpae page table format), each term below
+ * counts the tables touched at one level: an L1 table spans 39 bits
+ * (512GB), an L2 table 30 bits (1GB), and an L3 table 21 bits (2MB).
+ *
+ * https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/Virtual-Memory-System-Architecture--VMSA-/Long-descriptor-translation-table-format/Long-descriptor-translation-table-format-descriptors?lang=en#BEIHEFFB
+ */
+ pt_count = ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39) +
+ ((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30) +
+ ((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21);
+
+ p->count += pt_count;
+}
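
Each ALIGN/ALIGN_DOWN pair counts how many naturally aligned, table-sized windows the range touches at one level: 512GB (1<<39) per L1 table, 1GB (1<<30) per L2, and 2MB (1<<21) per L3. A worked example with made-up values:

    /* Worked example of the per-level table count; values illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))
    #define ALIGN_DWN(x, a) ((x) & ~((uint64_t)(a) - 1))

    static uint64_t tables_at(uint64_t iova, uint64_t len, unsigned shift)
    {
            uint64_t span = 1ull << shift;
            return (ALIGN_UP(iova + len, span) - ALIGN_DWN(iova, span)) >> shift;
    }

    int main(void)
    {
            /* a 4MB mapping starting 1MB into a 2MB window */
            uint64_t iova = 0x100000, len = 4ull << 20;

            /* prints L1=1 L2=1 L3=3: three 2MB windows are touched */
            printf("L1=%llu L2=%llu L3=%llu\n",
                   (unsigned long long)tables_at(iova, len, 39),
                   (unsigned long long)tables_at(iova, len, 30),
                   (unsigned long long)tables_at(iova, len, 21));
            return 0;
    }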
+
+static struct kmem_cache *
+get_pt_cache(struct msm_mmu *mmu)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ return to_msm_iommu(pagetable->parent)->pt_cache;
+}
+
+static int
+msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+ struct kmem_cache *pt_cache = get_pt_cache(mmu);
+ int ret;
+
+ p->pages = kvmalloc_array(p->count, sizeof(p->pages), GFP_KERNEL);
+ if (!p->pages)
+ return -ENOMEM;
+
+ ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
+ if (ret != p->count) {
+ p->count = ret;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+ struct kmem_cache *pt_cache = get_pt_cache(mmu);
+ uint32_t remaining_pt_count = p->count - p->ptr;
+
+ if (p->count > 0)
+ trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
+
+ kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]);
+ kvfree(p->pages);
+}
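
Clamping p->count to the bulk allocator's return value is what lets the cleanup above free exactly the pages that were handed out but never consumed. The pairing, reduced to its essentials (pt_pool and the helper names are illustrative):

    /* Sketch of the bulk alloc/cleanup pairing; names are illustrative. */
    struct pt_pool {
            void **pages;
            u32 count; /* pages successfully allocated */
            u32 ptr;   /* pages consumed so far */
    };

    static int pool_fill(struct kmem_cache *cache, struct pt_pool *p, u32 want)
    {
            int got;

            p->pages = kvmalloc_array(want, sizeof(*p->pages), GFP_KERNEL);
            if (!p->pages)
                    return -ENOMEM;

            got = kmem_cache_alloc_bulk(cache, GFP_KERNEL, want, p->pages);
            p->count = got; /* may be short of 'want' on failure */
            return (got == want) ? 0 : -ENOMEM;
    }

    static void pool_drain(struct kmem_cache *cache, struct pt_pool *p)
    {
            /* free only the unconsumed tail */
            kmem_cache_free_bulk(cache, p->count - p->ptr, &p->pages[p->ptr]);
            kvfree(p->pages);
    }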
+
+/**
+ * alloc_pt() - Custom page table allocator
+ * @cookie: Cookie passed at page table allocation time.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ * @gfp: GFP flags.
+ *
+ * We want a custom allocator so we can use a cache for page table
+ * allocations and amortize the cost of the over-reservation that's
+ * done to allow asynchronous VM operations.
+ *
+ * Return: non-NULL on success, NULL if the allocation failed for any
+ * reason.
+ */
+static void *
+msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
+{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct msm_mmu_prealloc *p = pagetable->base.prealloc;
+ void *page;
+
+ /* Allocation of the root page table happening during init. */
+ if (unlikely(!pagetable->root_page_table)) {
+ struct page *p;
+
+ p = alloc_pages_node(dev_to_node(pagetable->iommu_dev),
+ gfp | __GFP_ZERO, get_order(size));
+ page = p ? page_address(p) : NULL;
+ pagetable->root_page_table = page;
+ return page;
+ }
+
+ if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count))
+ return NULL;
+
+ page = p->pages[p->ptr++];
+ memset(page, 0, size);
+
+ /*
+ * Page table entries don't use virtual addresses, which trips out
+ * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
+ * are mixed with other fields, and I fear kmemleak won't detect that
+ * either.
+ *
+ * Let's just ignore memory passed to the page-table driver for now.
+ */
+ kmemleak_ignore(page);
+
+ return page;
+}
+
+/**
+ * free_pt() - Custom page table free function
+ * @cookie: Cookie passed at page table allocation time.
+ * @data: Page table to free.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ */
+static void
+msm_iommu_pagetable_free_pt(void *cookie, void *data, size_t size)
+{
+ struct msm_iommu_pagetable *pagetable = cookie;
+
+ if (unlikely(pagetable->root_page_table == data)) {
+ free_pages((unsigned long)data, get_order(size));
+ pagetable->root_page_table = NULL;
+ return;
+ }
+
+ kmem_cache_free(get_pt_cache(&pagetable->base), data);
+}
+
static const struct msm_mmu_funcs pagetable_funcs = {
+ .prealloc_count = msm_iommu_pagetable_prealloc_count,
+ .prealloc_allocate = msm_iommu_pagetable_prealloc_allocate,
+ .prealloc_cleanup = msm_iommu_pagetable_prealloc_cleanup,
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
.destroy = msm_iommu_pagetable_destroy,
@@ -268,7 +481,18 @@ static const struct iommu_flush_ops tlb_ops = {
static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+static size_t get_tblsz(const struct io_pgtable_cfg *cfg)
+{
+ int pg_shift, bits_per_level;
+
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ /* arm_lpae_iopte is u64: */
+ bits_per_level = pg_shift - ilog2(sizeof(u64));
+
+ return sizeof(u64) << bits_per_level;
+}
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
struct msm_iommu *iommu = to_msm_iommu(parent);
@@ -302,6 +526,36 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
ttbr0_cfg.tlb = &tlb_ops;
+ if (!kernel_managed) {
+ ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;
+
+ /*
+ * With userspace managed VM (aka VM_BIND), we need to pre-
+ * allocate pages ahead of time for map/unmap operations,
+ * handing them to io-pgtable via custom alloc/free ops as
+ * needed:
+ */
+ ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt;
+ ttbr0_cfg.free = msm_iommu_pagetable_free_pt;
+
+ /*
+ * Restrict to single page granules. Otherwise we may run
+ * into a situation where userspace wants to unmap/remap
+ * only a part of a larger block mapping, which is not
+ * possible without unmapping the entire block. Which in
+ * turn could cause faults if the GPU is accessing other
+ * parts of the block mapping.
+ *
+ * Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm:
+ * Remove split on unmap behavior)" this was handled in
+ * io-pgtable-arm. But this apparently does not work
+ * correctly on SMMUv3.
+ */
+ WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE));
+ ttbr0_cfg.pgsize_bitmap = PAGE_SIZE;
+ }
+
+ pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
&ttbr0_cfg, pagetable);
@@ -321,12 +575,30 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
kfree(pagetable);
return ERR_PTR(ret);
}
+
+ BUG_ON(iommu->prr_page);
+ if (adreno_smmu->set_prr_bit) {
+ /*
+ * We need a zero'd page for two reasons:
+ *
+ * 1) Reserve a known physical address to use when
+ * mapping NULL / sparsely resident regions
+ * 2) Read back zero
+ *
+ * It appears the hw drops writes to the PRR region
+ * on the floor, but reads actually return whatever
+ * is in the PRR page.
+ */
+ iommu->prr_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ adreno_smmu->set_prr_addr(adreno_smmu->cookie,
+ page_to_phys(iommu->prr_page));
+ adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
+ }
}
/* Needed later for TLB flush */
pagetable->parent = parent;
pagetable->tlb = ttbr1_cfg->tlb;
- pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
@@ -345,7 +617,6 @@ static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
- struct msm_mmu *mmu = &iommu->base;
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
struct adreno_smmu_fault_info info, *ptr = NULL;
@@ -359,9 +630,6 @@ static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev
pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
- if (mmu->funcs->resume_translation)
- mmu->funcs->resume_translation(mmu);
-
return 0;
}
@@ -376,12 +644,12 @@ static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *de
return -ENOSYS;
}
-static void msm_iommu_resume_translation(struct msm_mmu *mmu)
+static void msm_iommu_set_stall(struct msm_mmu *mmu, bool enable)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
- if (adreno_smmu->resume_translation)
- adreno_smmu->resume_translation(adreno_smmu->cookie, true);
+ if (adreno_smmu->set_stall)
+ adreno_smmu->set_stall(adreno_smmu->cookie, enable);
}
static void msm_iommu_detach(struct msm_mmu *mmu)
@@ -392,11 +660,14 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
+ WARN_ON(off != 0);
+
/* The arm-smmu driver expects the addresses to be sign extended */
if (iova & BIT_ULL(48))
iova |= GENMASK_ULL(63, 49);
@@ -423,6 +694,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_domain_free(iommu->domain);
+ kmem_cache_destroy(iommu->pt_cache);
kfree(iommu);
}
@@ -431,7 +703,7 @@ static const struct msm_mmu_funcs funcs = {
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
- .resume_translation = msm_iommu_resume_translation,
+ .set_stall = msm_iommu_set_stall,
};
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
@@ -496,6 +768,14 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
return mmu;
iommu = to_msm_iommu(mmu);
+ if (adreno_smmu && adreno_smmu->cookie) {
+ const struct io_pgtable_cfg *cfg =
+ adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+ size_t tblsz = get_tblsz(cfg);
+
+ iommu->pt_cache =
+ kmem_cache_create("msm-mmu-pt", tblsz, tblsz, 0, NULL);
+ }
iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
/* Enable stall on iommu fault: */
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 35d5397e73b4..6889f1c1e721 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -13,6 +13,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>
+#include <drm/clients/drm_client_setup.h>
#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
@@ -137,7 +138,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
vbl_work->enable = enable;
vbl_work->priv = priv;
- queue_work(priv->wq, &vbl_work->work);
+ queue_work(priv->kms->wq, &vbl_work->work);
return 0;
}
@@ -176,9 +177,9 @@ static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void
return -ENOSYS;
}
-struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
{
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
@@ -204,17 +205,26 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
return NULL;
}
- aspace = msm_gem_address_space_create(mmu, "mdp_kms",
- 0x1000, 0x100000000 - 0x1000);
- if (IS_ERR(aspace)) {
- dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
+ vm = msm_gem_vm_create(dev, mmu, "mdp_kms",
+ 0x1000, 0x100000000 - 0x1000, true);
+ if (IS_ERR(vm)) {
+ dev_err(mdp_dev, "vm create, error %pe\n", vm);
mmu->funcs->destroy(mmu);
- return aspace;
+ return vm;
}
- msm_mmu_set_fault_handler(aspace->mmu, kms, msm_kms_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(vm)->mmu, kms, msm_kms_fault_handler);
- return aspace;
+ return vm;
+}
+
+void msm_drm_kms_unregister(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
+
+ drm_atomic_helper_shutdown(ddev);
}
void msm_drm_kms_uninit(struct device *dev)
@@ -227,10 +237,17 @@ void msm_drm_kms_uninit(struct device *dev)
BUG_ON(!kms);
+ /* We must cancel and cleanup any pending vblank enable/disable
+ * work before msm_irq_uninstall() to avoid work re-enabling an
+ * irq after uninstall has disabled it.
+ */
+
+ flush_workqueue(kms->wq);
+
/* clean up event worker threads */
- for (i = 0; i < priv->num_crtcs; i++) {
- if (priv->event_thread[i].worker)
- kthread_destroy_worker(priv->event_thread[i].worker);
+ for (i = 0; i < MAX_CRTCS; i++) {
+ if (kms->event_thread[i].worker)
+ kthread_destroy_worker(kms->event_thread[i].worker);
}
drm_kms_helper_poll_fini(ddev);
@@ -261,7 +278,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
- return ret;
+ goto err_msm_uninit;
}
/* Enable normalization of plane zpos */
@@ -283,7 +300,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
struct msm_drm_thread *ev_thread;
/* initialize event thread */
- ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
+ ev_thread = &kms->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
@@ -296,7 +313,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
sched_set_fifo(ev_thread->worker->task);
}
- ret = drm_vblank_init(ddev, priv->num_crtcs);
+ ret = drm_vblank_init(ddev, ddev->mode_config.num_crtc);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
@@ -359,3 +376,13 @@ void msm_kms_shutdown(struct platform_device *pdev)
if (drm && drm->registered && priv->kms)
drm_atomic_helper_shutdown(drm);
}
+
+void msm_drm_kms_post_init(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
+
+ drm_kms_helper_poll_init(ddev);
+ drm_client_setup(ddev, NULL);
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 43b58d052ee6..8a7be7b854de 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -13,6 +13,8 @@
#include "msm_drv.h"
+#ifdef CONFIG_DRM_MSM_KMS
+
#define MAX_PLANE 4
/* As there are different display controller blocks depending on the
@@ -127,10 +129,22 @@ struct msm_pending_timer {
unsigned crtc_idx;
};
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+ struct drm_device *dev;
+ struct kthread_worker *worker;
+};
+
struct msm_kms {
const struct msm_kms_funcs *funcs;
struct drm_device *dev;
+ struct hdmi *hdmi;
+
+ struct msm_dsi *dsi[MSM_DSI_CONTROLLER_COUNT];
+
+ struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
+
/* irq number to be passed on to msm_irq_install */
int irq;
bool irq_requested;
@@ -139,7 +153,7 @@ struct msm_kms {
atomic_t fault_snapshot_capture;
/* mapper-id used to request GEM buffer mapped for scanout: */
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* disp snapshot support */
struct kthread_worker *dump_worker;
@@ -153,6 +167,9 @@ struct msm_kms {
struct mutex commit_lock[MAX_CRTCS];
unsigned pending_crtc_mask;
struct msm_pending_timer pending_timers[MAX_CRTCS];
+
+ struct workqueue_struct *wq;
+ struct msm_drm_thread event_thread[MAX_CRTCS];
};
static inline int msm_kms_init(struct msm_kms *kms,
@@ -165,6 +182,10 @@ static inline int msm_kms_init(struct msm_kms *kms,
kms->funcs = funcs;
+ kms->wq = alloc_ordered_workqueue("msm", 0);
+ if (!kms->wq)
+ return -ENOMEM;
+
for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
if (ret) {
@@ -181,6 +202,8 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
+
+ destroy_workqueue(kms->wq);
}
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
@@ -192,6 +215,29 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv);
+void msm_drm_kms_post_init(struct device *dev);
+void msm_drm_kms_unregister(struct device *dev);
void msm_drm_kms_uninit(struct device *dev);
+#else /* ! CONFIG_DRM_MSM_KMS */
+
+static inline int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
+{
+ return -ENODEV;
+}
+
+static inline void msm_drm_kms_post_init(struct device *dev)
+{
+}
+
+static inline void msm_drm_kms_unregister(struct device *dev)
+{
+}
+
+static inline void msm_drm_kms_uninit(struct device *dev)
+{
+}
+
+#endif
+
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index dcb49fd30402..1f5fe7811e01 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -16,14 +16,17 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include "msm_mdss.h"
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_kms.h"
#include <generated/mdss.xml.h>
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
-#define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */
+struct msm_mdss_data {
+ u32 reg_bus_bw;
+};
struct msm_mdss {
struct device *dev;
@@ -36,7 +39,8 @@ struct msm_mdss {
unsigned long enabled_mask;
struct irq_domain *domain;
} irq_controller;
- const struct msm_mdss_data *mdss_data;
+ const struct qcom_ubwc_cfg_data *mdss_data;
+ u32 reg_bus_bw;
struct icc_path *mdp_path[2];
u32 num_mdp_paths;
struct icc_path *reg_bus_path;
@@ -150,7 +154,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
dev = msm_mdss->dev;
- domain = irq_domain_add_linear(dev->of_node, 32,
+ domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32,
&msm_mdss_irqdomain_ops, msm_mdss);
if (!domain) {
dev_err(dev, "failed to add irq_domain\n");
@@ -165,9 +169,9 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
- const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
- MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
if (data->ubwc_bank_spread)
value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
@@ -180,9 +184,9 @@ static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
- const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
- MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
if (data->macrotile_mode)
value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
@@ -198,9 +202,9 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
- const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
- MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
if (data->ubwc_bank_spread)
value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
@@ -222,67 +226,22 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
}
}
-#define MDSS_HW_MAJ_MIN \
- (MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK)
-
-#define MDSS_HW_MSM8996 0x1007
-#define MDSS_HW_MSM8937 0x100e
-#define MDSS_HW_MSM8953 0x1010
-#define MDSS_HW_MSM8998 0x3000
-#define MDSS_HW_SDM660 0x3002
-#define MDSS_HW_SDM630 0x3003
-
-/*
- * MDP5 platforms use generic qcom,mdp5 compat string, so we have to generate this data
- */
-static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_mdss *mdss)
-{
- struct msm_mdss_data *data;
- u32 hw_rev;
-
- data = devm_kzalloc(mdss->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return NULL;
-
- hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION);
- hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev);
-
- if (hw_rev == MDSS_HW_MSM8996 ||
- hw_rev == MDSS_HW_MSM8937 ||
- hw_rev == MDSS_HW_MSM8953 ||
- hw_rev == MDSS_HW_MSM8998 ||
- hw_rev == MDSS_HW_SDM660 ||
- hw_rev == MDSS_HW_SDM630) {
- data->ubwc_dec_version = UBWC_1_0;
- data->ubwc_enc_version = UBWC_1_0;
- }
-
- if (hw_rev == MDSS_HW_MSM8996 ||
- hw_rev == MDSS_HW_MSM8998)
- data->highest_bank_bit = 2;
- else
- data->highest_bank_bit = 1;
-
- return data;
-}
-
-const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
+static void msm_mdss_setup_ubwc_dec_50(struct msm_mdss *msm_mdss)
{
- struct msm_mdss *mdss;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
- if (!dev)
- return ERR_PTR(-EINVAL);
+ if (data->ubwc_bank_spread)
+ value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
- mdss = dev_get_drvdata(dev);
+ if (data->macrotile_mode)
+ value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
- /*
- * We could not do it at the probe time, since hw revision register was
- * not readable. Fill data structure now for the MDP5 platforms.
- */
- if (!mdss->mdss_data && mdss->is_mdp5)
- mdss->mdss_data = msm_mdss_generate_mdp5_mdss_data(mdss);
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
- return mdss->mdss_data;
+ writel_relaxed(4, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
+ writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
}
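Note the asymmetry between the decoder paths above: the common UBWC config (qcom_ubwc_cfg_data) stores the absolute DDR highest-bank bit, the pre-5.0 UBWC_STATIC field encodes it biased by 13 (hence the "- 13"), and the new 5.0 path writes the value through unchanged. A small sketch of that encoding assumption (helper name hypothetical):

/* Hypothetical helper restating the bank-bit encoding used above:
 * absolute bit number from the common UBWC config, biased by 13 for
 * the pre-5.0 UBWC_STATIC field layout. */
static u32 example_hbb_field(const struct qcom_ubwc_cfg_data *data, bool dec50)
{
        if (dec50)
                return MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);

        WARN_ON(data->highest_bank_bit < 13);   /* bank bits start at 13 */
        return MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
}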
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
@@ -297,12 +256,8 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
for (i = 0; i < msm_mdss->num_mdp_paths; i++)
icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));
- if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
- icc_set_bw(msm_mdss->reg_bus_path, 0,
- msm_mdss->mdss_data->reg_bus_bw);
- else
- icc_set_bw(msm_mdss->reg_bus_path, 0,
- DEFAULT_REG_BW);
+ icc_set_bw(msm_mdss->reg_bus_path, 0,
+ msm_mdss->reg_bus_bw);
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
@@ -339,6 +294,9 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
case UBWC_4_3:
msm_mdss_setup_ubwc_dec_40(msm_mdss);
break;
+ case UBWC_5_0:
+ msm_mdss_setup_ubwc_dec_50(msm_mdss);
+ break;
default:
dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
msm_mdss->mdss_data->ubwc_dec_version);
@@ -438,6 +396,7 @@ static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_d
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
+ const struct msm_mdss_data *mdss_data;
struct msm_mdss *msm_mdss;
int ret;
int irq;
@@ -450,7 +409,15 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
if (!msm_mdss)
return ERR_PTR(-ENOMEM);
- msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);
+ msm_mdss->mdss_data = qcom_ubwc_config_get_data();
+ if (IS_ERR(msm_mdss->mdss_data))
+ return ERR_CAST(msm_mdss->mdss_data);
+
+ mdss_data = of_device_get_match_data(&pdev->dev);
+ if (!mdss_data)
+ return ERR_PTR(-EINVAL);
+
+ msm_mdss->reg_bus_bw = mdss_data->reg_bus_bw;
msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
if (IS_ERR(msm_mdss->mmio))
@@ -569,194 +536,49 @@ static void mdss_remove(struct platform_device *pdev)
msm_mdss_destroy(mdss);
}
-static const struct msm_mdss_data msm8998_data = {
- .ubwc_enc_version = UBWC_1_0,
- .ubwc_dec_version = UBWC_1_0,
- .highest_bank_bit = 2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data qcm2290_data = {
- /* no UBWC */
- .highest_bank_bit = 0x2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sa8775p_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 4,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0,
- .macrotile_mode = true,
- .reg_bus_bw = 74000,
-};
-
-static const struct msm_mdss_data sc7180_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0x1,
- .reg_bus_bw = 76800,
+static const struct msm_mdss_data data_57k = {
+ .reg_bus_bw = 57000,
};
-static const struct msm_mdss_data sc7280_data = {
- .ubwc_enc_version = UBWC_3_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 1,
- .macrotile_mode = true,
+static const struct msm_mdss_data data_74k = {
.reg_bus_bw = 74000,
};
-static const struct msm_mdss_data sc8180x_data = {
- .ubwc_enc_version = UBWC_3_0,
- .ubwc_dec_version = UBWC_3_0,
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sc8280xp_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sdm670_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sdm845_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm6350_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm7150_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 1,
+static const struct msm_mdss_data data_76k8 = {
.reg_bus_bw = 76800,
};
-static const struct msm_mdss_data sm8150_data = {
- .ubwc_enc_version = UBWC_3_0,
- .ubwc_dec_version = UBWC_3_0,
- .highest_bank_bit = 2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm6115_data = {
- .ubwc_enc_version = UBWC_1_0,
- .ubwc_dec_version = UBWC_2_0,
- .ubwc_swizzle = 7,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0x1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm6125_data = {
- .ubwc_enc_version = UBWC_1_0,
- .ubwc_dec_version = UBWC_3_0,
- .ubwc_swizzle = 1,
- .highest_bank_bit = 1,
-};
-
-static const struct msm_mdss_data sm6150_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm8250_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm8350_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 74000,
-};
-
-static const struct msm_mdss_data sm8550_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_3,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 57000,
-};
-
-static const struct msm_mdss_data x1e80100_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_3,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- /* TODO: Add reg_bus_bw with real value */
+static const struct msm_mdss_data data_153k6 = {
+ .reg_bus_bw = 153600,
};
static const struct of_device_id mdss_dt_match[] = {
- { .compatible = "qcom,mdss" },
- { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
- { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
- { .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
- { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
- { .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
- { .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
- { .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
- { .compatible = "qcom,sc8180x-mdss", .data = &sc8180x_data },
- { .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
- { .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
- { .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
- { .compatible = "qcom,sm6150-mdss", .data = &sm6150_data },
- { .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
- { .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
- { .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },
- { .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
- { .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
- { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
- { .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
- { .compatible = "qcom,sm8650-mdss", .data = &sm8550_data},
- { .compatible = "qcom,x1e80100-mdss", .data = &x1e80100_data},
+ { .compatible = "qcom,mdss", .data = &data_153k6 },
+ { .compatible = "qcom,msm8998-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,qcm2290-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sa8775p-mdss", .data = &data_74k },
+ { .compatible = "qcom,sar2130p-mdss", .data = &data_74k },
+ { .compatible = "qcom,sdm670-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sdm845-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sc7180-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sc7280-mdss", .data = &data_74k },
+ { .compatible = "qcom,sc8180x-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sc8280xp-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6115-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6125-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6150-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6350-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6375-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm7150-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm8150-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm8250-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm8350-mdss", .data = &data_74k },
+ { .compatible = "qcom,sm8450-mdss", .data = &data_74k },
+ { .compatible = "qcom,sm8550-mdss", .data = &data_57k },
+ { .compatible = "qcom,sm8650-mdss", .data = &data_57k },
+ { .compatible = "qcom,sm8750-mdss", .data = &data_57k },
+ /* TODO: x1e8: Add reg_bus_bw with real value */
+ { .compatible = "qcom,x1e80100-mdss", .data = &data_153k6 },
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
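With the UBWC parameters moved to the common qcom,ubwc config, the per-SoC match data above shrinks to a register-bus bandwidth bucket. Adding a new SoC is then just picking (or adding) a bucket; the entry below is a hypothetical illustration, not part of the patch:

/* Hypothetical new SoC hookup: only the reg-bus vote lives here now,
 * UBWC parameters come from qcom_ubwc_config_get_data(). */
static const struct msm_mdss_data data_example = {
        .reg_bus_bw = 76800,
};

/* { .compatible = "qcom,example-mdss", .data = &data_example }, */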
diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
deleted file mode 100644
index 14dc53704314..000000000000
--- a/drivers/gpu/drm/msm/msm_mdss.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2018, The Linux Foundation
- */
-
-#ifndef __MSM_MDSS_H__
-#define __MSM_MDSS_H__
-
-struct msm_mdss_data {
- u32 ubwc_enc_version;
- /* can be read from register 0x58 */
- u32 ubwc_dec_version;
- u32 ubwc_swizzle;
- u32 highest_bank_bit;
- bool ubwc_bank_spread;
- bool macrotile_mode;
- u32 reg_bus_bw;
-};
-
-#define UBWC_1_0 0x10000000
-#define UBWC_2_0 0x20000000
-#define UBWC_3_0 0x30000000
-#define UBWC_4_0 0x40000000
-#define UBWC_4_3 0x40030000
-
-const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev);
-
-#endif /* __MSM_MDSS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index daf91529e02b..8915662fbd4d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -9,13 +9,21 @@
#include <linux/iommu.h>
+struct msm_mmu_prealloc;
+struct msm_mmu;
+struct msm_gpu;
+
struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu);
+ void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+ uint64_t iova, size_t len);
+ int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
+ void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- size_t len, int prot);
+ size_t off, size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
- void (*resume_translation)(struct msm_mmu *mmu);
+ void (*set_stall)(struct msm_mmu *mmu, bool enable);
};
enum msm_mmu_type {
@@ -24,12 +32,38 @@ enum msm_mmu_type {
MSM_MMU_IOMMU_PAGETABLE,
};
+/**
+ * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
+ */
+struct msm_mmu_prealloc {
+ /** @count: Number of pages reserved. */
+ uint32_t count;
+ /** @ptr: Index of first unused page in @pages */
+ uint32_t ptr;
+ /**
+ * @pages: Array of pages preallocated for MMU table updates.
+ *
+ * After a VM operation, there might be free pages remaining in this
+ * array (since the amount allocated is a worst-case). These are
+ * returned to the pt_cache at mmu->prealloc_cleanup().
+ */
+ void **pages;
+};
+
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags, void *data);
void *arg;
enum msm_mmu_type type;
+
+ /**
+ * @prealloc: pre-allocated pages for pgtable
+ *
+ * Set while a VM_BIND job is running, serialized under
+ * msm_gem_vm::mmu_lock.
+ */
+ struct msm_mmu_prealloc *prealloc;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
@@ -51,7 +85,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
int *asid);
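The three prealloc hooks above split pagetable memory management out of the map path: a VM_BIND job can size and allocate worst-case pagetable pages up front, map without allocating (or sleeping), and finally return whatever went unused. A rough lifecycle sketch under that reading (the wrapper below is hypothetical):

/* Hypothetical VM_BIND-style lifecycle for the prealloc hooks: count
 * worst-case pages, allocate them, map while mmu->prealloc is set,
 * then hand leftovers back via prealloc_cleanup(). */
static int example_prealloc_map(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
                                uint64_t iova, struct sg_table *sgt, size_t len)
{
        int ret;

        mmu->funcs->prealloc_count(mmu, p, iova, len);

        ret = mmu->funcs->prealloc_allocate(mmu, p);
        if (ret)
                return ret;

        mmu->prealloc = p;      /* serialized under msm_gem_vm::mmu_lock */
        ret = mmu->funcs->map(mmu, iova, sgt, 0, len, IOMMU_READ | IOMMU_WRITE);
        mmu->prealloc = NULL;

        mmu->funcs->prealloc_cleanup(mmu, p);   /* return unused pages */
        return ret;
}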
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 39138e190cb9..54493a94dcb7 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -308,21 +308,11 @@ void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
priv->hangrd = NULL;
}
-static void snapshot_buf(struct msm_rd_state *rd,
- struct msm_gem_submit *submit, int idx,
- uint64_t iova, uint32_t size, bool full)
+static void snapshot_buf(struct msm_rd_state *rd, struct drm_gem_object *obj,
+ uint64_t iova, bool full, size_t offset, size_t size)
{
- struct drm_gem_object *obj = submit->bos[idx].obj;
- unsigned offset = 0;
const char *buf;
- if (iova) {
- offset = iova - submit->bos[idx].iova;
- } else {
- iova = submit->bos[idx].iova;
- size = obj->size;
- }
-
/*
* Always write the GPUADDR header so we can get a complete list of all the
* buffers in the cmd
@@ -333,10 +323,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
if (!full)
return;
- /* But only dump the contents of buffers marked READ */
- if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
- return;
-
buf = msm_gem_get_vaddr_active(obj);
if (IS_ERR(buf))
return;
@@ -352,6 +338,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{
+ extern bool rd_full;
struct task_struct *task;
char msg[256];
int i, n;
@@ -385,16 +372,43 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- for (i = 0; i < submit->nr_bos; i++)
- snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ struct drm_gpuva *vma;
- for (i = 0; i < submit->nr_cmds; i++) {
- uint32_t szd = submit->cmd[i].size; /* in dwords */
+ drm_gpuvm_resv_assert_held(submit->vm);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
+
+ /* Skip MAP_NULL/PRR VMAs: */
+ if (!vma->gem.obj)
+ continue;
+
+ snapshot_buf(rd, vma->gem.obj, vma->va.addr, dump,
+ vma->gem.offset, vma->va.range);
+ }
+
+ } else {
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
+
+ snapshot_buf(rd, obj, submit->bos[i].iova, dump, 0, obj->size);
+ }
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ uint32_t szd = submit->cmd[i].size; /* in dwords */
+ int idx = submit->cmd[i].idx;
+ bool dump = rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+
+ /* snapshot cmdstream bo's (if we haven't already): */
+ if (!dump) {
+ struct drm_gem_object *obj = submit->bos[idx].obj;
+ size_t offset = submit->cmd[i].iova - submit->bos[idx].iova;
- /* snapshot cmdstream bo's (if we haven't already): */
- if (!should_dump(submit, i)) {
- snapshot_buf(rd, submit, submit->cmd[i].idx,
- submit->cmd[i].iova, szd * 4, true);
+ snapshot_buf(rd, obj, submit->cmd[i].iova, true,
+ offset, szd * 4);
+ }
}
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index c5651c39ac2a..b2f612e5dc79 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -17,6 +17,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
struct msm_fence_context *fctx = submit->ring->fctx;
struct msm_gpu *gpu = submit->gpu;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ unsigned nr_cmds = submit->nr_cmds;
int i;
msm_fence_init(submit->hw_fence, fctx);
@@ -36,8 +37,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
/* TODO move submit path over to using a per-ring lock.. */
mutex_lock(&gpu->lock);
+ if (submit->queue->ctx->closed)
+ submit->nr_cmds = 0;
+
msm_gpu_submit(gpu, submit);
+ submit->nr_cmds = nr_cmds;
+
mutex_unlock(&gpu->lock);
return dma_fence_get(submit->hw_fence);
@@ -84,7 +90,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
- gpu->aspace, &ring->bo, &ring->iova);
+ gpu->vm, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
@@ -93,7 +99,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
}
msm_gem_object_set_name(ring->bo, "ring%d", id);
- args.name = to_msm_bo(ring->bo)->name,
+ args.name = to_msm_bo(ring->bo)->name;
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
@@ -131,7 +137,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
msm_fence_context_free(ring->fctx);
- msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
+ msm_gem_kernel_put(ring->bo, ring->gpu->vm);
kfree(ring);
}
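The nr_cmds save/restore in msm_job_run() above deserves a note: a submit whose context has been closed still has to flow through msm_gpu_submit() so its hw_fence gets written and signaled, but forcing nr_cmds to zero keeps its command buffers from ever reaching the ring. Restated with comments (same logic as the hunk, assuming later retire/coredump paths still want the real count):

        unsigned nr_cmds = submit->nr_cmds;

        if (submit->queue->ctx->closed)
                submit->nr_cmds = 0;    /* drop the IBs, keep the fence */

        msm_gpu_submit(gpu, submit);

        submit->nr_cmds = nr_cmds;      /* restore the real count */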
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 7fed1de63b5d..8617a82cd6b3 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -7,8 +7,7 @@
#include "msm_gpu.h"
-int msm_file_private_set_sysprof(struct msm_file_private *ctx,
- struct msm_gpu *gpu, int sysprof)
+int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof)
{
/*
* Since pm_runtime and sysprof_active are both refcounts, we
@@ -46,10 +45,10 @@ int msm_file_private_set_sysprof(struct msm_file_private *ctx,
return 0;
}
-void __msm_file_private_destroy(struct kref *kref)
+void __msm_context_destroy(struct kref *kref)
{
- struct msm_file_private *ctx = container_of(kref,
- struct msm_file_private, ref);
+ struct msm_context *ctx = container_of(kref,
+ struct msm_context, ref);
int i;
for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
@@ -60,7 +59,7 @@ void __msm_file_private_destroy(struct kref *kref)
kfree(ctx->entities[i]);
}
- msm_gem_address_space_put(ctx->aspace);
+ drm_gpuvm_put(ctx->vm);
kfree(ctx->comm);
kfree(ctx->cmdline);
kfree(ctx);
@@ -73,12 +72,15 @@ void msm_submitqueue_destroy(struct kref *kref)
idr_destroy(&queue->fence_idr);
- msm_file_private_put(queue->ctx);
+ if (queue->entity == &queue->_vm_bind_entity[0])
+ drm_sched_entity_destroy(queue->entity);
+
+ msm_context_put(queue->ctx);
kfree(queue);
}
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
u32 id)
{
struct msm_gpu_submitqueue *entry;
@@ -101,9 +103,9 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
return NULL;
}
-void msm_submitqueue_close(struct msm_file_private *ctx)
+void msm_submitqueue_close(struct msm_context *ctx)
{
- struct msm_gpu_submitqueue *entry, *tmp;
+ struct msm_gpu_submitqueue *queue, *tmp;
if (!ctx)
return;
@@ -112,14 +114,21 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
* No lock needed in close and there won't
* be any more user ioctls coming our way
*/
- list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
- list_del(&entry->node);
- msm_submitqueue_put(entry);
+ list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) {
+ if (queue->entity == &queue->_vm_bind_entity[0])
+ drm_sched_entity_flush(queue->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ list_del(&queue->node);
+ msm_submitqueue_put(queue);
}
+
+ if (!ctx->vm)
+ return;
+
+ msm_gem_vm_close(ctx->vm);
}
static struct drm_sched_entity *
-get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+get_sched_entity(struct msm_context *ctx, struct msm_ringbuffer *ring,
unsigned ring_nr, enum drm_sched_priority sched_prio)
{
static DEFINE_MUTEX(entity_lock);
@@ -155,14 +164,12 @@ get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
return ctx->entities[idx];
}
-int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx,
u32 prio, u32 flags, u32 *id)
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
enum drm_sched_priority sched_prio;
- extern int enable_preemption;
- bool preemption_supported;
unsigned ring_nr;
int ret;
@@ -172,26 +179,53 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
if (!priv->gpu)
return -ENODEV;
- preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;
+ if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+ unsigned sz;
- if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
- return -EINVAL;
+ /* Not allowed for kernel managed VMs (ie. kernel allocs VA) */
+ if (!msm_context_is_vmbind(ctx))
+ return -EINVAL;
- ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
- if (ret)
- return ret;
+ if (prio)
+ return -EINVAL;
+
+ sz = struct_size(queue, _vm_bind_entity, 1);
+ queue = kzalloc(sz, GFP_KERNEL);
+ } else {
+ extern int enable_preemption;
+ bool preemption_supported =
+ priv->gpu->nr_rings == 1 && enable_preemption != 0;
+
+ if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
+ return -EINVAL;
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
+ if (ret)
+ return ret;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ }
if (!queue)
return -ENOMEM;
kref_init(&queue->ref);
queue->flags = flags;
- queue->ring_nr = ring_nr;
- queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
- ring_nr, sched_prio);
+ if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+ struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched;
+
+ queue->entity = &queue->_vm_bind_entity[0];
+
+ drm_sched_entity_init(queue->entity, DRM_SCHED_PRIORITY_KERNEL,
+ &sched, 1, NULL);
+ } else {
+ queue->ring_nr = ring_nr;
+
+ queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+ ring_nr, sched_prio);
+ }
+
if (IS_ERR(queue->entity)) {
ret = PTR_ERR(queue->entity);
kfree(queue);
@@ -200,7 +234,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
write_lock(&ctx->queuelock);
- queue->ctx = msm_file_private_get(ctx);
+ queue->ctx = msm_context_get(ctx);
queue->id = ctx->queueid++;
if (id)
@@ -221,7 +255,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
* Create the default submit-queue (id==0), used for backwards compatibility
* for userspace that pre-dates the introduction of submitqueues.
*/
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
+int msm_submitqueue_init(struct drm_device *drm, struct msm_context *ctx)
{
struct msm_drm_private *priv = drm->dev_private;
int default_prio, max_priority;
@@ -261,7 +295,7 @@ static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
return ret ? -EFAULT : 0;
}
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_query(struct drm_device *drm, struct msm_context *ctx,
struct drm_msm_submitqueue_query *args)
{
struct msm_gpu_submitqueue *queue;
@@ -282,7 +316,7 @@ int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
return ret;
}
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
+int msm_submitqueue_remove(struct msm_context *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
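A detail worth calling out in the submitqueue changes above: VM_BIND queues allocate their drm_sched_entity inline at the tail of the queue (struct_size() with one _vm_bind_entity element), while ordinary queues share per-priority entities owned by the context. Cleanup therefore keys off whether the queue owns its entity, as in msm_submitqueue_destroy() and msm_submitqueue_close(); a one-line restatement of that test (helper name hypothetical):

/* Hypothetical helper: only VM_BIND queues own (and must flush/destroy)
 * their scheduler entity, which lives inline at the end of the queue. */
static bool example_queue_owns_entity(struct msm_gpu_submitqueue *queue)
{
        return queue->entity == &queue->_vm_bind_entity[0];
}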
diff --git a/drivers/gpu/drm/msm/msm_syncobj.c b/drivers/gpu/drm/msm/msm_syncobj.c
new file mode 100644
index 000000000000..4baa9f522c54
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_syncobj.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Google, Inc */
+
+#include "drm/drm_drv.h"
+
+#include "msm_drv.h"
+#include "msm_syncobj.h"
+
+struct drm_syncobj **
+msm_syncobj_parse_deps(struct drm_device *dev,
+ struct drm_sched_job *job,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride)
+{
+ struct drm_syncobj **syncobjs = NULL;
+ struct drm_msm_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!syncobjs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_in_syncobjs; ++i) {
+ uint64_t address = in_syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.point &&
+ !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
+ break;
+ }
+
+ if (syncobj_desc.flags & ~MSM_SYNCOBJ_FLAGS) {
+ ret = UERR(EINVAL, dev, "invalid syncobj flags: %x", syncobj_desc.flags);
+ break;
+ }
+
+ ret = drm_sched_job_add_syncobj_dependency(job, file,
+ syncobj_desc.handle,
+ syncobj_desc.point);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & MSM_SYNCOBJ_RESET) {
+ syncobjs[i] = drm_syncobj_find(file, syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = UERR(EINVAL, dev, "invalid syncobj handle: %u", syncobj_desc.handle);
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ if (syncobjs[j])
+ drm_syncobj_put(syncobjs[j]);
+ }
+ kfree(syncobjs);
+ return ERR_PTR(ret);
+ }
+ return syncobjs;
+}
+
+void
+msm_syncobj_reset(struct drm_syncobj **syncobjs, uint32_t nr_syncobjs)
+{
+ uint32_t i;
+
+ for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+struct msm_syncobj_post_dep *
+msm_syncobj_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride)
+{
+ struct msm_syncobj_post_dep *post_deps;
+ struct drm_msm_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!post_deps)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_syncobjs; ++i) {
+ uint64_t address = syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+
+ if (syncobj_desc.flags) {
+ ret = UERR(EINVAL, dev, "invalid syncobj flags");
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ if (!drm_core_check_feature(dev,
+ DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
+ break;
+ }
+
+ post_deps[i].chain = dma_fence_chain_alloc();
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ ret = UERR(EINVAL, dev, "invalid syncobj handle");
+ break;
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ dma_fence_chain_free(post_deps[j].chain);
+ if (post_deps[j].syncobj)
+ drm_syncobj_put(post_deps[j].syncobj);
+ }
+
+ kfree(post_deps);
+ return ERR_PTR(ret);
+ }
+
+ return post_deps;
+}
+
+void
+msm_syncobj_process_post_deps(struct msm_syncobj_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence)
+{
+ uint32_t i;
+
+ for (i = 0; post_deps && i < count; ++i) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+}
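Taken together, the helpers above are meant to bracket a submit: parse the wait syncobjs into scheduler dependencies before the job is queued, then attach the job's fence to the signal syncobjs once it exists. A hedged end-to-end sketch (the args fields and run_submit() are hypothetical stand-ins for the real submit path, and the error unwinding is elided):

        struct drm_syncobj **syncobjs;
        struct msm_syncobj_post_dep *post_deps;
        struct dma_fence *fence;

        syncobjs = msm_syncobj_parse_deps(dev, job, file,
                                          args->in_syncobjs,
                                          args->nr_in_syncobjs,
                                          args->syncobj_stride);
        if (IS_ERR(syncobjs))
                return PTR_ERR(syncobjs);

        post_deps = msm_syncobj_parse_post_deps(dev, file, args->out_syncobjs,
                                                args->nr_out_syncobjs,
                                                args->syncobj_stride);
        if (IS_ERR(post_deps))
                return PTR_ERR(post_deps);      /* real code unwinds syncobjs */

        fence = run_submit(job);                /* hypothetical submit step */

        msm_syncobj_reset(syncobjs, args->nr_in_syncobjs);
        msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence);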
diff --git a/drivers/gpu/drm/msm/msm_syncobj.h b/drivers/gpu/drm/msm/msm_syncobj.h
new file mode 100644
index 000000000000..bcaa15d01da0
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_syncobj.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Google, Inc */
+
+#ifndef __MSM_GEM_SYNCOBJ_H__
+#define __MSM_GEM_SYNCOBJ_H__
+
+#include "drm/drm_device.h"
+#include "drm/drm_syncobj.h"
+#include "drm/gpu_scheduler.h"
+
+struct msm_syncobj_post_dep {
+ struct drm_syncobj *syncobj;
+ uint64_t point;
+ struct dma_fence_chain *chain;
+};
+
+struct drm_syncobj **
+msm_syncobj_parse_deps(struct drm_device *dev,
+ struct drm_sched_job *job,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride);
+
+void msm_syncobj_reset(struct drm_syncobj **syncobjs, uint32_t nr_syncobjs);
+
+struct msm_syncobj_post_dep *
+msm_syncobj_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride);
+
+void msm_syncobj_process_post_deps(struct msm_syncobj_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence);
+
+#endif /* __MSM_GEM_SYNCOBJ_H__ */
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 2db425abf0f3..d860fd94feae 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -5,6 +5,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<import file="freedreno_copyright.xml"/>
<import file="adreno/adreno_common.xml"/>
<import file="adreno/adreno_pm4.xml"/>
+<import file="adreno/a6xx_enums.xml"/>
+<import file="adreno/a7xx_enums.xml"/>
+<import file="adreno/a6xx_perfcntrs.xml"/>
+<import file="adreno/a7xx_perfcntrs.xml"/>
+<import file="adreno/a6xx_descriptors.xml"/>
<!--
Each register that is actually being used by driver should have "usage" defined,
@@ -20,2205 +25,6 @@ is either overwritten by renderpass/blit (ib2) or not used if not overwritten
by a particular renderpass/blit.
-->
-<!-- these might be same as a5xx -->
-<enum name="a6xx_tile_mode">
- <value name="TILE6_LINEAR" value="0"/>
- <value name="TILE6_2" value="2"/>
- <value name="TILE6_3" value="3"/>
-</enum>
-
-<enum name="a6xx_format">
- <value value="0x02" name="FMT6_A8_UNORM"/>
- <value value="0x03" name="FMT6_8_UNORM"/>
- <value value="0x04" name="FMT6_8_SNORM"/>
- <value value="0x05" name="FMT6_8_UINT"/>
- <value value="0x06" name="FMT6_8_SINT"/>
-
- <value value="0x08" name="FMT6_4_4_4_4_UNORM"/>
- <value value="0x0a" name="FMT6_5_5_5_1_UNORM"/>
- <value value="0x0c" name="FMT6_1_5_5_5_UNORM"/> <!-- read only -->
- <value value="0x0e" name="FMT6_5_6_5_UNORM"/>
-
- <value value="0x0f" name="FMT6_8_8_UNORM"/>
- <value value="0x10" name="FMT6_8_8_SNORM"/>
- <value value="0x11" name="FMT6_8_8_UINT"/>
- <value value="0x12" name="FMT6_8_8_SINT"/>
- <value value="0x13" name="FMT6_L8_A8_UNORM"/>
-
- <value value="0x15" name="FMT6_16_UNORM"/>
- <value value="0x16" name="FMT6_16_SNORM"/>
- <value value="0x17" name="FMT6_16_FLOAT"/>
- <value value="0x18" name="FMT6_16_UINT"/>
- <value value="0x19" name="FMT6_16_SINT"/>
-
- <value value="0x21" name="FMT6_8_8_8_UNORM"/>
- <value value="0x22" name="FMT6_8_8_8_SNORM"/>
- <value value="0x23" name="FMT6_8_8_8_UINT"/>
- <value value="0x24" name="FMT6_8_8_8_SINT"/>
-
- <value value="0x30" name="FMT6_8_8_8_8_UNORM"/>
- <value value="0x31" name="FMT6_8_8_8_X8_UNORM"/> <!-- samples 1 for alpha -->
- <value value="0x32" name="FMT6_8_8_8_8_SNORM"/>
- <value value="0x33" name="FMT6_8_8_8_8_UINT"/>
- <value value="0x34" name="FMT6_8_8_8_8_SINT"/>
-
- <value value="0x35" name="FMT6_9_9_9_E5_FLOAT"/>
-
- <value value="0x36" name="FMT6_10_10_10_2_UNORM"/>
- <value value="0x37" name="FMT6_10_10_10_2_UNORM_DEST"/>
- <value value="0x39" name="FMT6_10_10_10_2_SNORM"/>
- <value value="0x3a" name="FMT6_10_10_10_2_UINT"/>
- <value value="0x3b" name="FMT6_10_10_10_2_SINT"/>
-
- <value value="0x42" name="FMT6_11_11_10_FLOAT"/>
-
- <value value="0x43" name="FMT6_16_16_UNORM"/>
- <value value="0x44" name="FMT6_16_16_SNORM"/>
- <value value="0x45" name="FMT6_16_16_FLOAT"/>
- <value value="0x46" name="FMT6_16_16_UINT"/>
- <value value="0x47" name="FMT6_16_16_SINT"/>
-
- <value value="0x48" name="FMT6_32_UNORM"/>
- <value value="0x49" name="FMT6_32_SNORM"/>
- <value value="0x4a" name="FMT6_32_FLOAT"/>
- <value value="0x4b" name="FMT6_32_UINT"/>
- <value value="0x4c" name="FMT6_32_SINT"/>
- <value value="0x4d" name="FMT6_32_FIXED"/>
-
- <value value="0x58" name="FMT6_16_16_16_UNORM"/>
- <value value="0x59" name="FMT6_16_16_16_SNORM"/>
- <value value="0x5a" name="FMT6_16_16_16_FLOAT"/>
- <value value="0x5b" name="FMT6_16_16_16_UINT"/>
- <value value="0x5c" name="FMT6_16_16_16_SINT"/>
-
- <value value="0x60" name="FMT6_16_16_16_16_UNORM"/>
- <value value="0x61" name="FMT6_16_16_16_16_SNORM"/>
- <value value="0x62" name="FMT6_16_16_16_16_FLOAT"/>
- <value value="0x63" name="FMT6_16_16_16_16_UINT"/>
- <value value="0x64" name="FMT6_16_16_16_16_SINT"/>
-
- <value value="0x65" name="FMT6_32_32_UNORM"/>
- <value value="0x66" name="FMT6_32_32_SNORM"/>
- <value value="0x67" name="FMT6_32_32_FLOAT"/>
- <value value="0x68" name="FMT6_32_32_UINT"/>
- <value value="0x69" name="FMT6_32_32_SINT"/>
- <value value="0x6a" name="FMT6_32_32_FIXED"/>
-
- <value value="0x70" name="FMT6_32_32_32_UNORM"/>
- <value value="0x71" name="FMT6_32_32_32_SNORM"/>
- <value value="0x72" name="FMT6_32_32_32_UINT"/>
- <value value="0x73" name="FMT6_32_32_32_SINT"/>
- <value value="0x74" name="FMT6_32_32_32_FLOAT"/>
- <value value="0x75" name="FMT6_32_32_32_FIXED"/>
-
- <value value="0x80" name="FMT6_32_32_32_32_UNORM"/>
- <value value="0x81" name="FMT6_32_32_32_32_SNORM"/>
- <value value="0x82" name="FMT6_32_32_32_32_FLOAT"/>
- <value value="0x83" name="FMT6_32_32_32_32_UINT"/>
- <value value="0x84" name="FMT6_32_32_32_32_SINT"/>
- <value value="0x85" name="FMT6_32_32_32_32_FIXED"/>
-
- <value value="0x8c" name="FMT6_G8R8B8R8_422_UNORM"/> <!-- UYVY -->
- <value value="0x8d" name="FMT6_R8G8R8B8_422_UNORM"/> <!-- YUYV -->
- <value value="0x8e" name="FMT6_R8_G8B8_2PLANE_420_UNORM"/> <!-- NV12 -->
- <value value="0x8f" name="FMT6_NV21"/>
- <value value="0x90" name="FMT6_R8_G8_B8_3PLANE_420_UNORM"/> <!-- YV12 -->
-
- <value value="0x91" name="FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8"/>
-
- <!-- Note: tiling/UBWC for these may be different from equivalent formats
- For example FMT6_NV12_Y is not compatible with FMT6_8_UNORM
- -->
- <value value="0x94" name="FMT6_NV12_Y"/>
- <value value="0x95" name="FMT6_NV12_UV"/>
- <value value="0x96" name="FMT6_NV12_VU"/>
- <value value="0x97" name="FMT6_NV12_4R"/>
- <value value="0x98" name="FMT6_NV12_4R_Y"/>
- <value value="0x99" name="FMT6_NV12_4R_UV"/>
- <value value="0x9a" name="FMT6_P010"/>
- <value value="0x9b" name="FMT6_P010_Y"/>
- <value value="0x9c" name="FMT6_P010_UV"/>
- <value value="0x9d" name="FMT6_TP10"/>
- <value value="0x9e" name="FMT6_TP10_Y"/>
- <value value="0x9f" name="FMT6_TP10_UV"/>
-
- <value value="0xa0" name="FMT6_Z24_UNORM_S8_UINT"/>
-
- <value value="0xab" name="FMT6_ETC2_RG11_UNORM"/>
- <value value="0xac" name="FMT6_ETC2_RG11_SNORM"/>
- <value value="0xad" name="FMT6_ETC2_R11_UNORM"/>
- <value value="0xae" name="FMT6_ETC2_R11_SNORM"/>
- <value value="0xaf" name="FMT6_ETC1"/>
- <value value="0xb0" name="FMT6_ETC2_RGB8"/>
- <value value="0xb1" name="FMT6_ETC2_RGBA8"/>
- <value value="0xb2" name="FMT6_ETC2_RGB8A1"/>
- <value value="0xb3" name="FMT6_DXT1"/>
- <value value="0xb4" name="FMT6_DXT3"/>
- <value value="0xb5" name="FMT6_DXT5"/>
- <value value="0xb7" name="FMT6_RGTC1_UNORM"/>
- <value value="0xb8" name="FMT6_RGTC1_SNORM"/>
- <value value="0xbb" name="FMT6_RGTC2_UNORM"/>
- <value value="0xbc" name="FMT6_RGTC2_SNORM"/>
- <value value="0xbe" name="FMT6_BPTC_UFLOAT"/>
- <value value="0xbf" name="FMT6_BPTC_FLOAT"/>
- <value value="0xc0" name="FMT6_BPTC"/>
- <value value="0xc1" name="FMT6_ASTC_4x4"/>
- <value value="0xc2" name="FMT6_ASTC_5x4"/>
- <value value="0xc3" name="FMT6_ASTC_5x5"/>
- <value value="0xc4" name="FMT6_ASTC_6x5"/>
- <value value="0xc5" name="FMT6_ASTC_6x6"/>
- <value value="0xc6" name="FMT6_ASTC_8x5"/>
- <value value="0xc7" name="FMT6_ASTC_8x6"/>
- <value value="0xc8" name="FMT6_ASTC_8x8"/>
- <value value="0xc9" name="FMT6_ASTC_10x5"/>
- <value value="0xca" name="FMT6_ASTC_10x6"/>
- <value value="0xcb" name="FMT6_ASTC_10x8"/>
- <value value="0xcc" name="FMT6_ASTC_10x10"/>
- <value value="0xcd" name="FMT6_ASTC_12x10"/>
- <value value="0xce" name="FMT6_ASTC_12x12"/>
-
- <!-- for sampling stencil (integer, 2nd channel), not available on a630 -->
- <value value="0xea" name="FMT6_Z24_UINT_S8_UINT"/>
-
- <!-- Not a hw enum, used internally in driver -->
- <value value="0xff" name="FMT6_NONE"/>
-
-</enum>
-
-<!-- probably same as a5xx -->
-<enum name="a6xx_polygon_mode">
- <value name="POLYMODE6_POINTS" value="1"/>
- <value name="POLYMODE6_LINES" value="2"/>
- <value name="POLYMODE6_TRIANGLES" value="3"/>
-</enum>
-
-<enum name="a6xx_depth_format">
- <value name="DEPTH6_NONE" value="0"/>
- <value name="DEPTH6_16" value="1"/>
- <value name="DEPTH6_24_8" value="2"/>
- <value name="DEPTH6_32" value="4"/>
-</enum>
-
-<bitset name="a6x_cp_protect" inline="yes">
- <bitfield name="BASE_ADDR" low="0" high="17"/>
- <bitfield name="MASK_LEN" low="18" high="30"/>
- <bitfield name="READ" pos="31" type="boolean"/>
-</bitset>
-
-<enum name="a6xx_shader_id">
- <value value="0x9" name="A6XX_TP0_TMO_DATA"/>
- <value value="0xa" name="A6XX_TP0_SMO_DATA"/>
- <value value="0xb" name="A6XX_TP0_MIPMAP_BASE_DATA"/>
- <value value="0x19" name="A6XX_TP1_TMO_DATA"/>
- <value value="0x1a" name="A6XX_TP1_SMO_DATA"/>
- <value value="0x1b" name="A6XX_TP1_MIPMAP_BASE_DATA"/>
- <value value="0x29" name="A6XX_SP_INST_DATA"/>
- <value value="0x2a" name="A6XX_SP_LB_0_DATA"/>
- <value value="0x2b" name="A6XX_SP_LB_1_DATA"/>
- <value value="0x2c" name="A6XX_SP_LB_2_DATA"/>
- <value value="0x2d" name="A6XX_SP_LB_3_DATA"/>
- <value value="0x2e" name="A6XX_SP_LB_4_DATA"/>
- <value value="0x2f" name="A6XX_SP_LB_5_DATA"/>
- <value value="0x30" name="A6XX_SP_CB_BINDLESS_DATA"/>
- <value value="0x31" name="A6XX_SP_CB_LEGACY_DATA"/>
- <value value="0x32" name="A6XX_SP_UAV_DATA"/>
- <value value="0x33" name="A6XX_SP_INST_TAG"/>
- <value value="0x34" name="A6XX_SP_CB_BINDLESS_TAG"/>
- <value value="0x35" name="A6XX_SP_TMO_UMO_TAG"/>
- <value value="0x36" name="A6XX_SP_SMO_TAG"/>
- <value value="0x37" name="A6XX_SP_STATE_DATA"/>
- <value value="0x49" name="A6XX_HLSQ_CHUNK_CVS_RAM"/>
- <value value="0x4a" name="A6XX_HLSQ_CHUNK_CPS_RAM"/>
- <value value="0x4b" name="A6XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
- <value value="0x4c" name="A6XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
- <value value="0x4d" name="A6XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
- <value value="0x4e" name="A6XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
- <value value="0x50" name="A6XX_HLSQ_CVS_MISC_RAM"/>
- <value value="0x51" name="A6XX_HLSQ_CPS_MISC_RAM"/>
- <value value="0x52" name="A6XX_HLSQ_INST_RAM"/>
- <value value="0x53" name="A6XX_HLSQ_GFX_CVS_CONST_RAM"/>
- <value value="0x54" name="A6XX_HLSQ_GFX_CPS_CONST_RAM"/>
- <value value="0x55" name="A6XX_HLSQ_CVS_MISC_RAM_TAG"/>
- <value value="0x56" name="A6XX_HLSQ_CPS_MISC_RAM_TAG"/>
- <value value="0x57" name="A6XX_HLSQ_INST_RAM_TAG"/>
- <value value="0x58" name="A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
- <value value="0x59" name="A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
- <value value="0x5a" name="A6XX_HLSQ_PWR_REST_RAM"/>
- <value value="0x5b" name="A6XX_HLSQ_PWR_REST_TAG"/>
- <value value="0x60" name="A6XX_HLSQ_DATAPATH_META"/>
- <value value="0x61" name="A6XX_HLSQ_FRONTEND_META"/>
- <value value="0x62" name="A6XX_HLSQ_INDIRECT_META"/>
- <value value="0x63" name="A6XX_HLSQ_BACKEND_META"/>
- <value value="0x70" name="A6XX_SP_LB_6_DATA"/>
- <value value="0x71" name="A6XX_SP_LB_7_DATA"/>
- <value value="0x73" name="A6XX_HLSQ_INST_RAM_1"/>
-</enum>
-
-<enum name="a7xx_statetype_id">
- <value value="0" name="A7XX_TP0_NCTX_REG"/>
- <value value="1" name="A7XX_TP0_CTX0_3D_CVS_REG"/>
- <value value="2" name="A7XX_TP0_CTX0_3D_CPS_REG"/>
- <value value="3" name="A7XX_TP0_CTX1_3D_CVS_REG"/>
- <value value="4" name="A7XX_TP0_CTX1_3D_CPS_REG"/>
- <value value="5" name="A7XX_TP0_CTX2_3D_CPS_REG"/>
- <value value="6" name="A7XX_TP0_CTX3_3D_CPS_REG"/>
- <value value="9" name="A7XX_TP0_TMO_DATA"/>
- <value value="10" name="A7XX_TP0_SMO_DATA"/>
- <value value="11" name="A7XX_TP0_MIPMAP_BASE_DATA"/>
- <value value="32" name="A7XX_SP_NCTX_REG"/>
- <value value="33" name="A7XX_SP_CTX0_3D_CVS_REG"/>
- <value value="34" name="A7XX_SP_CTX0_3D_CPS_REG"/>
- <value value="35" name="A7XX_SP_CTX1_3D_CVS_REG"/>
- <value value="36" name="A7XX_SP_CTX1_3D_CPS_REG"/>
- <value value="37" name="A7XX_SP_CTX2_3D_CPS_REG"/>
- <value value="38" name="A7XX_SP_CTX3_3D_CPS_REG"/>
- <value value="39" name="A7XX_SP_INST_DATA"/>
- <value value="40" name="A7XX_SP_INST_DATA_1"/>
- <value value="41" name="A7XX_SP_LB_0_DATA"/>
- <value value="42" name="A7XX_SP_LB_1_DATA"/>
- <value value="43" name="A7XX_SP_LB_2_DATA"/>
- <value value="44" name="A7XX_SP_LB_3_DATA"/>
- <value value="45" name="A7XX_SP_LB_4_DATA"/>
- <value value="46" name="A7XX_SP_LB_5_DATA"/>
- <value value="47" name="A7XX_SP_LB_6_DATA"/>
- <value value="48" name="A7XX_SP_LB_7_DATA"/>
- <value value="49" name="A7XX_SP_CB_RAM"/>
- <value value="50" name="A7XX_SP_LB_13_DATA"/>
- <value value="51" name="A7XX_SP_LB_14_DATA"/>
- <value value="52" name="A7XX_SP_INST_TAG"/>
- <value value="53" name="A7XX_SP_INST_DATA_2"/>
- <value value="54" name="A7XX_SP_TMO_TAG"/>
- <value value="55" name="A7XX_SP_SMO_TAG"/>
- <value value="56" name="A7XX_SP_STATE_DATA"/>
- <value value="57" name="A7XX_SP_HWAVE_RAM"/>
- <value value="58" name="A7XX_SP_L0_INST_BUF"/>
- <value value="59" name="A7XX_SP_LB_8_DATA"/>
- <value value="60" name="A7XX_SP_LB_9_DATA"/>
- <value value="61" name="A7XX_SP_LB_10_DATA"/>
- <value value="62" name="A7XX_SP_LB_11_DATA"/>
- <value value="63" name="A7XX_SP_LB_12_DATA"/>
- <value value="64" name="A7XX_HLSQ_DATAPATH_DSTR_META"/>
- <value value="67" name="A7XX_HLSQ_L2STC_TAG_RAM"/>
- <value value="68" name="A7XX_HLSQ_L2STC_INFO_CMD"/>
- <value value="69" name="A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG"/>
- <value value="70" name="A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG"/>
- <value value="71" name="A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM"/>
- <value value="72" name="A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM"/>
- <value value="73" name="A7XX_HLSQ_CHUNK_CVS_RAM"/>
- <value value="74" name="A7XX_HLSQ_CHUNK_CPS_RAM"/>
- <value value="75" name="A7XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
- <value value="76" name="A7XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
- <value value="77" name="A7XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
- <value value="78" name="A7XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
- <value value="79" name="A7XX_HLSQ_CVS_MISC_RAM"/>
- <value value="80" name="A7XX_HLSQ_CPS_MISC_RAM"/>
- <value value="81" name="A7XX_HLSQ_CPS_MISC_RAM_1"/>
- <value value="82" name="A7XX_HLSQ_INST_RAM"/>
- <value value="83" name="A7XX_HLSQ_GFX_CVS_CONST_RAM"/>
- <value value="84" name="A7XX_HLSQ_GFX_CPS_CONST_RAM"/>
- <value value="85" name="A7XX_HLSQ_CVS_MISC_RAM_TAG"/>
- <value value="86" name="A7XX_HLSQ_CPS_MISC_RAM_TAG"/>
- <value value="87" name="A7XX_HLSQ_INST_RAM_TAG"/>
- <value value="88" name="A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
- <value value="89" name="A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
- <value value="90" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM"/>
- <value value="91" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG"/>
- <value value="92" name="A7XX_HLSQ_INST_RAM_1"/>
- <value value="93" name="A7XX_HLSQ_STPROC_META"/>
- <value value="94" name="A7XX_HLSQ_BV_BE_META"/>
- <value value="95" name="A7XX_HLSQ_INST_RAM_2"/>
- <value value="96" name="A7XX_HLSQ_DATAPATH_META"/>
- <value value="97" name="A7XX_HLSQ_FRONTEND_META"/>
- <value value="98" name="A7XX_HLSQ_INDIRECT_META"/>
- <value value="99" name="A7XX_HLSQ_BACKEND_META"/>
-</enum>
-
-<enum name="a6xx_debugbus_id">
- <value value="0x1" name="A6XX_DBGBUS_CP"/>
- <value value="0x2" name="A6XX_DBGBUS_RBBM"/>
- <value value="0x3" name="A6XX_DBGBUS_VBIF"/>
- <value value="0x4" name="A6XX_DBGBUS_HLSQ"/>
- <value value="0x5" name="A6XX_DBGBUS_UCHE"/>
- <value value="0x6" name="A6XX_DBGBUS_DPM"/>
- <value value="0x7" name="A6XX_DBGBUS_TESS"/>
- <value value="0x8" name="A6XX_DBGBUS_PC"/>
- <value value="0x9" name="A6XX_DBGBUS_VFDP"/>
- <value value="0xa" name="A6XX_DBGBUS_VPC"/>
- <value value="0xb" name="A6XX_DBGBUS_TSE"/>
- <value value="0xc" name="A6XX_DBGBUS_RAS"/>
- <value value="0xd" name="A6XX_DBGBUS_VSC"/>
- <value value="0xe" name="A6XX_DBGBUS_COM"/>
- <value value="0x10" name="A6XX_DBGBUS_LRZ"/>
- <value value="0x11" name="A6XX_DBGBUS_A2D"/>
- <value value="0x12" name="A6XX_DBGBUS_CCUFCHE"/>
- <value value="0x13" name="A6XX_DBGBUS_GMU_CX"/>
- <value value="0x14" name="A6XX_DBGBUS_RBP"/>
- <value value="0x15" name="A6XX_DBGBUS_DCS"/>
- <value value="0x16" name="A6XX_DBGBUS_DBGC"/>
- <value value="0x17" name="A6XX_DBGBUS_CX"/>
- <value value="0x18" name="A6XX_DBGBUS_GMU_GX"/>
- <value value="0x19" name="A6XX_DBGBUS_TPFCHE"/>
- <value value="0x1a" name="A6XX_DBGBUS_GBIF_GX"/>
- <value value="0x1d" name="A6XX_DBGBUS_GPC"/>
- <value value="0x1e" name="A6XX_DBGBUS_LARC"/>
- <value value="0x1f" name="A6XX_DBGBUS_HLSQ_SPTP"/>
- <value value="0x20" name="A6XX_DBGBUS_RB_0"/>
- <value value="0x21" name="A6XX_DBGBUS_RB_1"/>
- <value value="0x22" name="A6XX_DBGBUS_RB_2"/>
- <value value="0x24" name="A6XX_DBGBUS_UCHE_WRAPPER"/>
- <value value="0x28" name="A6XX_DBGBUS_CCU_0"/>
- <value value="0x29" name="A6XX_DBGBUS_CCU_1"/>
- <value value="0x2a" name="A6XX_DBGBUS_CCU_2"/>
- <value value="0x38" name="A6XX_DBGBUS_VFD_0"/>
- <value value="0x39" name="A6XX_DBGBUS_VFD_1"/>
- <value value="0x3a" name="A6XX_DBGBUS_VFD_2"/>
- <value value="0x3b" name="A6XX_DBGBUS_VFD_3"/>
- <value value="0x3c" name="A6XX_DBGBUS_VFD_4"/>
- <value value="0x3d" name="A6XX_DBGBUS_VFD_5"/>
- <value value="0x40" name="A6XX_DBGBUS_SP_0"/>
- <value value="0x41" name="A6XX_DBGBUS_SP_1"/>
- <value value="0x42" name="A6XX_DBGBUS_SP_2"/>
- <value value="0x48" name="A6XX_DBGBUS_TPL1_0"/>
- <value value="0x49" name="A6XX_DBGBUS_TPL1_1"/>
- <value value="0x4a" name="A6XX_DBGBUS_TPL1_2"/>
- <value value="0x4b" name="A6XX_DBGBUS_TPL1_3"/>
- <value value="0x4c" name="A6XX_DBGBUS_TPL1_4"/>
- <value value="0x4d" name="A6XX_DBGBUS_TPL1_5"/>
- <value value="0x58" name="A6XX_DBGBUS_SPTP_0"/>
- <value value="0x59" name="A6XX_DBGBUS_SPTP_1"/>
- <value value="0x5a" name="A6XX_DBGBUS_SPTP_2"/>
- <value value="0x5b" name="A6XX_DBGBUS_SPTP_3"/>
- <value value="0x5c" name="A6XX_DBGBUS_SPTP_4"/>
- <value value="0x5d" name="A6XX_DBGBUS_SPTP_5"/>
-</enum>
-
-<enum name="a7xx_state_location">
- <value value="0" name="A7XX_HLSQ_STATE"/>
- <value value="1" name="A7XX_HLSQ_DP"/>
- <value value="2" name="A7XX_SP_TOP"/>
- <value value="3" name="A7XX_USPTP"/>
- <value value="4" name="A7XX_HLSQ_DP_STR"/>
-</enum>
-
-<enum name="a7xx_pipe">
- <value value="0" name="A7XX_PIPE_NONE"/>
- <value value="1" name="A7XX_PIPE_BR"/>
- <value value="2" name="A7XX_PIPE_BV"/>
- <value value="3" name="A7XX_PIPE_LPAC"/>
-</enum>
-
-<enum name="a7xx_cluster">
- <value value="0" name="A7XX_CLUSTER_NONE"/>
- <value value="1" name="A7XX_CLUSTER_FE"/>
- <value value="2" name="A7XX_CLUSTER_SP_VS"/>
- <value value="3" name="A7XX_CLUSTER_PC_VS"/>
- <value value="4" name="A7XX_CLUSTER_GRAS"/>
- <value value="5" name="A7XX_CLUSTER_SP_PS"/>
- <value value="6" name="A7XX_CLUSTER_VPC_PS"/>
- <value value="7" name="A7XX_CLUSTER_PS"/>
-</enum>
-
-<enum name="a7xx_debugbus_id">
- <value value="1" name="A7XX_DBGBUS_CP_0_0"/>
- <value value="2" name="A7XX_DBGBUS_CP_0_1"/>
- <value value="3" name="A7XX_DBGBUS_RBBM"/>
- <value value="5" name="A7XX_DBGBUS_GBIF_GX"/>
- <value value="6" name="A7XX_DBGBUS_GBIF_CX"/>
- <value value="7" name="A7XX_DBGBUS_HLSQ"/>
- <value value="9" name="A7XX_DBGBUS_UCHE_0"/>
- <value value="10" name="A7XX_DBGBUS_UCHE_1"/>
- <value value="13" name="A7XX_DBGBUS_TESS_BR"/>
- <value value="14" name="A7XX_DBGBUS_TESS_BV"/>
- <value value="17" name="A7XX_DBGBUS_PC_BR"/>
- <value value="18" name="A7XX_DBGBUS_PC_BV"/>
- <value value="21" name="A7XX_DBGBUS_VFDP_BR"/>
- <value value="22" name="A7XX_DBGBUS_VFDP_BV"/>
- <value value="25" name="A7XX_DBGBUS_VPC_BR"/>
- <value value="26" name="A7XX_DBGBUS_VPC_BV"/>
- <value value="29" name="A7XX_DBGBUS_TSE_BR"/>
- <value value="30" name="A7XX_DBGBUS_TSE_BV"/>
- <value value="33" name="A7XX_DBGBUS_RAS_BR"/>
- <value value="34" name="A7XX_DBGBUS_RAS_BV"/>
- <value value="37" name="A7XX_DBGBUS_VSC"/>
- <value value="39" name="A7XX_DBGBUS_COM_0"/>
- <value value="43" name="A7XX_DBGBUS_LRZ_BR"/>
- <value value="44" name="A7XX_DBGBUS_LRZ_BV"/>
- <value value="47" name="A7XX_DBGBUS_UFC_0"/>
- <value value="48" name="A7XX_DBGBUS_UFC_1"/>
- <value value="55" name="A7XX_DBGBUS_GMU_GX"/>
- <value value="59" name="A7XX_DBGBUS_DBGC"/>
- <value value="60" name="A7XX_DBGBUS_CX"/>
- <value value="61" name="A7XX_DBGBUS_GMU_CX"/>
- <value value="62" name="A7XX_DBGBUS_GPC_BR"/>
- <value value="63" name="A7XX_DBGBUS_GPC_BV"/>
- <value value="66" name="A7XX_DBGBUS_LARC"/>
- <value value="68" name="A7XX_DBGBUS_HLSQ_SPTP"/>
- <value value="70" name="A7XX_DBGBUS_RB_0"/>
- <value value="71" name="A7XX_DBGBUS_RB_1"/>
- <value value="72" name="A7XX_DBGBUS_RB_2"/>
- <value value="73" name="A7XX_DBGBUS_RB_3"/>
- <value value="74" name="A7XX_DBGBUS_RB_4"/>
- <value value="75" name="A7XX_DBGBUS_RB_5"/>
- <value value="102" name="A7XX_DBGBUS_UCHE_WRAPPER"/>
- <value value="106" name="A7XX_DBGBUS_CCU_0"/>
- <value value="107" name="A7XX_DBGBUS_CCU_1"/>
- <value value="108" name="A7XX_DBGBUS_CCU_2"/>
- <value value="109" name="A7XX_DBGBUS_CCU_3"/>
- <value value="110" name="A7XX_DBGBUS_CCU_4"/>
- <value value="111" name="A7XX_DBGBUS_CCU_5"/>
- <value value="138" name="A7XX_DBGBUS_VFD_BR_0"/>
- <value value="139" name="A7XX_DBGBUS_VFD_BR_1"/>
- <value value="140" name="A7XX_DBGBUS_VFD_BR_2"/>
- <value value="141" name="A7XX_DBGBUS_VFD_BR_3"/>
- <value value="142" name="A7XX_DBGBUS_VFD_BR_4"/>
- <value value="143" name="A7XX_DBGBUS_VFD_BR_5"/>
- <value value="144" name="A7XX_DBGBUS_VFD_BR_6"/>
- <value value="145" name="A7XX_DBGBUS_VFD_BR_7"/>
- <value value="202" name="A7XX_DBGBUS_VFD_BV_0"/>
- <value value="203" name="A7XX_DBGBUS_VFD_BV_1"/>
- <value value="204" name="A7XX_DBGBUS_VFD_BV_2"/>
- <value value="205" name="A7XX_DBGBUS_VFD_BV_3"/>
- <value value="234" name="A7XX_DBGBUS_USP_0"/>
- <value value="235" name="A7XX_DBGBUS_USP_1"/>
- <value value="236" name="A7XX_DBGBUS_USP_2"/>
- <value value="237" name="A7XX_DBGBUS_USP_3"/>
- <value value="238" name="A7XX_DBGBUS_USP_4"/>
- <value value="239" name="A7XX_DBGBUS_USP_5"/>
- <value value="266" name="A7XX_DBGBUS_TP_0"/>
- <value value="267" name="A7XX_DBGBUS_TP_1"/>
- <value value="268" name="A7XX_DBGBUS_TP_2"/>
- <value value="269" name="A7XX_DBGBUS_TP_3"/>
- <value value="270" name="A7XX_DBGBUS_TP_4"/>
- <value value="271" name="A7XX_DBGBUS_TP_5"/>
- <value value="272" name="A7XX_DBGBUS_TP_6"/>
- <value value="273" name="A7XX_DBGBUS_TP_7"/>
- <value value="274" name="A7XX_DBGBUS_TP_8"/>
- <value value="275" name="A7XX_DBGBUS_TP_9"/>
- <value value="276" name="A7XX_DBGBUS_TP_10"/>
- <value value="277" name="A7XX_DBGBUS_TP_11"/>
- <value value="330" name="A7XX_DBGBUS_USPTP_0"/>
- <value value="331" name="A7XX_DBGBUS_USPTP_1"/>
- <value value="332" name="A7XX_DBGBUS_USPTP_2"/>
- <value value="333" name="A7XX_DBGBUS_USPTP_3"/>
- <value value="334" name="A7XX_DBGBUS_USPTP_4"/>
- <value value="335" name="A7XX_DBGBUS_USPTP_5"/>
- <value value="336" name="A7XX_DBGBUS_USPTP_6"/>
- <value value="337" name="A7XX_DBGBUS_USPTP_7"/>
- <value value="338" name="A7XX_DBGBUS_USPTP_8"/>
- <value value="339" name="A7XX_DBGBUS_USPTP_9"/>
- <value value="340" name="A7XX_DBGBUS_USPTP_10"/>
- <value value="341" name="A7XX_DBGBUS_USPTP_11"/>
- <value value="396" name="A7XX_DBGBUS_CCHE_0"/>
- <value value="397" name="A7XX_DBGBUS_CCHE_1"/>
- <value value="398" name="A7XX_DBGBUS_CCHE_2"/>
- <value value="408" name="A7XX_DBGBUS_VPC_DSTR_0"/>
- <value value="409" name="A7XX_DBGBUS_VPC_DSTR_1"/>
- <value value="410" name="A7XX_DBGBUS_VPC_DSTR_2"/>
- <value value="411" name="A7XX_DBGBUS_HLSQ_DP_STR_0"/>
- <value value="412" name="A7XX_DBGBUS_HLSQ_DP_STR_1"/>
- <value value="413" name="A7XX_DBGBUS_HLSQ_DP_STR_2"/>
- <value value="414" name="A7XX_DBGBUS_HLSQ_DP_STR_3"/>
- <value value="415" name="A7XX_DBGBUS_HLSQ_DP_STR_4"/>
- <value value="416" name="A7XX_DBGBUS_HLSQ_DP_STR_5"/>
- <value value="443" name="A7XX_DBGBUS_UFC_DSTR_0"/>
- <value value="444" name="A7XX_DBGBUS_UFC_DSTR_1"/>
- <value value="445" name="A7XX_DBGBUS_UFC_DSTR_2"/>
- <value value="446" name="A7XX_DBGBUS_CGC_SUBCORE"/>
- <value value="447" name="A7XX_DBGBUS_CGC_CORE"/>
-</enum>
-
-<enum name="a6xx_cp_perfcounter_select">
- <value value="0" name="PERF_CP_ALWAYS_COUNT"/>
- <value value="1" name="PERF_CP_BUSY_GFX_CORE_IDLE"/>
- <value value="2" name="PERF_CP_BUSY_CYCLES"/>
- <value value="3" name="PERF_CP_NUM_PREEMPTIONS"/>
- <value value="4" name="PERF_CP_PREEMPTION_REACTION_DELAY"/>
- <value value="5" name="PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
- <value value="6" name="PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
- <value value="7" name="PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
- <value value="8" name="PERF_CP_PREDICATED_DRAWS_KILLED"/>
- <value value="9" name="PERF_CP_MODE_SWITCH"/>
- <value value="10" name="PERF_CP_ZPASS_DONE"/>
- <value value="11" name="PERF_CP_CONTEXT_DONE"/>
- <value value="12" name="PERF_CP_CACHE_FLUSH"/>
- <value value="13" name="PERF_CP_LONG_PREEMPTIONS"/>
- <value value="14" name="PERF_CP_SQE_I_CACHE_STARVE"/>
- <value value="15" name="PERF_CP_SQE_IDLE"/>
- <value value="16" name="PERF_CP_SQE_PM4_STARVE_RB_IB"/>
- <value value="17" name="PERF_CP_SQE_PM4_STARVE_SDS"/>
- <value value="18" name="PERF_CP_SQE_MRB_STARVE"/>
- <value value="19" name="PERF_CP_SQE_RRB_STARVE"/>
- <value value="20" name="PERF_CP_SQE_VSD_STARVE"/>
- <value value="21" name="PERF_CP_VSD_DECODE_STARVE"/>
- <value value="22" name="PERF_CP_SQE_PIPE_OUT_STALL"/>
- <value value="23" name="PERF_CP_SQE_SYNC_STALL"/>
- <value value="24" name="PERF_CP_SQE_PM4_WFI_STALL"/>
- <value value="25" name="PERF_CP_SQE_SYS_WFI_STALL"/>
- <value value="26" name="PERF_CP_SQE_T4_EXEC"/>
- <value value="27" name="PERF_CP_SQE_LOAD_STATE_EXEC"/>
- <value value="28" name="PERF_CP_SQE_SAVE_SDS_STATE"/>
- <value value="29" name="PERF_CP_SQE_DRAW_EXEC"/>
- <value value="30" name="PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
- <value value="31" name="PERF_CP_SQE_EXEC_PROFILED"/>
- <value value="32" name="PERF_CP_MEMORY_POOL_EMPTY"/>
- <value value="33" name="PERF_CP_MEMORY_POOL_SYNC_STALL"/>
- <value value="34" name="PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
- <value value="35" name="PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
- <value value="36" name="PERF_CP_AHB_STALL_SQE_GMU"/>
- <value value="37" name="PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
- <value value="38" name="PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
- <value value="39" name="PERF_CP_CLUSTER0_EMPTY"/>
- <value value="40" name="PERF_CP_CLUSTER1_EMPTY"/>
- <value value="41" name="PERF_CP_CLUSTER2_EMPTY"/>
- <value value="42" name="PERF_CP_CLUSTER3_EMPTY"/>
- <value value="43" name="PERF_CP_CLUSTER4_EMPTY"/>
- <value value="44" name="PERF_CP_CLUSTER5_EMPTY"/>
- <value value="45" name="PERF_CP_PM4_DATA"/>
- <value value="46" name="PERF_CP_PM4_HEADERS"/>
- <value value="47" name="PERF_CP_VBIF_READ_BEATS"/>
- <value value="48" name="PERF_CP_VBIF_WRITE_BEATS"/>
- <value value="49" name="PERF_CP_SQE_INSTR_COUNTER"/>
-</enum>
-
-<enum name="a6xx_rbbm_perfcounter_select">
- <value value="0" name="PERF_RBBM_ALWAYS_COUNT"/>
- <value value="1" name="PERF_RBBM_ALWAYS_ON"/>
- <value value="2" name="PERF_RBBM_TSE_BUSY"/>
- <value value="3" name="PERF_RBBM_RAS_BUSY"/>
- <value value="4" name="PERF_RBBM_PC_DCALL_BUSY"/>
- <value value="5" name="PERF_RBBM_PC_VSD_BUSY"/>
- <value value="6" name="PERF_RBBM_STATUS_MASKED"/>
- <value value="7" name="PERF_RBBM_COM_BUSY"/>
- <value value="8" name="PERF_RBBM_DCOM_BUSY"/>
- <value value="9" name="PERF_RBBM_VBIF_BUSY"/>
- <value value="10" name="PERF_RBBM_VSC_BUSY"/>
- <value value="11" name="PERF_RBBM_TESS_BUSY"/>
- <value value="12" name="PERF_RBBM_UCHE_BUSY"/>
- <value value="13" name="PERF_RBBM_HLSQ_BUSY"/>
-</enum>
-
-<enum name="a6xx_pc_perfcounter_select">
- <value value="0" name="PERF_PC_BUSY_CYCLES"/>
- <value value="1" name="PERF_PC_WORKING_CYCLES"/>
- <value value="2" name="PERF_PC_STALL_CYCLES_VFD"/>
- <value value="3" name="PERF_PC_STALL_CYCLES_TSE"/>
- <value value="4" name="PERF_PC_STALL_CYCLES_VPC"/>
- <value value="5" name="PERF_PC_STALL_CYCLES_UCHE"/>
- <value value="6" name="PERF_PC_STALL_CYCLES_TESS"/>
- <value value="7" name="PERF_PC_STALL_CYCLES_TSE_ONLY"/>
- <value value="8" name="PERF_PC_STALL_CYCLES_VPC_ONLY"/>
- <value value="9" name="PERF_PC_PASS1_TF_STALL_CYCLES"/>
- <value value="10" name="PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
- <value value="11" name="PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
- <value value="12" name="PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
- <value value="13" name="PERF_PC_STARVE_CYCLES_FOR_POSITION"/>
- <value value="14" name="PERF_PC_STARVE_CYCLES_DI"/>
- <value value="15" name="PERF_PC_VIS_STREAMS_LOADED"/>
- <value value="16" name="PERF_PC_INSTANCES"/>
- <value value="17" name="PERF_PC_VPC_PRIMITIVES"/>
- <value value="18" name="PERF_PC_DEAD_PRIM"/>
- <value value="19" name="PERF_PC_LIVE_PRIM"/>
- <value value="20" name="PERF_PC_VERTEX_HITS"/>
- <value value="21" name="PERF_PC_IA_VERTICES"/>
- <value value="22" name="PERF_PC_IA_PRIMITIVES"/>
- <value value="23" name="PERF_PC_GS_PRIMITIVES"/>
- <value value="24" name="PERF_PC_HS_INVOCATIONS"/>
- <value value="25" name="PERF_PC_DS_INVOCATIONS"/>
- <value value="26" name="PERF_PC_VS_INVOCATIONS"/>
- <value value="27" name="PERF_PC_GS_INVOCATIONS"/>
- <value value="28" name="PERF_PC_DS_PRIMITIVES"/>
- <value value="29" name="PERF_PC_VPC_POS_DATA_TRANSACTION"/>
- <value value="30" name="PERF_PC_3D_DRAWCALLS"/>
- <value value="31" name="PERF_PC_2D_DRAWCALLS"/>
- <value value="32" name="PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
- <value value="33" name="PERF_TESS_BUSY_CYCLES"/>
- <value value="34" name="PERF_TESS_WORKING_CYCLES"/>
- <value value="35" name="PERF_TESS_STALL_CYCLES_PC"/>
- <value value="36" name="PERF_TESS_STARVE_CYCLES_PC"/>
- <value value="37" name="PERF_PC_TSE_TRANSACTION"/>
- <value value="38" name="PERF_PC_TSE_VERTEX"/>
- <value value="39" name="PERF_PC_TESS_PC_UV_TRANS"/>
- <value value="40" name="PERF_PC_TESS_PC_UV_PATCHES"/>
- <value value="41" name="PERF_PC_TESS_FACTOR_TRANS"/>
-</enum>
-
-<enum name="a6xx_vfd_perfcounter_select">
- <value value="0" name="PERF_VFD_BUSY_CYCLES"/>
- <value value="1" name="PERF_VFD_STALL_CYCLES_UCHE"/>
- <value value="2" name="PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
- <value value="3" name="PERF_VFD_STALL_CYCLES_SP_INFO"/>
- <value value="4" name="PERF_VFD_STALL_CYCLES_SP_ATTR"/>
- <value value="5" name="PERF_VFD_STARVE_CYCLES_UCHE"/>
- <value value="6" name="PERF_VFD_RBUFFER_FULL"/>
- <value value="7" name="PERF_VFD_ATTR_INFO_FIFO_FULL"/>
- <value value="8" name="PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
- <value value="9" name="PERF_VFD_NUM_ATTRIBUTES"/>
- <value value="10" name="PERF_VFD_UPPER_SHADER_FIBERS"/>
- <value value="11" name="PERF_VFD_LOWER_SHADER_FIBERS"/>
- <value value="12" name="PERF_VFD_MODE_0_FIBERS"/>
- <value value="13" name="PERF_VFD_MODE_1_FIBERS"/>
- <value value="14" name="PERF_VFD_MODE_2_FIBERS"/>
- <value value="15" name="PERF_VFD_MODE_3_FIBERS"/>
- <value value="16" name="PERF_VFD_MODE_4_FIBERS"/>
- <value value="17" name="PERF_VFD_TOTAL_VERTICES"/>
- <value value="18" name="PERF_VFDP_STALL_CYCLES_VFD"/>
- <value value="19" name="PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
- <value value="20" name="PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
- <value value="21" name="PERF_VFDP_STARVE_CYCLES_PC"/>
- <value value="22" name="PERF_VFDP_VS_STAGE_WAVES"/>
-</enum>
-
-<enum name="a6xx_hlsq_perfcounter_select">
- <value value="0" name="PERF_HLSQ_BUSY_CYCLES"/>
- <value value="1" name="PERF_HLSQ_STALL_CYCLES_UCHE"/>
- <value value="2" name="PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
- <value value="3" name="PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
- <value value="4" name="PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
- <value value="5" name="PERF_HLSQ_UCHE_LATENCY_COUNT"/>
- <value value="6" name="PERF_HLSQ_FS_STAGE_1X_WAVES"/>
- <value value="7" name="PERF_HLSQ_FS_STAGE_2X_WAVES"/>
- <value value="8" name="PERF_HLSQ_QUADS"/>
- <value value="9" name="PERF_HLSQ_CS_INVOCATIONS"/>
- <value value="10" name="PERF_HLSQ_COMPUTE_DRAWCALLS"/>
- <value value="11" name="PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
- <value value="12" name="PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
- <value value="13" name="PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
- <value value="14" name="PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
- <value value="15" name="PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
- <value value="16" name="PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
- <value value="17" name="PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
- <value value="18" name="PERF_HLSQ_STALL_CYCLES_VPC"/>
- <value value="19" name="PERF_HLSQ_PIXELS"/>
- <value value="20" name="PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
-</enum>
-
-<enum name="a6xx_vpc_perfcounter_select">
- <value value="0" name="PERF_VPC_BUSY_CYCLES"/>
- <value value="1" name="PERF_VPC_WORKING_CYCLES"/>
- <value value="2" name="PERF_VPC_STALL_CYCLES_UCHE"/>
- <value value="3" name="PERF_VPC_STALL_CYCLES_VFD_WACK"/>
- <value value="4" name="PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
- <value value="5" name="PERF_VPC_STALL_CYCLES_PC"/>
- <value value="6" name="PERF_VPC_STALL_CYCLES_SP_LM"/>
- <value value="7" name="PERF_VPC_STARVE_CYCLES_SP"/>
- <value value="8" name="PERF_VPC_STARVE_CYCLES_LRZ"/>
- <value value="9" name="PERF_VPC_PC_PRIMITIVES"/>
- <value value="10" name="PERF_VPC_SP_COMPONENTS"/>
- <value value="11" name="PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
- <value value="12" name="PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
- <value value="13" name="PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
- <value value="14" name="PERF_VPC_LM_TRANSACTION"/>
- <value value="15" name="PERF_VPC_STREAMOUT_TRANSACTION"/>
- <value value="16" name="PERF_VPC_VS_BUSY_CYCLES"/>
- <value value="17" name="PERF_VPC_PS_BUSY_CYCLES"/>
- <value value="18" name="PERF_VPC_VS_WORKING_CYCLES"/>
- <value value="19" name="PERF_VPC_PS_WORKING_CYCLES"/>
- <value value="20" name="PERF_VPC_STARVE_CYCLES_RB"/>
- <value value="21" name="PERF_VPC_NUM_VPCRAM_READ_POS"/>
- <value value="22" name="PERF_VPC_WIT_FULL_CYCLES"/>
- <value value="23" name="PERF_VPC_VPCRAM_FULL_CYCLES"/>
- <value value="24" name="PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
- <value value="25" name="PERF_VPC_NUM_VPCRAM_WRITE"/>
- <value value="26" name="PERF_VPC_NUM_VPCRAM_READ_SO"/>
- <value value="27" name="PERF_VPC_NUM_ATTR_REQ_LM"/>
-</enum>
-
-<enum name="a6xx_tse_perfcounter_select">
- <value value="0" name="PERF_TSE_BUSY_CYCLES"/>
- <value value="1" name="PERF_TSE_CLIPPING_CYCLES"/>
- <value value="2" name="PERF_TSE_STALL_CYCLES_RAS"/>
- <value value="3" name="PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
- <value value="4" name="PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
- <value value="5" name="PERF_TSE_STARVE_CYCLES_PC"/>
- <value value="6" name="PERF_TSE_INPUT_PRIM"/>
- <value value="7" name="PERF_TSE_INPUT_NULL_PRIM"/>
- <value value="8" name="PERF_TSE_TRIVAL_REJ_PRIM"/>
- <value value="9" name="PERF_TSE_CLIPPED_PRIM"/>
- <value value="10" name="PERF_TSE_ZERO_AREA_PRIM"/>
- <value value="11" name="PERF_TSE_FACENESS_CULLED_PRIM"/>
- <value value="12" name="PERF_TSE_ZERO_PIXEL_PRIM"/>
- <value value="13" name="PERF_TSE_OUTPUT_NULL_PRIM"/>
- <value value="14" name="PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
- <value value="15" name="PERF_TSE_CINVOCATION"/>
- <value value="16" name="PERF_TSE_CPRIMITIVES"/>
- <value value="17" name="PERF_TSE_2D_INPUT_PRIM"/>
- <value value="18" name="PERF_TSE_2D_ALIVE_CYCLES"/>
- <value value="19" name="PERF_TSE_CLIP_PLANES"/>
-</enum>
-
-<enum name="a6xx_ras_perfcounter_select">
- <value value="0" name="PERF_RAS_BUSY_CYCLES"/>
- <value value="1" name="PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
- <value value="2" name="PERF_RAS_STALL_CYCLES_LRZ"/>
- <value value="3" name="PERF_RAS_STARVE_CYCLES_TSE"/>
- <value value="4" name="PERF_RAS_SUPER_TILES"/>
- <value value="5" name="PERF_RAS_8X4_TILES"/>
- <value value="6" name="PERF_RAS_MASKGEN_ACTIVE"/>
- <value value="7" name="PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
- <value value="8" name="PERF_RAS_FULLY_COVERED_8X4_TILES"/>
- <value value="9" name="PERF_RAS_PRIM_KILLED_INVISILBE"/>
- <value value="10" name="PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
- <value value="11" name="PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
- <value value="12" name="PERF_RAS_BLOCKS"/>
-</enum>
-
-<enum name="a6xx_uche_perfcounter_select">
- <value value="0" name="PERF_UCHE_BUSY_CYCLES"/>
- <value value="1" name="PERF_UCHE_STALL_CYCLES_ARBITER"/>
- <value value="2" name="PERF_UCHE_VBIF_LATENCY_CYCLES"/>
- <value value="3" name="PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
- <value value="4" name="PERF_UCHE_VBIF_READ_BEATS_TP"/>
- <value value="5" name="PERF_UCHE_VBIF_READ_BEATS_VFD"/>
- <value value="6" name="PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
- <value value="7" name="PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
- <value value="8" name="PERF_UCHE_VBIF_READ_BEATS_SP"/>
- <value value="9" name="PERF_UCHE_READ_REQUESTS_TP"/>
- <value value="10" name="PERF_UCHE_READ_REQUESTS_VFD"/>
- <value value="11" name="PERF_UCHE_READ_REQUESTS_HLSQ"/>
- <value value="12" name="PERF_UCHE_READ_REQUESTS_LRZ"/>
- <value value="13" name="PERF_UCHE_READ_REQUESTS_SP"/>
- <value value="14" name="PERF_UCHE_WRITE_REQUESTS_LRZ"/>
- <value value="15" name="PERF_UCHE_WRITE_REQUESTS_SP"/>
- <value value="16" name="PERF_UCHE_WRITE_REQUESTS_VPC"/>
- <value value="17" name="PERF_UCHE_WRITE_REQUESTS_VSC"/>
- <value value="18" name="PERF_UCHE_EVICTS"/>
- <value value="19" name="PERF_UCHE_BANK_REQ0"/>
- <value value="20" name="PERF_UCHE_BANK_REQ1"/>
- <value value="21" name="PERF_UCHE_BANK_REQ2"/>
- <value value="22" name="PERF_UCHE_BANK_REQ3"/>
- <value value="23" name="PERF_UCHE_BANK_REQ4"/>
- <value value="24" name="PERF_UCHE_BANK_REQ5"/>
- <value value="25" name="PERF_UCHE_BANK_REQ6"/>
- <value value="26" name="PERF_UCHE_BANK_REQ7"/>
- <value value="27" name="PERF_UCHE_VBIF_READ_BEATS_CH0"/>
- <value value="28" name="PERF_UCHE_VBIF_READ_BEATS_CH1"/>
- <value value="29" name="PERF_UCHE_GMEM_READ_BEATS"/>
- <value value="30" name="PERF_UCHE_TPH_REF_FULL"/>
- <value value="31" name="PERF_UCHE_TPH_VICTIM_FULL"/>
- <value value="32" name="PERF_UCHE_TPH_EXT_FULL"/>
- <value value="33" name="PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
- <value value="34" name="PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
- <value value="35" name="PERF_UCHE_DCMP_LATENCY_CYCLES"/>
- <value value="36" name="PERF_UCHE_VBIF_READ_BEATS_PC"/>
- <value value="37" name="PERF_UCHE_READ_REQUESTS_PC"/>
- <value value="38" name="PERF_UCHE_RAM_READ_REQ"/>
- <value value="39" name="PERF_UCHE_RAM_WRITE_REQ"/>
-</enum>
-
-<enum name="a6xx_tp_perfcounter_select">
- <value value="0" name="PERF_TP_BUSY_CYCLES"/>
- <value value="1" name="PERF_TP_STALL_CYCLES_UCHE"/>
- <value value="2" name="PERF_TP_LATENCY_CYCLES"/>
- <value value="3" name="PERF_TP_LATENCY_TRANS"/>
- <value value="4" name="PERF_TP_FLAG_CACHE_REQUEST_SAMPLES"/>
- <value value="5" name="PERF_TP_FLAG_CACHE_REQUEST_LATENCY"/>
- <value value="6" name="PERF_TP_L1_CACHELINE_REQUESTS"/>
- <value value="7" name="PERF_TP_L1_CACHELINE_MISSES"/>
- <value value="8" name="PERF_TP_SP_TP_TRANS"/>
- <value value="9" name="PERF_TP_TP_SP_TRANS"/>
- <value value="10" name="PERF_TP_OUTPUT_PIXELS"/>
- <value value="11" name="PERF_TP_FILTER_WORKLOAD_16BIT"/>
- <value value="12" name="PERF_TP_FILTER_WORKLOAD_32BIT"/>
- <value value="13" name="PERF_TP_QUADS_RECEIVED"/>
- <value value="14" name="PERF_TP_QUADS_OFFSET"/>
- <value value="15" name="PERF_TP_QUADS_SHADOW"/>
- <value value="16" name="PERF_TP_QUADS_ARRAY"/>
- <value value="17" name="PERF_TP_QUADS_GRADIENT"/>
- <value value="18" name="PERF_TP_QUADS_1D"/>
- <value value="19" name="PERF_TP_QUADS_2D"/>
- <value value="20" name="PERF_TP_QUADS_BUFFER"/>
- <value value="21" name="PERF_TP_QUADS_3D"/>
- <value value="22" name="PERF_TP_QUADS_CUBE"/>
- <value value="23" name="PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
- <value value="24" name="PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
- <value value="25" name="PERF_TP_OUTPUT_PIXELS_POINT"/>
- <value value="26" name="PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
- <value value="27" name="PERF_TP_OUTPUT_PIXELS_MIP"/>
- <value value="28" name="PERF_TP_OUTPUT_PIXELS_ANISO"/>
- <value value="29" name="PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
- <value value="30" name="PERF_TP_FLAG_CACHE_REQUESTS"/>
- <value value="31" name="PERF_TP_FLAG_CACHE_MISSES"/>
- <value value="32" name="PERF_TP_L1_5_L2_REQUESTS"/>
- <value value="33" name="PERF_TP_2D_OUTPUT_PIXELS"/>
- <value value="34" name="PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
- <value value="35" name="PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
- <value value="36" name="PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
- <value value="37" name="PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
- <value value="38" name="PERF_TP_TPA2TPC_TRANS"/>
- <value value="39" name="PERF_TP_L1_MISSES_ASTC_1TILE"/>
- <value value="40" name="PERF_TP_L1_MISSES_ASTC_2TILE"/>
- <value value="41" name="PERF_TP_L1_MISSES_ASTC_4TILE"/>
- <value value="42" name="PERF_TP_L1_5_L2_COMPRESS_REQS"/>
- <value value="43" name="PERF_TP_L1_5_L2_COMPRESS_MISS"/>
- <value value="44" name="PERF_TP_L1_BANK_CONFLICT"/>
- <value value="45" name="PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
- <value value="46" name="PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
- <value value="47" name="PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
- <value value="48" name="PERF_TP_FRONTEND_WORKING_CYCLES"/>
- <value value="49" name="PERF_TP_L1_TAG_WORKING_CYCLES"/>
- <value value="50" name="PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
- <value value="51" name="PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
- <value value="52" name="PERF_TP_BACKEND_WORKING_CYCLES"/>
- <value value="53" name="PERF_TP_FLAG_CACHE_WORKING_CYCLES"/>
- <value value="54" name="PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
- <value value="55" name="PERF_TP_STARVE_CYCLES_SP"/>
- <value value="56" name="PERF_TP_STARVE_CYCLES_UCHE"/>
-</enum>
-
-<enum name="a6xx_sp_perfcounter_select">
- <value value="0" name="PERF_SP_BUSY_CYCLES"/>
- <value value="1" name="PERF_SP_ALU_WORKING_CYCLES"/>
- <value value="2" name="PERF_SP_EFU_WORKING_CYCLES"/>
- <value value="3" name="PERF_SP_STALL_CYCLES_VPC"/>
- <value value="4" name="PERF_SP_STALL_CYCLES_TP"/>
- <value value="5" name="PERF_SP_STALL_CYCLES_UCHE"/>
- <value value="6" name="PERF_SP_STALL_CYCLES_RB"/>
- <value value="7" name="PERF_SP_NON_EXECUTION_CYCLES"/>
- <value value="8" name="PERF_SP_WAVE_CONTEXTS"/>
- <value value="9" name="PERF_SP_WAVE_CONTEXT_CYCLES"/>
- <value value="10" name="PERF_SP_FS_STAGE_WAVE_CYCLES"/>
- <value value="11" name="PERF_SP_FS_STAGE_WAVE_SAMPLES"/>
- <value value="12" name="PERF_SP_VS_STAGE_WAVE_CYCLES"/>
- <value value="13" name="PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
- <value value="14" name="PERF_SP_FS_STAGE_DURATION_CYCLES"/>
- <value value="15" name="PERF_SP_VS_STAGE_DURATION_CYCLES"/>
- <value value="16" name="PERF_SP_WAVE_CTRL_CYCLES"/>
- <value value="17" name="PERF_SP_WAVE_LOAD_CYCLES"/>
- <value value="18" name="PERF_SP_WAVE_EMIT_CYCLES"/>
- <value value="19" name="PERF_SP_WAVE_NOP_CYCLES"/>
- <value value="20" name="PERF_SP_WAVE_WAIT_CYCLES"/>
- <value value="21" name="PERF_SP_WAVE_FETCH_CYCLES"/>
- <value value="22" name="PERF_SP_WAVE_IDLE_CYCLES"/>
- <value value="23" name="PERF_SP_WAVE_END_CYCLES"/>
- <value value="24" name="PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
- <value value="25" name="PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
- <value value="26" name="PERF_SP_WAVE_JOIN_CYCLES"/>
- <value value="27" name="PERF_SP_LM_LOAD_INSTRUCTIONS"/>
- <value value="28" name="PERF_SP_LM_STORE_INSTRUCTIONS"/>
- <value value="29" name="PERF_SP_LM_ATOMICS"/>
- <value value="30" name="PERF_SP_GM_LOAD_INSTRUCTIONS"/>
- <value value="31" name="PERF_SP_GM_STORE_INSTRUCTIONS"/>
- <value value="32" name="PERF_SP_GM_ATOMICS"/>
- <value value="33" name="PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="34" name="PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="35" name="PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="36" name="PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="37" name="PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="38" name="PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
- <value value="39" name="PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="40" name="PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="41" name="PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="42" name="PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
- <value value="43" name="PERF_SP_VS_INSTRUCTIONS"/>
- <value value="44" name="PERF_SP_FS_INSTRUCTIONS"/>
- <value value="45" name="PERF_SP_ADDR_LOCK_COUNT"/>
- <value value="46" name="PERF_SP_UCHE_READ_TRANS"/>
- <value value="47" name="PERF_SP_UCHE_WRITE_TRANS"/>
- <value value="48" name="PERF_SP_EXPORT_VPC_TRANS"/>
- <value value="49" name="PERF_SP_EXPORT_RB_TRANS"/>
- <value value="50" name="PERF_SP_PIXELS_KILLED"/>
- <value value="51" name="PERF_SP_ICL1_REQUESTS"/>
- <value value="52" name="PERF_SP_ICL1_MISSES"/>
- <value value="53" name="PERF_SP_HS_INSTRUCTIONS"/>
- <value value="54" name="PERF_SP_DS_INSTRUCTIONS"/>
- <value value="55" name="PERF_SP_GS_INSTRUCTIONS"/>
- <value value="56" name="PERF_SP_CS_INSTRUCTIONS"/>
- <value value="57" name="PERF_SP_GPR_READ"/>
- <value value="58" name="PERF_SP_GPR_WRITE"/>
- <value value="59" name="PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="60" name="PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="61" name="PERF_SP_LM_BANK_CONFLICTS"/>
- <value value="62" name="PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
- <value value="63" name="PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
- <value value="64" name="PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
- <value value="65" name="PERF_SP_LM_WORKING_CYCLES"/>
- <value value="66" name="PERF_SP_DISPATCHER_WORKING_CYCLES"/>
- <value value="67" name="PERF_SP_SEQUENCER_WORKING_CYCLES"/>
- <value value="68" name="PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
- <value value="69" name="PERF_SP_STARVE_CYCLES_HLSQ"/>
- <value value="70" name="PERF_SP_NON_EXECUTION_LS_CYCLES"/>
- <value value="71" name="PERF_SP_WORKING_EU"/>
- <value value="72" name="PERF_SP_ANY_EU_WORKING"/>
- <value value="73" name="PERF_SP_WORKING_EU_FS_STAGE"/>
- <value value="74" name="PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
- <value value="75" name="PERF_SP_WORKING_EU_VS_STAGE"/>
- <value value="76" name="PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
- <value value="77" name="PERF_SP_WORKING_EU_CS_STAGE"/>
- <value value="78" name="PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
- <value value="79" name="PERF_SP_GPR_READ_PREFETCH"/>
- <value value="80" name="PERF_SP_GPR_READ_CONFLICT"/>
- <value value="81" name="PERF_SP_GPR_WRITE_CONFLICT"/>
- <value value="82" name="PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
- <value value="83" name="PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
- <value value="84" name="PERF_SP_EXECUTABLE_WAVES"/>
-</enum>
-
-<enum name="a6xx_rb_perfcounter_select">
- <value value="0" name="PERF_RB_BUSY_CYCLES"/>
- <value value="1" name="PERF_RB_STALL_CYCLES_HLSQ"/>
- <value value="2" name="PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
- <value value="3" name="PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
- <value value="4" name="PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
- <value value="5" name="PERF_RB_STARVE_CYCLES_SP"/>
- <value value="6" name="PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
- <value value="7" name="PERF_RB_STARVE_CYCLES_CCU"/>
- <value value="8" name="PERF_RB_STARVE_CYCLES_Z_PLANE"/>
- <value value="9" name="PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
- <value value="10" name="PERF_RB_Z_WORKLOAD"/>
- <value value="11" name="PERF_RB_HLSQ_ACTIVE"/>
- <value value="12" name="PERF_RB_Z_READ"/>
- <value value="13" name="PERF_RB_Z_WRITE"/>
- <value value="14" name="PERF_RB_C_READ"/>
- <value value="15" name="PERF_RB_C_WRITE"/>
- <value value="16" name="PERF_RB_TOTAL_PASS"/>
- <value value="17" name="PERF_RB_Z_PASS"/>
- <value value="18" name="PERF_RB_Z_FAIL"/>
- <value value="19" name="PERF_RB_S_FAIL"/>
- <value value="20" name="PERF_RB_BLENDED_FXP_COMPONENTS"/>
- <value value="21" name="PERF_RB_BLENDED_FP16_COMPONENTS"/>
- <value value="22" name="PERF_RB_PS_INVOCATIONS"/>
- <value value="23" name="PERF_RB_2D_ALIVE_CYCLES"/>
- <value value="24" name="PERF_RB_2D_STALL_CYCLES_A2D"/>
- <value value="25" name="PERF_RB_2D_STARVE_CYCLES_SRC"/>
- <value value="26" name="PERF_RB_2D_STARVE_CYCLES_SP"/>
- <value value="27" name="PERF_RB_2D_STARVE_CYCLES_DST"/>
- <value value="28" name="PERF_RB_2D_VALID_PIXELS"/>
- <value value="29" name="PERF_RB_3D_PIXELS"/>
- <value value="30" name="PERF_RB_BLENDER_WORKING_CYCLES"/>
- <value value="31" name="PERF_RB_ZPROC_WORKING_CYCLES"/>
- <value value="32" name="PERF_RB_CPROC_WORKING_CYCLES"/>
- <value value="33" name="PERF_RB_SAMPLER_WORKING_CYCLES"/>
- <value value="34" name="PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
- <value value="35" name="PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
- <value value="36" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
- <value value="37" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
- <value value="38" name="PERF_RB_STALL_CYCLES_VPC"/>
- <value value="39" name="PERF_RB_2D_INPUT_TRANS"/>
- <value value="40" name="PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
- <value value="41" name="PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
- <value value="42" name="PERF_RB_BLENDED_FP32_COMPONENTS"/>
- <value value="43" name="PERF_RB_COLOR_PIX_TILES"/>
- <value value="44" name="PERF_RB_STALL_CYCLES_CCU"/>
- <value value="45" name="PERF_RB_EARLY_Z_ARB3_GRANT"/>
- <value value="46" name="PERF_RB_LATE_Z_ARB3_GRANT"/>
- <value value="47" name="PERF_RB_EARLY_Z_SKIP_GRANT"/>
-</enum>
-
-<enum name="a6xx_vsc_perfcounter_select">
- <value value="0" name="PERF_VSC_BUSY_CYCLES"/>
- <value value="1" name="PERF_VSC_WORKING_CYCLES"/>
- <value value="2" name="PERF_VSC_STALL_CYCLES_UCHE"/>
- <value value="3" name="PERF_VSC_EOT_NUM"/>
- <value value="4" name="PERF_VSC_INPUT_TILES"/>
-</enum>
-
-<enum name="a6xx_ccu_perfcounter_select">
- <value value="0" name="PERF_CCU_BUSY_CYCLES"/>
- <value value="1" name="PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
- <value value="2" name="PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
- <value value="3" name="PERF_CCU_STARVE_CYCLES_FLAG_RETURN"/>
- <value value="4" name="PERF_CCU_DEPTH_BLOCKS"/>
- <value value="5" name="PERF_CCU_COLOR_BLOCKS"/>
- <value value="6" name="PERF_CCU_DEPTH_BLOCK_HIT"/>
- <value value="7" name="PERF_CCU_COLOR_BLOCK_HIT"/>
- <value value="8" name="PERF_CCU_PARTIAL_BLOCK_READ"/>
- <value value="9" name="PERF_CCU_GMEM_READ"/>
- <value value="10" name="PERF_CCU_GMEM_WRITE"/>
- <value value="11" name="PERF_CCU_DEPTH_READ_FLAG0_COUNT"/>
- <value value="12" name="PERF_CCU_DEPTH_READ_FLAG1_COUNT"/>
- <value value="13" name="PERF_CCU_DEPTH_READ_FLAG2_COUNT"/>
- <value value="14" name="PERF_CCU_DEPTH_READ_FLAG3_COUNT"/>
- <value value="15" name="PERF_CCU_DEPTH_READ_FLAG4_COUNT"/>
- <value value="16" name="PERF_CCU_DEPTH_READ_FLAG5_COUNT"/>
- <value value="17" name="PERF_CCU_DEPTH_READ_FLAG6_COUNT"/>
- <value value="18" name="PERF_CCU_DEPTH_READ_FLAG8_COUNT"/>
- <value value="19" name="PERF_CCU_COLOR_READ_FLAG0_COUNT"/>
- <value value="20" name="PERF_CCU_COLOR_READ_FLAG1_COUNT"/>
- <value value="21" name="PERF_CCU_COLOR_READ_FLAG2_COUNT"/>
- <value value="22" name="PERF_CCU_COLOR_READ_FLAG3_COUNT"/>
- <value value="23" name="PERF_CCU_COLOR_READ_FLAG4_COUNT"/>
- <value value="24" name="PERF_CCU_COLOR_READ_FLAG5_COUNT"/>
- <value value="25" name="PERF_CCU_COLOR_READ_FLAG6_COUNT"/>
- <value value="26" name="PERF_CCU_COLOR_READ_FLAG8_COUNT"/>
- <value value="27" name="PERF_CCU_2D_RD_REQ"/>
- <value value="28" name="PERF_CCU_2D_WR_REQ"/>
-</enum>
-
-<enum name="a6xx_lrz_perfcounter_select">
- <value value="0" name="PERF_LRZ_BUSY_CYCLES"/>
- <value value="1" name="PERF_LRZ_STARVE_CYCLES_RAS"/>
- <value value="2" name="PERF_LRZ_STALL_CYCLES_RB"/>
- <value value="3" name="PERF_LRZ_STALL_CYCLES_VSC"/>
- <value value="4" name="PERF_LRZ_STALL_CYCLES_VPC"/>
- <value value="5" name="PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
- <value value="6" name="PERF_LRZ_STALL_CYCLES_UCHE"/>
- <value value="7" name="PERF_LRZ_LRZ_READ"/>
- <value value="8" name="PERF_LRZ_LRZ_WRITE"/>
- <value value="9" name="PERF_LRZ_READ_LATENCY"/>
- <value value="10" name="PERF_LRZ_MERGE_CACHE_UPDATING"/>
- <value value="11" name="PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
- <value value="12" name="PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
- <value value="13" name="PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
- <value value="14" name="PERF_LRZ_FULL_8X8_TILES"/>
- <value value="15" name="PERF_LRZ_PARTIAL_8X8_TILES"/>
- <value value="16" name="PERF_LRZ_TILE_KILLED"/>
- <value value="17" name="PERF_LRZ_TOTAL_PIXEL"/>
- <value value="18" name="PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
- <value value="19" name="PERF_LRZ_FULLY_COVERED_TILES"/>
- <value value="20" name="PERF_LRZ_PARTIAL_COVERED_TILES"/>
- <value value="21" name="PERF_LRZ_FEEDBACK_ACCEPT"/>
- <value value="22" name="PERF_LRZ_FEEDBACK_DISCARD"/>
- <value value="23" name="PERF_LRZ_FEEDBACK_STALL"/>
- <value value="24" name="PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
- <value value="25" name="PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
- <value value="26" name="PERF_LRZ_STALL_CYCLES_VC"/>
- <value value="27" name="PERF_LRZ_RAS_MASK_TRANS"/>
-</enum>
-
-<enum name="a6xx_cmp_perfcounter_select">
- <value value="0" name="PERF_CMPDECMP_STALL_CYCLES_ARB"/>
- <value value="1" name="PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
- <value value="2" name="PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
- <value value="3" name="PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
- <value value="4" name="PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
- <value value="5" name="PERF_CMPDECMP_VBIF_READ_REQUEST"/>
- <value value="6" name="PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
- <value value="7" name="PERF_CMPDECMP_VBIF_READ_DATA"/>
- <value value="8" name="PERF_CMPDECMP_VBIF_WRITE_DATA"/>
- <value value="9" name="PERF_CMPDECMP_FLAG_FETCH_CYCLES"/>
- <value value="10" name="PERF_CMPDECMP_FLAG_FETCH_SAMPLES"/>
- <value value="11" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
- <value value="12" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
- <value value="13" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
- <value value="14" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
- <value value="15" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
- <value value="16" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
- <value value="17" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
- <value value="18" name="PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
- <value value="19" name="PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
- <value value="20" name="PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
- <value value="21" name="PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
- <value value="22" name="PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
- <value value="23" name="PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
- <value value="24" name="PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
- <value value="25" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ"/>
- <value value="26" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR"/>
- <value value="27" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN"/>
- <value value="28" name="PERF_CMPDECMP_2D_RD_DATA"/>
- <value value="29" name="PERF_CMPDECMP_2D_WR_DATA"/>
- <value value="30" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
- <value value="31" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
- <value value="32" name="PERF_CMPDECMP_2D_OUTPUT_TRANS"/>
- <value value="33" name="PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
- <value value="34" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
- <value value="35" name="PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
- <value value="36" name="PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
- <value value="37" name="PERF_CMPDECMP_2D_BUSY_CYCLES"/>
- <value value="38" name="PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES"/>
- <value value="39" name="PERF_CMPDECMP_2D_PIXELS"/>
-</enum>
-
-<!--
-Used in a6xx_2d_blit_cntl. The value mostly seems to correlate to the
-component type/size, so it presumably selects the internal format used
-for blending. The one exception is that 16b unorm and 32b float use the
-same value... maybe 16b unorm is uncommon enough that it was just easier
-to upconvert to 32b float internally?
-
- 8b unorm: 10 (sometimes 0, is the high bit part of something else?)
-16b unorm: 4
-
-32b int: 7
-16b int: 6
- 8b int: 5
-
-32b float: 4
-16b float: 3
- -->
-<enum name="a6xx_2d_ifmt">
- <value value="0x10" name="R2D_UNORM8"/>
- <value value="0x7" name="R2D_INT32"/>
- <value value="0x6" name="R2D_INT16"/>
- <value value="0x5" name="R2D_INT8"/>
- <value value="0x4" name="R2D_FLOAT32"/>
- <value value="0x3" name="R2D_FLOAT16"/>
- <value value="0x1" name="R2D_UNORM8_SRGB"/>
- <value value="0x0" name="R2D_RAW"/>
-</enum>
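The mapping above is regular enough to capture in a small helper. Below is a minimal C sketch (illustrative only, not kernel or mesa code; the fmt_info struct and pick_2d_ifmt helper are hypothetical) that encodes the pattern the comment describes, including the quirk of 16b unorm sharing R2D_FLOAT32:

#include <stdbool.h>

enum a6xx_2d_ifmt { /* values from the XML enum above */
	R2D_RAW = 0x0, R2D_UNORM8_SRGB = 0x1, R2D_FLOAT16 = 0x3,
	R2D_FLOAT32 = 0x4, R2D_INT8 = 0x5, R2D_INT16 = 0x6,
	R2D_INT32 = 0x7, R2D_UNORM8 = 0x10,
};

/* Hypothetical summary of a source format's components. */
struct fmt_info {
	bool is_int;   /* integer format */
	bool is_float; /* floating-point format */
	bool is_srgb;  /* 8b unorm sRGB */
	int  bits;     /* component size: 8, 16 or 32 */
};

static enum a6xx_2d_ifmt pick_2d_ifmt(const struct fmt_info *f)
{
	if (f->is_srgb)
		return R2D_UNORM8_SRGB;
	if (f->is_int)
		return f->bits == 8 ? R2D_INT8 :
		       f->bits == 16 ? R2D_INT16 : R2D_INT32;
	if (f->is_float)
		return f->bits == 16 ? R2D_FLOAT16 : R2D_FLOAT32;
	/* unorm: 16b unorm shares R2D_FLOAT32, per the observation above */
	return f->bits == 8 ? R2D_UNORM8 : R2D_FLOAT32;
}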
-
-<enum name="a6xx_ztest_mode">
- <doc>Allow early z-test and early-lrz (if applicable)</doc>
- <value value="0x0" name="A6XX_EARLY_Z"/>
- <doc>Disable early z-test and early-lrz test (if applicable)</doc>
- <value value="0x1" name="A6XX_LATE_Z"/>
- <doc>
- A special mode that allows the early-lrz test but disables
- the early-z test. This might sound a bit funny, since the
- lrz-test happens before the z-test, but as long as a couple
- of conditions are maintained it allows using the lrz-test in
- cases where the fragment shader has kill/discard:
-
- 1) Disable lrz-write in cases where it is uncertain during
- the binning pass that a fragment will pass, i.e. if the frag
- shader has kill, writes z, or alpha/stencil test is
- enabled. (For correctness, lrz-write must be disabled
- when blend is enabled.) This is analogous to how a
- z-prepass works.
-
- 2) Disable lrz-write and lrz-test if a depth-test direction
- reversal is detected. Due to condition (1), the contents
- of the lrz buffer are a conservative estimate of the
- depth buffer during the draw pass, meaning that geometry
- that we know for certain will not be visible will not pass
- the lrz-test, but geometry which may be visible (or
- contributes to blend) will pass it.
-
- This allows us to keep the early-lrz test in cases where the
- frag shader does not write z (i.e. we know the z-value before
- the FS) and does not have side effects (image/ssbo writes,
- etc), but does have kill/discard. This turns out to be a
- common enough case that it is useful to keep the early-lrz
- test against the conservative lrz buffer to discard fragments
- that we know will definitely not be visible.
- </doc>
- <value value="0x2" name="A6XX_EARLY_LRZ_LATE_Z"/>
- <doc>Not a real hw value, used internally by mesa</doc>
- <value value="0x3" name="A6XX_INVALID_ZTEST"/>
-</enum>
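As a concrete illustration of the selection rules in the doc above, here is a minimal C sketch (hypothetical; not the actual mesa logic) choosing a ztest mode from fragment-shader properties: late-z when the FS writes z or has side effects, early-lrz/late-z when it only has kill/discard, early-z otherwise:

#include <stdbool.h>

enum a6xx_ztest_mode { /* values from the XML enum above */
	A6XX_EARLY_Z = 0x0,
	A6XX_LATE_Z = 0x1,
	A6XX_EARLY_LRZ_LATE_Z = 0x2,
};

/* Hypothetical summary of fragment-shader properties. */
struct fs_info {
	bool writes_z;         /* FS writes gl_FragDepth */
	bool has_side_effects; /* image/ssbo writes, etc. */
	bool has_kill;         /* kill/discard */
};

static enum a6xx_ztest_mode pick_ztest_mode(const struct fs_info *fs)
{
	if (fs->writes_z || fs->has_side_effects)
		return A6XX_LATE_Z;           /* z unknown before FS, or FS must run */
	if (fs->has_kill)
		return A6XX_EARLY_LRZ_LATE_Z; /* keep early-lrz, defer z-test */
	return A6XX_EARLY_Z;
}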
-
-<enum name="a6xx_tess_spacing">
- <value value="0x0" name="TESS_EQUAL"/>
- <value value="0x2" name="TESS_FRACTIONAL_ODD"/>
- <value value="0x3" name="TESS_FRACTIONAL_EVEN"/>
-</enum>
-<enum name="a6xx_tess_output">
- <value value="0x0" name="TESS_POINTS"/>
- <value value="0x1" name="TESS_LINES"/>
- <value value="0x2" name="TESS_CW_TRIS"/>
- <value value="0x3" name="TESS_CCW_TRIS"/>
-</enum>
-
-<enum name="a7xx_cp_perfcounter_select">
- <value value="0" name="A7XX_PERF_CP_ALWAYS_COUNT"/>
- <value value="1" name="A7XX_PERF_CP_BUSY_GFX_CORE_IDLE"/>
- <value value="2" name="A7XX_PERF_CP_BUSY_CYCLES"/>
- <value value="3" name="A7XX_PERF_CP_NUM_PREEMPTIONS"/>
- <value value="4" name="A7XX_PERF_CP_PREEMPTION_REACTION_DELAY"/>
- <value value="5" name="A7XX_PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
- <value value="6" name="A7XX_PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
- <value value="7" name="A7XX_PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
- <value value="8" name="A7XX_PERF_CP_PREDICATED_DRAWS_KILLED"/>
- <value value="9" name="A7XX_PERF_CP_MODE_SWITCH"/>
- <value value="10" name="A7XX_PERF_CP_ZPASS_DONE"/>
- <value value="11" name="A7XX_PERF_CP_CONTEXT_DONE"/>
- <value value="12" name="A7XX_PERF_CP_CACHE_FLUSH"/>
- <value value="13" name="A7XX_PERF_CP_LONG_PREEMPTIONS"/>
- <value value="14" name="A7XX_PERF_CP_SQE_I_CACHE_STARVE"/>
- <value value="15" name="A7XX_PERF_CP_SQE_IDLE"/>
- <value value="16" name="A7XX_PERF_CP_SQE_PM4_STARVE_RB_IB"/>
- <value value="17" name="A7XX_PERF_CP_SQE_PM4_STARVE_SDS"/>
- <value value="18" name="A7XX_PERF_CP_SQE_MRB_STARVE"/>
- <value value="19" name="A7XX_PERF_CP_SQE_RRB_STARVE"/>
- <value value="20" name="A7XX_PERF_CP_SQE_VSD_STARVE"/>
- <value value="21" name="A7XX_PERF_CP_VSD_DECODE_STARVE"/>
- <value value="22" name="A7XX_PERF_CP_SQE_PIPE_OUT_STALL"/>
- <value value="23" name="A7XX_PERF_CP_SQE_SYNC_STALL"/>
- <value value="24" name="A7XX_PERF_CP_SQE_PM4_WFI_STALL"/>
- <value value="25" name="A7XX_PERF_CP_SQE_SYS_WFI_STALL"/>
- <value value="26" name="A7XX_PERF_CP_SQE_T4_EXEC"/>
- <value value="27" name="A7XX_PERF_CP_SQE_LOAD_STATE_EXEC"/>
- <value value="28" name="A7XX_PERF_CP_SQE_SAVE_SDS_STATE"/>
- <value value="29" name="A7XX_PERF_CP_SQE_DRAW_EXEC"/>
- <value value="30" name="A7XX_PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
- <value value="31" name="A7XX_PERF_CP_SQE_EXEC_PROFILED"/>
- <value value="32" name="A7XX_PERF_CP_MEMORY_POOL_EMPTY"/>
- <value value="33" name="A7XX_PERF_CP_MEMORY_POOL_SYNC_STALL"/>
- <value value="34" name="A7XX_PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
- <value value="35" name="A7XX_PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
- <value value="36" name="A7XX_PERF_CP_AHB_STALL_SQE_GMU"/>
- <value value="37" name="A7XX_PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
- <value value="38" name="A7XX_PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
- <value value="39" name="A7XX_PERF_CP_CLUSTER0_EMPTY"/>
- <value value="40" name="A7XX_PERF_CP_CLUSTER1_EMPTY"/>
- <value value="41" name="A7XX_PERF_CP_CLUSTER2_EMPTY"/>
- <value value="42" name="A7XX_PERF_CP_CLUSTER3_EMPTY"/>
- <value value="43" name="A7XX_PERF_CP_CLUSTER4_EMPTY"/>
- <value value="44" name="A7XX_PERF_CP_CLUSTER5_EMPTY"/>
- <value value="45" name="A7XX_PERF_CP_PM4_DATA"/>
- <value value="46" name="A7XX_PERF_CP_PM4_HEADERS"/>
- <value value="47" name="A7XX_PERF_CP_VBIF_READ_BEATS"/>
- <value value="48" name="A7XX_PERF_CP_VBIF_WRITE_BEATS"/>
- <value value="49" name="A7XX_PERF_CP_SQE_INSTR_COUNTER"/>
- <value value="50" name="A7XX_PERF_CP_RESERVED_50"/>
- <value value="51" name="A7XX_PERF_CP_RESERVED_51"/>
- <value value="52" name="A7XX_PERF_CP_RESERVED_52"/>
- <value value="53" name="A7XX_PERF_CP_RESERVED_53"/>
- <value value="54" name="A7XX_PERF_CP_RESERVED_54"/>
- <value value="55" name="A7XX_PERF_CP_RESERVED_55"/>
- <value value="56" name="A7XX_PERF_CP_RESERVED_56"/>
- <value value="57" name="A7XX_PERF_CP_RESERVED_57"/>
- <value value="58" name="A7XX_PERF_CP_RESERVED_58"/>
- <value value="59" name="A7XX_PERF_CP_RESERVED_59"/>
- <value value="60" name="A7XX_PERF_CP_CLUSTER0_FULL"/>
- <value value="61" name="A7XX_PERF_CP_CLUSTER1_FULL"/>
- <value value="62" name="A7XX_PERF_CP_CLUSTER2_FULL"/>
- <value value="63" name="A7XX_PERF_CP_CLUSTER3_FULL"/>
- <value value="64" name="A7XX_PERF_CP_CLUSTER4_FULL"/>
- <value value="65" name="A7XX_PERF_CP_CLUSTER5_FULL"/>
- <value value="66" name="A7XX_PERF_CP_CLUSTER6_FULL"/>
- <value value="67" name="A7XX_PERF_CP_CLUSTER6_EMPTY"/>
- <value value="68" name="A7XX_PERF_CP_ICACHE_MISSES"/>
- <value value="69" name="A7XX_PERF_CP_ICACHE_HITS"/>
- <value value="70" name="A7XX_PERF_CP_ICACHE_STALL"/>
- <value value="71" name="A7XX_PERF_CP_DCACHE_MISSES"/>
- <value value="72" name="A7XX_PERF_CP_DCACHE_HITS"/>
- <value value="73" name="A7XX_PERF_CP_DCACHE_STALLS"/>
- <value value="74" name="A7XX_PERF_CP_AQE_SQE_STALL"/>
- <value value="75" name="A7XX_PERF_CP_SQE_AQE_STARVE"/>
- <value value="76" name="A7XX_PERF_CP_PREEMPT_LATENCY"/>
- <value value="77" name="A7XX_PERF_CP_SQE_MD8_STALL_CYCLES"/>
- <value value="78" name="A7XX_PERF_CP_SQE_MESH_EXEC_CYCLES"/>
- <value value="79" name="A7XX_PERF_CP_AQE_NUM_AS_CHUNKS"/>
- <value value="80" name="A7XX_PERF_CP_AQE_NUM_MS_CHUNKS"/>
-</enum>
-
-<enum name="a7xx_rbbm_perfcounter_select">
- <value value="0" name="A7XX_PERF_RBBM_ALWAYS_COUNT"/>
- <value value="1" name="A7XX_PERF_RBBM_ALWAYS_ON"/>
- <value value="2" name="A7XX_PERF_RBBM_TSE_BUSY"/>
- <value value="3" name="A7XX_PERF_RBBM_RAS_BUSY"/>
- <value value="4" name="A7XX_PERF_RBBM_PC_DCALL_BUSY"/>
- <value value="5" name="A7XX_PERF_RBBM_PC_VSD_BUSY"/>
- <value value="6" name="A7XX_PERF_RBBM_STATUS_MASKED"/>
- <value value="7" name="A7XX_PERF_RBBM_COM_BUSY"/>
- <value value="8" name="A7XX_PERF_RBBM_DCOM_BUSY"/>
- <value value="9" name="A7XX_PERF_RBBM_VBIF_BUSY"/>
- <value value="10" name="A7XX_PERF_RBBM_VSC_BUSY"/>
- <value value="11" name="A7XX_PERF_RBBM_TESS_BUSY"/>
- <value value="12" name="A7XX_PERF_RBBM_UCHE_BUSY"/>
- <value value="13" name="A7XX_PERF_RBBM_HLSQ_BUSY"/>
-</enum>
-
-<enum name="a7xx_pc_perfcounter_select">
- <value value="0" name="A7XX_PERF_PC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_PC_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_PC_STALL_CYCLES_VFD"/>
- <value value="3" name="A7XX_PERF_PC_RESERVED"/>
- <value value="4" name="A7XX_PERF_PC_STALL_CYCLES_VPC"/>
- <value value="5" name="A7XX_PERF_PC_STALL_CYCLES_UCHE"/>
- <value value="6" name="A7XX_PERF_PC_STALL_CYCLES_TESS"/>
- <value value="7" name="A7XX_PERF_PC_STALL_CYCLES_VFD_ONLY"/>
- <value value="8" name="A7XX_PERF_PC_STALL_CYCLES_VPC_ONLY"/>
- <value value="9" name="A7XX_PERF_PC_PASS1_TF_STALL_CYCLES"/>
- <value value="10" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
- <value value="11" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
- <value value="12" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
- <value value="13" name="A7XX_PERF_PC_STARVE_CYCLES_DI"/>
- <value value="14" name="A7XX_PERF_PC_VIS_STREAMS_LOADED"/>
- <value value="15" name="A7XX_PERF_PC_INSTANCES"/>
- <value value="16" name="A7XX_PERF_PC_VPC_PRIMITIVES"/>
- <value value="17" name="A7XX_PERF_PC_DEAD_PRIM"/>
- <value value="18" name="A7XX_PERF_PC_LIVE_PRIM"/>
- <value value="19" name="A7XX_PERF_PC_VERTEX_HITS"/>
- <value value="20" name="A7XX_PERF_PC_IA_VERTICES"/>
- <value value="21" name="A7XX_PERF_PC_IA_PRIMITIVES"/>
- <value value="22" name="A7XX_PERF_PC_RESERVED_22"/>
- <value value="23" name="A7XX_PERF_PC_HS_INVOCATIONS"/>
- <value value="24" name="A7XX_PERF_PC_DS_INVOCATIONS"/>
- <value value="25" name="A7XX_PERF_PC_VS_INVOCATIONS"/>
- <value value="26" name="A7XX_PERF_PC_GS_INVOCATIONS"/>
- <value value="27" name="A7XX_PERF_PC_DS_PRIMITIVES"/>
- <value value="28" name="A7XX_PERF_PC_3D_DRAWCALLS"/>
- <value value="29" name="A7XX_PERF_PC_2D_DRAWCALLS"/>
- <value value="30" name="A7XX_PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
- <value value="31" name="A7XX_PERF_PC_TESS_BUSY_CYCLES"/>
- <value value="32" name="A7XX_PERF_PC_TESS_WORKING_CYCLES"/>
- <value value="33" name="A7XX_PERF_PC_TESS_STALL_CYCLES_PC"/>
- <value value="34" name="A7XX_PERF_PC_TESS_STARVE_CYCLES_PC"/>
- <value value="35" name="A7XX_PERF_PC_TESS_SINGLE_PRIM_CYCLES"/>
- <value value="36" name="A7XX_PERF_PC_TESS_PC_UV_TRANS"/>
- <value value="37" name="A7XX_PERF_PC_TESS_PC_UV_PATCHES"/>
- <value value="38" name="A7XX_PERF_PC_TESS_FACTOR_TRANS"/>
- <value value="39" name="A7XX_PERF_PC_TAG_CHECKED_VERTICES"/>
- <value value="40" name="A7XX_PERF_PC_MESH_VS_WAVES"/>
- <value value="41" name="A7XX_PERF_PC_MESH_DRAWS"/>
- <value value="42" name="A7XX_PERF_PC_MESH_DEAD_DRAWS"/>
- <value value="43" name="A7XX_PERF_PC_MESH_MVIS_EN_DRAWS"/>
- <value value="44" name="A7XX_PERF_PC_MESH_DEAD_PRIM"/>
- <value value="45" name="A7XX_PERF_PC_MESH_LIVE_PRIM"/>
- <value value="46" name="A7XX_PERF_PC_MESH_PA_EN_PRIM"/>
- <value value="47" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_MVIS_STREAM"/>
- <value value="48" name="A7XX_PERF_PC_STARVE_CYCLES_PREDRAW"/>
- <value value="49" name="A7XX_PERF_PC_STALL_CYCLES_COMPUTE_GFX"/>
- <value value="50" name="A7XX_PERF_PC_STALL_CYCLES_GFX_COMPUTE"/>
- <value value="51" name="A7XX_PERF_PC_TESS_PC_MULTI_PATCH_TRANS"/>
-</enum>
-
-<enum name="a7xx_vfd_perfcounter_select">
- <value value="0" name="A7XX_PERF_VFD_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_VFD_STALL_CYCLES_UCHE"/>
- <value value="2" name="A7XX_PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
- <value value="3" name="A7XX_PERF_VFD_STALL_CYCLES_SP_INFO"/>
- <value value="4" name="A7XX_PERF_VFD_STALL_CYCLES_SP_ATTR"/>
- <value value="5" name="A7XX_PERF_VFD_STARVE_CYCLES_UCHE"/>
- <value value="6" name="A7XX_PERF_VFD_RBUFFER_FULL"/>
- <value value="7" name="A7XX_PERF_VFD_ATTR_INFO_FIFO_FULL"/>
- <value value="8" name="A7XX_PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
- <value value="9" name="A7XX_PERF_VFD_NUM_ATTRIBUTES"/>
- <value value="10" name="A7XX_PERF_VFD_UPPER_SHADER_FIBERS"/>
- <value value="11" name="A7XX_PERF_VFD_LOWER_SHADER_FIBERS"/>
- <value value="12" name="A7XX_PERF_VFD_MODE_0_FIBERS"/>
- <value value="13" name="A7XX_PERF_VFD_MODE_1_FIBERS"/>
- <value value="14" name="A7XX_PERF_VFD_MODE_2_FIBERS"/>
- <value value="15" name="A7XX_PERF_VFD_MODE_3_FIBERS"/>
- <value value="16" name="A7XX_PERF_VFD_MODE_4_FIBERS"/>
- <value value="17" name="A7XX_PERF_VFD_TOTAL_VERTICES"/>
- <value value="18" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD"/>
- <value value="19" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
- <value value="20" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
- <value value="21" name="A7XX_PERF_VFDP_STARVE_CYCLES_PC"/>
- <value value="22" name="A7XX_PERF_VFDP_VS_STAGE_WAVES"/>
- <value value="23" name="A7XX_PERF_VFD_STALL_CYCLES_PRG_END_FE"/>
- <value value="24" name="A7XX_PERF_VFD_STALL_CYCLES_CBSYNC"/>
-</enum>
-
-<enum name="a7xx_hlsq_perfcounter_select">
- <value value="0" name="A7XX_PERF_HLSQ_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_HLSQ_STALL_CYCLES_UCHE"/>
- <value value="2" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
- <value value="3" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
- <value value="4" name="A7XX_PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
- <value value="5" name="A7XX_PERF_HLSQ_UCHE_LATENCY_COUNT"/>
- <value value="6" name="A7XX_PERF_HLSQ_RESERVED_6"/>
- <value value="7" name="A7XX_PERF_HLSQ_RESERVED_7"/>
- <value value="8" name="A7XX_PERF_HLSQ_RESERVED_8"/>
- <value value="9" name="A7XX_PERF_HLSQ_RESERVED_9"/>
- <value value="10" name="A7XX_PERF_HLSQ_COMPUTE_DRAWCALLS"/>
- <value value="11" name="A7XX_PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
- <value value="12" name="A7XX_PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
- <value value="13" name="A7XX_PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
- <value value="14" name="A7XX_PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
- <value value="15" name="A7XX_PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
- <value value="16" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
- <value value="17" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
- <value value="18" name="A7XX_PERF_HLSQ_STALL_CYCLES_VPC"/>
- <value value="19" name="A7XX_PERF_HLSQ_RESERVED_19"/>
- <value value="20" name="A7XX_PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
- <value value="21" name="A7XX_PERF_HLSQ_VSBR_STALL_CYCLES"/>
- <value value="22" name="A7XX_PERF_HLSQ_FS_STALL_CYCLES"/>
- <value value="23" name="A7XX_PERF_HLSQ_LPAC_STALL_CYCLES"/>
- <value value="24" name="A7XX_PERF_HLSQ_BV_STALL_CYCLES"/>
- <value value="25" name="A7XX_PERF_HLSQ_VSBR_DEREF_CYCLES"/>
- <value value="26" name="A7XX_PERF_HLSQ_FS_DEREF_CYCLES"/>
- <value value="27" name="A7XX_PERF_HLSQ_LPAC_DEREF_CYCLES"/>
- <value value="28" name="A7XX_PERF_HLSQ_BV_DEREF_CYCLES"/>
- <value value="29" name="A7XX_PERF_HLSQ_VSBR_S2W_CYCLES"/>
- <value value="30" name="A7XX_PERF_HLSQ_FS_S2W_CYCLES"/>
- <value value="31" name="A7XX_PERF_HLSQ_LPAC_S2W_CYCLES"/>
- <value value="32" name="A7XX_PERF_HLSQ_BV_S2W_CYCLES"/>
- <value value="33" name="A7XX_PERF_HLSQ_VSBR_WAIT_FS_S2W"/>
- <value value="34" name="A7XX_PERF_HLSQ_FS_WAIT_VS_S2W"/>
- <value value="35" name="A7XX_PERF_HLSQ_LPAC_WAIT_VS_S2W"/>
- <value value="36" name="A7XX_PERF_HLSQ_BV_WAIT_FS_S2W"/>
- <value value="37" name="A7XX_PERF_HLSQ_VS_WAIT_CONST_RESOURCE"/>
- <value value="38" name="A7XX_PERF_HLSQ_FS_WAIT_SAME_VS_S2W"/>
- <value value="39" name="A7XX_PERF_HLSQ_FS_STARVING_SP"/>
- <value value="40" name="A7XX_PERF_HLSQ_VS_DATA_WAIT_PROGRAMMING"/>
- <value value="41" name="A7XX_PERF_HLSQ_BV_DATA_WAIT_PROGRAMMING"/>
- <value value="42" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_VS"/>
- <value value="43" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_VS"/>
- <value value="44" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_FS"/>
- <value value="45" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_FS"/>
- <value value="46" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_BV"/>
- <value value="47" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_BV"/>
- <value value="48" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_LPAC"/>
- <value value="49" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_LPAC"/>
- <value value="50" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_VS"/>
- <value value="51" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_FS"/>
- <value value="52" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_BV"/>
- <value value="53" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_LPAC"/>
- <value value="54" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_VS"/>
- <value value="55" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_FS"/>
- <value value="56" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_BV"/>
- <value value="57" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_LPAC"/>
-</enum>
-
-<enum name="a7xx_vpc_perfcounter_select">
- <value value="0" name="A7XX_PERF_VPC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_VPC_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_VPC_STALL_CYCLES_UCHE"/>
- <value value="3" name="A7XX_PERF_VPC_STALL_CYCLES_VFD_WACK"/>
- <value value="4" name="A7XX_PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
- <value value="5" name="A7XX_PERF_VPC_RESERVED_5"/>
- <value value="6" name="A7XX_PERF_VPC_STALL_CYCLES_SP_LM"/>
- <value value="7" name="A7XX_PERF_VPC_STARVE_CYCLES_SP"/>
- <value value="8" name="A7XX_PERF_VPC_STARVE_CYCLES_LRZ"/>
- <value value="9" name="A7XX_PERF_VPC_PC_PRIMITIVES"/>
- <value value="10" name="A7XX_PERF_VPC_SP_COMPONENTS"/>
- <value value="11" name="A7XX_PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
- <value value="12" name="A7XX_PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
- <value value="13" name="A7XX_PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
- <value value="14" name="A7XX_PERF_VPC_LM_TRANSACTION"/>
- <value value="15" name="A7XX_PERF_VPC_STREAMOUT_TRANSACTION"/>
- <value value="16" name="A7XX_PERF_VPC_VS_BUSY_CYCLES"/>
- <value value="17" name="A7XX_PERF_VPC_PS_BUSY_CYCLES"/>
- <value value="18" name="A7XX_PERF_VPC_VS_WORKING_CYCLES"/>
- <value value="19" name="A7XX_PERF_VPC_PS_WORKING_CYCLES"/>
- <value value="20" name="A7XX_PERF_VPC_STARVE_CYCLES_RB"/>
- <value value="21" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_POS"/>
- <value value="22" name="A7XX_PERF_VPC_WIT_FULL_CYCLES"/>
- <value value="23" name="A7XX_PERF_VPC_VPCRAM_FULL_CYCLES"/>
- <value value="24" name="A7XX_PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
- <value value="25" name="A7XX_PERF_VPC_NUM_VPCRAM_WRITE"/>
- <value value="26" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_SO"/>
- <value value="27" name="A7XX_PERF_VPC_NUM_ATTR_REQ_LM"/>
- <value value="28" name="A7XX_PERF_VPC_STALL_CYCLE_TSE"/>
- <value value="29" name="A7XX_PERF_VPC_TSE_PRIMITIVES"/>
- <value value="30" name="A7XX_PERF_VPC_GS_PRIMITIVES"/>
- <value value="31" name="A7XX_PERF_VPC_TSE_TRANSACTIONS"/>
- <value value="32" name="A7XX_PERF_VPC_STALL_CYCLES_CCU"/>
- <value value="33" name="A7XX_PERF_VPC_NUM_WM_HIT"/>
- <value value="34" name="A7XX_PERF_VPC_STALL_DQ_WACK"/>
- <value value="35" name="A7XX_PERF_VPC_STALL_CYCLES_CCHE"/>
- <value value="36" name="A7XX_PERF_VPC_STARVE_CYCLES_CCHE"/>
- <value value="37" name="A7XX_PERF_VPC_NUM_PA_REQ"/>
- <value value="38" name="A7XX_PERF_VPC_NUM_LM_REQ_HIT"/>
- <value value="39" name="A7XX_PERF_VPC_CCHE_REQBUF_FULL"/>
- <value value="40" name="A7XX_PERF_VPC_STALL_CYCLES_LM_ACK"/>
- <value value="41" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_FE"/>
- <value value="42" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_PCVS"/>
- <value value="43" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_VPCPS"/>
-</enum>
-
-<enum name="a7xx_tse_perfcounter_select">
- <value value="0" name="A7XX_PERF_TSE_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_TSE_CLIPPING_CYCLES"/>
- <value value="2" name="A7XX_PERF_TSE_STALL_CYCLES_RAS"/>
- <value value="3" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
- <value value="4" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
- <value value="5" name="A7XX_PERF_TSE_STARVE_CYCLES_PC"/>
- <value value="6" name="A7XX_PERF_TSE_INPUT_PRIM"/>
- <value value="7" name="A7XX_PERF_TSE_INPUT_NULL_PRIM"/>
- <value value="8" name="A7XX_PERF_TSE_TRIVAL_REJ_PRIM"/>
- <value value="9" name="A7XX_PERF_TSE_CLIPPED_PRIM"/>
- <value value="10" name="A7XX_PERF_TSE_ZERO_AREA_PRIM"/>
- <value value="11" name="A7XX_PERF_TSE_FACENESS_CULLED_PRIM"/>
- <value value="12" name="A7XX_PERF_TSE_ZERO_PIXEL_PRIM"/>
- <value value="13" name="A7XX_PERF_TSE_OUTPUT_NULL_PRIM"/>
- <value value="14" name="A7XX_PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
- <value value="15" name="A7XX_PERF_TSE_CINVOCATION"/>
- <value value="16" name="A7XX_PERF_TSE_CPRIMITIVES"/>
- <value value="17" name="A7XX_PERF_TSE_2D_INPUT_PRIM"/>
- <value value="18" name="A7XX_PERF_TSE_2D_ALIVE_CYCLES"/>
- <value value="19" name="A7XX_PERF_TSE_CLIP_PLANES"/>
-</enum>
-
-<enum name="a7xx_ras_perfcounter_select">
- <value value="0" name="A7XX_PERF_RAS_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
- <value value="2" name="A7XX_PERF_RAS_STALL_CYCLES_LRZ"/>
- <value value="3" name="A7XX_PERF_RAS_STARVE_CYCLES_TSE"/>
- <value value="4" name="A7XX_PERF_RAS_SUPER_TILES"/>
- <value value="5" name="A7XX_PERF_RAS_8X4_TILES"/>
- <value value="6" name="A7XX_PERF_RAS_MASKGEN_ACTIVE"/>
- <value value="7" name="A7XX_PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
- <value value="8" name="A7XX_PERF_RAS_FULLY_COVERED_8X4_TILES"/>
- <value value="9" name="A7XX_PERF_RAS_PRIM_KILLED_INVISILBE"/>
- <value value="10" name="A7XX_PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
- <value value="11" name="A7XX_PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
- <value value="12" name="A7XX_PERF_RAS_BLOCKS"/>
- <value value="13" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_0_WORKING_CC_l2"/>
- <value value="14" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_1_WORKING_CC_l2"/>
- <value value="15" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_2_WORKING_CC_l2"/>
- <value value="16" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_3_WORKING_CC_l2"/>
- <value value="17" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_4_WORKING_CC_l2"/>
- <value value="18" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_5_WORKING_CC_l2"/>
- <value value="19" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_6_WORKING_CC_l2"/>
- <value value="20" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_7_WORKING_CC_l2"/>
- <value value="21" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_8_WORKING_CC_l2"/>
- <value value="22" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_9_WORKING_CC_l2"/>
- <value value="23" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_10_WORKING_CC_l2"/>
- <value value="24" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_11_WORKING_CC_l2"/>
- <value value="25" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_12_WORKING_CC_l2"/>
- <value value="26" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_13_WORKING_CC_l2"/>
- <value value="27" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_14_WORKING_CC_l2"/>
- <value value="28" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_15_WORKING_CC_l2"/>
- <value value="29" name="A7XX_PERF_RAS_FALSE_PARTIAL_STILE"/>
-</enum>
-
-<enum name="a7xx_uche_perfcounter_select">
- <value value="0" name="A7XX_PERF_UCHE_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_UCHE_STALL_CYCLES_ARBITER"/>
- <value value="2" name="A7XX_PERF_UCHE_VBIF_LATENCY_CYCLES"/>
- <value value="3" name="A7XX_PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
- <value value="4" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_TP"/>
- <value value="5" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_VFD"/>
- <value value="6" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
- <value value="7" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
- <value value="8" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_SP"/>
- <value value="9" name="A7XX_PERF_UCHE_READ_REQUESTS_TP"/>
- <value value="10" name="A7XX_PERF_UCHE_READ_REQUESTS_VFD"/>
- <value value="11" name="A7XX_PERF_UCHE_READ_REQUESTS_HLSQ"/>
- <value value="12" name="A7XX_PERF_UCHE_READ_REQUESTS_LRZ"/>
- <value value="13" name="A7XX_PERF_UCHE_READ_REQUESTS_SP"/>
- <value value="14" name="A7XX_PERF_UCHE_WRITE_REQUESTS_LRZ"/>
- <value value="15" name="A7XX_PERF_UCHE_WRITE_REQUESTS_SP"/>
- <value value="16" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VPC"/>
- <value value="17" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VSC"/>
- <value value="18" name="A7XX_PERF_UCHE_EVICTS"/>
- <value value="19" name="A7XX_PERF_UCHE_BANK_REQ0"/>
- <value value="20" name="A7XX_PERF_UCHE_BANK_REQ1"/>
- <value value="21" name="A7XX_PERF_UCHE_BANK_REQ2"/>
- <value value="22" name="A7XX_PERF_UCHE_BANK_REQ3"/>
- <value value="23" name="A7XX_PERF_UCHE_BANK_REQ4"/>
- <value value="24" name="A7XX_PERF_UCHE_BANK_REQ5"/>
- <value value="25" name="A7XX_PERF_UCHE_BANK_REQ6"/>
- <value value="26" name="A7XX_PERF_UCHE_BANK_REQ7"/>
- <value value="27" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH0"/>
- <value value="28" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH1"/>
- <value value="29" name="A7XX_PERF_UCHE_GMEM_READ_BEATS"/>
- <value value="30" name="A7XX_PERF_UCHE_TPH_REF_FULL"/>
- <value value="31" name="A7XX_PERF_UCHE_TPH_VICTIM_FULL"/>
- <value value="32" name="A7XX_PERF_UCHE_TPH_EXT_FULL"/>
- <value value="33" name="A7XX_PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
- <value value="34" name="A7XX_PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
- <value value="35" name="A7XX_PERF_UCHE_DCMP_LATENCY_CYCLES"/>
- <value value="36" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_PC"/>
- <value value="37" name="A7XX_PERF_UCHE_READ_REQUESTS_PC"/>
- <value value="38" name="A7XX_PERF_UCHE_RAM_READ_REQ"/>
- <value value="39" name="A7XX_PERF_UCHE_RAM_WRITE_REQ"/>
- <value value="40" name="A7XX_PERF_UCHE_STARVED_CYCLES_VBIF_DECMP"/>
- <value value="41" name="A7XX_PERF_UCHE_STALL_CYCLES_DECMP"/>
- <value value="42" name="A7XX_PERF_UCHE_ARBITER_STALL_CYCLES_VBIF"/>
- <value value="43" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_UBWC"/>
- <value value="44" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_NONUBWC"/>
- <value value="45" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_GMEM"/>
- <value value="46" name="A7XX_PERF_UCHE_LONG_LINE_ALL_EVICTS_KAILUA"/>
- <value value="47" name="A7XX_PERF_UCHE_LONG_LINE_PARTIAL_EVICTS_KAILUA"/>
- <value value="48" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_CCHE"/>
- <value value="49" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_OTHER_KAILUA"/>
- <value value="50" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_CCHE"/>
- <value value="51" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_OTHER_CLIENTS"/>
- <value value="52" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH0"/>
- <value value="53" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH1"/>
- <value value="54" name="A7XX_PERF_UCHE_CCHE_TPH_QUEUE_FULL"/>
- <value value="55" name="A7XX_PERF_UCHE_CCHE_DPH_QUEUE_FULL"/>
- <value value="56" name="A7XX_PERF_UCHE_GMEM_WRITE_BEATS"/>
- <value value="57" name="A7XX_PERF_UCHE_UBWC_READ_BEATS"/>
- <value value="58" name="A7XX_PERF_UCHE_UBWC_WRITE_BEATS"/>
-</enum>
-
-<enum name="a7xx_tp_perfcounter_select">
- <value value="0" name="A7XX_PERF_TP_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_TP_STALL_CYCLES_UCHE"/>
- <value value="2" name="A7XX_PERF_TP_LATENCY_CYCLES"/>
- <value value="3" name="A7XX_PERF_TP_LATENCY_TRANS"/>
- <value value="4" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_SAMPLES"/>
- <value value="5" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_CYCLES"/>
- <value value="6" name="A7XX_PERF_TP_L1_CACHELINE_REQUESTS"/>
- <value value="7" name="A7XX_PERF_TP_L1_CACHELINE_MISSES"/>
- <value value="8" name="A7XX_PERF_TP_SP_TP_TRANS"/>
- <value value="9" name="A7XX_PERF_TP_TP_SP_TRANS"/>
- <value value="10" name="A7XX_PERF_TP_OUTPUT_PIXELS"/>
- <value value="11" name="A7XX_PERF_TP_FILTER_WORKLOAD_16BIT"/>
- <value value="12" name="A7XX_PERF_TP_FILTER_WORKLOAD_32BIT"/>
- <value value="13" name="A7XX_PERF_TP_QUADS_RECEIVED"/>
- <value value="14" name="A7XX_PERF_TP_QUADS_OFFSET"/>
- <value value="15" name="A7XX_PERF_TP_QUADS_SHADOW"/>
- <value value="16" name="A7XX_PERF_TP_QUADS_ARRAY"/>
- <value value="17" name="A7XX_PERF_TP_QUADS_GRADIENT"/>
- <value value="18" name="A7XX_PERF_TP_QUADS_1D"/>
- <value value="19" name="A7XX_PERF_TP_QUADS_2D"/>
- <value value="20" name="A7XX_PERF_TP_QUADS_BUFFER"/>
- <value value="21" name="A7XX_PERF_TP_QUADS_3D"/>
- <value value="22" name="A7XX_PERF_TP_QUADS_CUBE"/>
- <value value="23" name="A7XX_PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
- <value value="24" name="A7XX_PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
- <value value="25" name="A7XX_PERF_TP_OUTPUT_PIXELS_POINT"/>
- <value value="26" name="A7XX_PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
- <value value="27" name="A7XX_PERF_TP_OUTPUT_PIXELS_MIP"/>
- <value value="28" name="A7XX_PERF_TP_OUTPUT_PIXELS_ANISO"/>
- <value value="29" name="A7XX_PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
- <value value="30" name="A7XX_PERF_TP_FLAG_CACHE_REQUESTS"/>
- <value value="31" name="A7XX_PERF_TP_FLAG_CACHE_MISSES"/>
- <value value="32" name="A7XX_PERF_TP_L1_5_L2_REQUESTS"/>
- <value value="33" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS"/>
- <value value="34" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
- <value value="35" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
- <value value="36" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
- <value value="37" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
- <value value="38" name="A7XX_PERF_TP_TPA2TPC_TRANS"/>
- <value value="39" name="A7XX_PERF_TP_L1_MISSES_ASTC_1TILE"/>
- <value value="40" name="A7XX_PERF_TP_L1_MISSES_ASTC_2TILE"/>
- <value value="41" name="A7XX_PERF_TP_L1_MISSES_ASTC_4TILE"/>
- <value value="42" name="A7XX_PERF_TP_L1_5_COMPRESS_REQS"/>
- <value value="43" name="A7XX_PERF_TP_L1_5_L2_COMPRESS_MISS"/>
- <value value="44" name="A7XX_PERF_TP_L1_BANK_CONFLICT"/>
- <value value="45" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
- <value value="46" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
- <value value="47" name="A7XX_PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
- <value value="48" name="A7XX_PERF_TP_FRONTEND_WORKING_CYCLES"/>
- <value value="49" name="A7XX_PERF_TP_L1_TAG_WORKING_CYCLES"/>
- <value value="50" name="A7XX_PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
- <value value="51" name="A7XX_PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
- <value value="52" name="A7XX_PERF_TP_BACKEND_WORKING_CYCLES"/>
- <value value="53" name="A7XX_PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
- <value value="54" name="A7XX_PERF_TP_STARVE_CYCLES_SP"/>
- <value value="55" name="A7XX_PERF_TP_STARVE_CYCLES_UCHE"/>
- <value value="56" name="A7XX_PERF_TP_STALL_CYCLES_UFC"/>
- <value value="57" name="A7XX_PERF_TP_FORMAT_DECOMP"/>
- <value value="58" name="A7XX_PERF_TP_FILTER_POINT_FP16"/>
- <value value="59" name="A7XX_PERF_TP_FILTER_POINT_FP32"/>
- <value value="60" name="A7XX_PERF_TP_LATENCY_FIFO_FULL"/>
- <value value="61" name="A7XX_PERF_TP_RESERVED_61"/>
- <value value="62" name="A7XX_PERF_TP_RESERVED_62"/>
- <value value="63" name="A7XX_PERF_TP_RESERVED_63"/>
- <value value="64" name="A7XX_PERF_TP_RESERVED_64"/>
- <value value="65" name="A7XX_PERF_TP_RESERVED_65"/>
- <value value="66" name="A7XX_PERF_TP_RESERVED_66"/>
- <value value="67" name="A7XX_PERF_TP_RESERVED_67"/>
- <value value="68" name="A7XX_PERF_TP_RESERVED_68"/>
- <value value="69" name="A7XX_PERF_TP_RESERVED_69"/>
- <value value="70" name="A7XX_PERF_TP_RESERVED_70"/>
- <value value="71" name="A7XX_PERF_TP_RESERVED_71"/>
- <value value="72" name="A7XX_PERF_TP_RESERVED_72"/>
- <value value="73" name="A7XX_PERF_TP_RESERVED_73"/>
- <value value="74" name="A7XX_PERF_TP_RESERVED_74"/>
- <value value="75" name="A7XX_PERF_TP_RESERVED_75"/>
- <value value="76" name="A7XX_PERF_TP_RESERVED_76"/>
- <value value="77" name="A7XX_PERF_TP_RESERVED_77"/>
- <value value="78" name="A7XX_PERF_TP_RESERVED_78"/>
- <value value="79" name="A7XX_PERF_TP_RESERVED_79"/>
- <value value="80" name="A7XX_PERF_TP_RESERVED_80"/>
- <value value="81" name="A7XX_PERF_TP_RESERVED_81"/>
- <value value="82" name="A7XX_PERF_TP_RESERVED_82"/>
- <value value="83" name="A7XX_PERF_TP_RESERVED_83"/>
- <value value="84" name="A7XX_PERF_TP_RESERVED_84"/>
- <value value="85" name="A7XX_PERF_TP_RESERVED_85"/>
- <value value="86" name="A7XX_PERF_TP_RESERVED_86"/>
- <value value="87" name="A7XX_PERF_TP_RESERVED_87"/>
- <value value="88" name="A7XX_PERF_TP_RESERVED_88"/>
- <value value="89" name="A7XX_PERF_TP_RESERVED_89"/>
- <value value="90" name="A7XX_PERF_TP_RESERVED_90"/>
- <value value="91" name="A7XX_PERF_TP_RESERVED_91"/>
- <value value="92" name="A7XX_PERF_TP_RESERVED_92"/>
- <value value="93" name="A7XX_PERF_TP_RESERVED_93"/>
- <value value="94" name="A7XX_PERF_TP_RESERVED_94"/>
- <value value="95" name="A7XX_PERF_TP_RESERVED_95"/>
- <value value="96" name="A7XX_PERF_TP_RESERVED_96"/>
- <value value="97" name="A7XX_PERF_TP_RESERVED_97"/>
- <value value="98" name="A7XX_PERF_TP_RESERVED_98"/>
- <value value="99" name="A7XX_PERF_TP_RESERVED_99"/>
- <value value="100" name="A7XX_PERF_TP_RESERVED_100"/>
- <value value="101" name="A7XX_PERF_TP_RESERVED_101"/>
- <value value="102" name="A7XX_PERF_TP_RESERVED_102"/>
- <value value="103" name="A7XX_PERF_TP_RESERVED_103"/>
- <value value="104" name="A7XX_PERF_TP_RESERVED_104"/>
- <value value="105" name="A7XX_PERF_TP_RESERVED_105"/>
- <value value="106" name="A7XX_PERF_TP_RESERVED_106"/>
- <value value="107" name="A7XX_PERF_TP_RESERVED_107"/>
- <value value="108" name="A7XX_PERF_TP_RESERVED_108"/>
- <value value="109" name="A7XX_PERF_TP_RESERVED_109"/>
- <value value="110" name="A7XX_PERF_TP_RESERVED_110"/>
- <value value="111" name="A7XX_PERF_TP_RESERVED_111"/>
- <value value="112" name="A7XX_PERF_TP_RESERVED_112"/>
- <value value="113" name="A7XX_PERF_TP_RESERVED_113"/>
- <value value="114" name="A7XX_PERF_TP_RESERVED_114"/>
- <value value="115" name="A7XX_PERF_TP_RESERVED_115"/>
- <value value="116" name="A7XX_PERF_TP_RESERVED_116"/>
- <value value="117" name="A7XX_PERF_TP_RESERVED_117"/>
- <value value="118" name="A7XX_PERF_TP_RESERVED_118"/>
- <value value="119" name="A7XX_PERF_TP_RESERVED_119"/>
- <value value="120" name="A7XX_PERF_TP_RESERVED_120"/>
- <value value="121" name="A7XX_PERF_TP_RESERVED_121"/>
- <value value="122" name="A7XX_PERF_TP_RESERVED_122"/>
- <value value="123" name="A7XX_PERF_TP_RESERVED_123"/>
- <value value="124" name="A7XX_PERF_TP_RESERVED_124"/>
- <value value="125" name="A7XX_PERF_TP_RESERVED_125"/>
- <value value="126" name="A7XX_PERF_TP_RESERVED_126"/>
- <value value="127" name="A7XX_PERF_TP_RESERVED_127"/>
- <value value="128" name="A7XX_PERF_TP_FORMAT_DECOMP_BILINEAR"/>
- <value value="129" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP16"/>
- <value value="130" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP16"/>
- <value value="131" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP32"/>
- <value value="132" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP32"/>
-</enum>
-
-<enum name="a7xx_sp_perfcounter_select">
- <value value="0" name="A7XX_PERF_SP_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_SP_ALU_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_SP_EFU_WORKING_CYCLES"/>
- <value value="3" name="A7XX_PERF_SP_STALL_CYCLES_VPC"/>
- <value value="4" name="A7XX_PERF_SP_STALL_CYCLES_TP"/>
- <value value="5" name="A7XX_PERF_SP_STALL_CYCLES_UCHE"/>
- <value value="6" name="A7XX_PERF_SP_STALL_CYCLES_RB"/>
- <value value="7" name="A7XX_PERF_SP_NON_EXECUTION_CYCLES"/>
- <value value="8" name="A7XX_PERF_SP_WAVE_CONTEXTS"/>
- <value value="9" name="A7XX_PERF_SP_WAVE_CONTEXT_CYCLES"/>
- <value value="10" name="A7XX_PERF_SP_STAGE_WAVE_CYCLES"/>
- <value value="11" name="A7XX_PERF_SP_STAGE_WAVE_SAMPLES"/>
- <value value="12" name="A7XX_PERF_SP_VS_STAGE_WAVE_CYCLES"/>
- <value value="13" name="A7XX_PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
- <value value="14" name="A7XX_PERF_SP_FS_STAGE_DURATION_CYCLES"/>
- <value value="15" name="A7XX_PERF_SP_VS_STAGE_DURATION_CYCLES"/>
- <value value="16" name="A7XX_PERF_SP_WAVE_CTRL_CYCLES"/>
- <value value="17" name="A7XX_PERF_SP_WAVE_LOAD_CYCLES"/>
- <value value="18" name="A7XX_PERF_SP_WAVE_EMIT_CYCLES"/>
- <value value="19" name="A7XX_PERF_SP_WAVE_NOP_CYCLES"/>
- <value value="20" name="A7XX_PERF_SP_WAVE_WAIT_CYCLES"/>
- <value value="21" name="A7XX_PERF_SP_WAVE_FETCH_CYCLES"/>
- <value value="22" name="A7XX_PERF_SP_WAVE_IDLE_CYCLES"/>
- <value value="23" name="A7XX_PERF_SP_WAVE_END_CYCLES"/>
- <value value="24" name="A7XX_PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
- <value value="25" name="A7XX_PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
- <value value="26" name="A7XX_PERF_SP_WAVE_JOIN_CYCLES"/>
- <value value="27" name="A7XX_PERF_SP_LM_LOAD_INSTRUCTIONS"/>
- <value value="28" name="A7XX_PERF_SP_LM_STORE_INSTRUCTIONS"/>
- <value value="29" name="A7XX_PERF_SP_LM_ATOMICS"/>
- <value value="30" name="A7XX_PERF_SP_GM_LOAD_INSTRUCTIONS"/>
- <value value="31" name="A7XX_PERF_SP_GM_STORE_INSTRUCTIONS"/>
- <value value="32" name="A7XX_PERF_SP_GM_ATOMICS"/>
- <value value="33" name="A7XX_PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="34" name="A7XX_PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="35" name="A7XX_PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="36" name="A7XX_PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="37" name="A7XX_PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="38" name="A7XX_PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
- <value value="39" name="A7XX_PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="40" name="A7XX_PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="41" name="A7XX_PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="42" name="A7XX_PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
- <value value="43" name="A7XX_PERF_SP_VS_INSTRUCTIONS"/>
- <value value="44" name="A7XX_PERF_SP_FS_INSTRUCTIONS"/>
- <value value="45" name="A7XX_PERF_SP_ADDR_LOCK_COUNT"/>
- <value value="46" name="A7XX_PERF_SP_UCHE_READ_TRANS"/>
- <value value="47" name="A7XX_PERF_SP_UCHE_WRITE_TRANS"/>
- <value value="48" name="A7XX_PERF_SP_EXPORT_VPC_TRANS"/>
- <value value="49" name="A7XX_PERF_SP_EXPORT_RB_TRANS"/>
- <value value="50" name="A7XX_PERF_SP_PIXELS_KILLED"/>
- <value value="51" name="A7XX_PERF_SP_ICL1_REQUESTS"/>
- <value value="52" name="A7XX_PERF_SP_ICL1_MISSES"/>
- <value value="53" name="A7XX_PERF_SP_HS_INSTRUCTIONS"/>
- <value value="54" name="A7XX_PERF_SP_DS_INSTRUCTIONS"/>
- <value value="55" name="A7XX_PERF_SP_GS_INSTRUCTIONS"/>
- <value value="56" name="A7XX_PERF_SP_CS_INSTRUCTIONS"/>
- <value value="57" name="A7XX_PERF_SP_GPR_READ"/>
- <value value="58" name="A7XX_PERF_SP_GPR_WRITE"/>
- <value value="59" name="A7XX_PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="60" name="A7XX_PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="61" name="A7XX_PERF_SP_LM_BANK_CONFLICTS"/>
- <value value="62" name="A7XX_PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
- <value value="63" name="A7XX_PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
- <value value="64" name="A7XX_PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
- <value value="65" name="A7XX_PERF_SP_LM_WORKING_CYCLES"/>
- <value value="66" name="A7XX_PERF_SP_DISPATCHER_WORKING_CYCLES"/>
- <value value="67" name="A7XX_PERF_SP_SEQUENCER_WORKING_CYCLES"/>
- <value value="68" name="A7XX_PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
- <value value="69" name="A7XX_PERF_SP_STARVE_CYCLES_HLSQ"/>
- <value value="70" name="A7XX_PERF_SP_NON_EXECUTION_LS_CYCLES"/>
- <value value="71" name="A7XX_PERF_SP_WORKING_EU"/>
- <value value="72" name="A7XX_PERF_SP_ANY_EU_WORKING"/>
- <value value="73" name="A7XX_PERF_SP_WORKING_EU_FS_STAGE"/>
- <value value="74" name="A7XX_PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
- <value value="75" name="A7XX_PERF_SP_WORKING_EU_VS_STAGE"/>
- <value value="76" name="A7XX_PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
- <value value="77" name="A7XX_PERF_SP_WORKING_EU_CS_STAGE"/>
- <value value="78" name="A7XX_PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
- <value value="79" name="A7XX_PERF_SP_GPR_READ_PREFETCH"/>
- <value value="80" name="A7XX_PERF_SP_GPR_READ_CONFLICT"/>
- <value value="81" name="A7XX_PERF_SP_GPR_WRITE_CONFLICT"/>
- <value value="82" name="A7XX_PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
- <value value="83" name="A7XX_PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
- <value value="84" name="A7XX_PERF_SP_EXECUTABLE_WAVES"/>
- <value value="85" name="A7XX_PERF_SP_ICL1_MISS_FETCH_CYCLES"/>
- <value value="86" name="A7XX_PERF_SP_WORKING_EU_LPAC"/>
- <value value="87" name="A7XX_PERF_SP_BYPASS_BUSY_CYCLES"/>
- <value value="88" name="A7XX_PERF_SP_ANY_EU_WORKING_LPAC"/>
- <value value="89" name="A7XX_PERF_SP_WAVE_ALU_CYCLES"/>
- <value value="90" name="A7XX_PERF_SP_WAVE_EFU_CYCLES"/>
- <value value="91" name="A7XX_PERF_SP_WAVE_INT_CYCLES"/>
- <value value="92" name="A7XX_PERF_SP_WAVE_CSP_CYCLES"/>
- <value value="93" name="A7XX_PERF_SP_EWAVE_CONTEXTS"/>
- <value value="94" name="A7XX_PERF_SP_EWAVE_CONTEXT_CYCLES"/>
- <value value="95" name="A7XX_PERF_SP_LPAC_BUSY_CYCLES"/>
- <value value="96" name="A7XX_PERF_SP_LPAC_INSTRUCTIONS"/>
- <value value="97" name="A7XX_PERF_SP_FS_STAGE_1X_WAVES"/>
- <value value="98" name="A7XX_PERF_SP_FS_STAGE_2X_WAVES"/>
- <value value="99" name="A7XX_PERF_SP_QUADS"/>
- <value value="100" name="A7XX_PERF_SP_CS_INVOCATIONS"/>
- <value value="101" name="A7XX_PERF_SP_PIXELS"/>
- <value value="102" name="A7XX_PERF_SP_LPAC_DRAWCALLS"/>
- <value value="103" name="A7XX_PERF_SP_PI_WORKING_CYCLES"/>
- <value value="104" name="A7XX_PERF_SP_WAVE_INPUT_CYCLES"/>
- <value value="105" name="A7XX_PERF_SP_WAVE_OUTPUT_CYCLES"/>
- <value value="106" name="A7XX_PERF_SP_WAVE_HWAVE_WAIT_CYCLES"/>
- <value value="107" name="A7XX_PERF_SP_WAVE_HWAVE_SYNC"/>
- <value value="108" name="A7XX_PERF_SP_OUTPUT_3D_PIXELS"/>
- <value value="109" name="A7XX_PERF_SP_FULL_ALU_MAD_INSTRUCTIONS"/>
- <value value="110" name="A7XX_PERF_SP_HALF_ALU_MAD_INSTRUCTIONS"/>
- <value value="111" name="A7XX_PERF_SP_FULL_ALU_MUL_INSTRUCTIONS"/>
- <value value="112" name="A7XX_PERF_SP_HALF_ALU_MUL_INSTRUCTIONS"/>
- <value value="113" name="A7XX_PERF_SP_FULL_ALU_ADD_INSTRUCTIONS"/>
- <value value="114" name="A7XX_PERF_SP_HALF_ALU_ADD_INSTRUCTIONS"/>
- <value value="115" name="A7XX_PERF_SP_BARY_FP32_INSTRUCTIONS"/>
- <value value="116" name="A7XX_PERF_SP_ALU_GPR_READ_CYCLES"/>
- <value value="117" name="A7XX_PERF_SP_ALU_DATA_FORWARDING_CYCLES"/>
- <value value="118" name="A7XX_PERF_SP_LM_FULL_CYCLES"/>
- <value value="119" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_CYCLES"/>
- <value value="120" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_SAMPLES"/>
- <value value="121" name="A7XX_PERF_SP_FS_STAGE_PI_TEX_INSTRUCTION"/>
- <value value="122" name="A7XX_PERF_SP_RAY_QUERY_INSTRUCTIONS"/>
- <value value="123" name="A7XX_PERF_SP_RBRT_KICKOFF_FIBERS"/>
- <value value="124" name="A7XX_PERF_SP_RBRT_KICKOFF_DQUADS"/>
- <value value="125" name="A7XX_PERF_SP_RTU_BUSY_CYCLES"/>
- <value value="126" name="A7XX_PERF_SP_RTU_L0_HITS"/>
- <value value="127" name="A7XX_PERF_SP_RTU_L0_MISSES"/>
- <value value="128" name="A7XX_PERF_SP_RTU_L0_HIT_ON_MISS"/>
- <value value="129" name="A7XX_PERF_SP_RTU_STALL_CYCLES_WAVE_QUEUE"/>
- <value value="130" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_HIT_QUEUE"/>
- <value value="131" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_MISS_QUEUE"/>
- <value value="132" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0D_IDX_QUEUE"/>
- <value value="133" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0DATA"/>
- <value value="134" name="A7XX_PERF_SP_RTU_STALL_CYCLES_REPLACE_CNT"/>
- <value value="135" name="A7XX_PERF_SP_RTU_STALL_CYCLES_MRG_CNT"/>
- <value value="136" name="A7XX_PERF_SP_RTU_STALL_CYCLES_UCHE"/>
- <value value="137" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_L0"/>
- <value value="138" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_INS_FIFO"/>
- <value value="139" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_CYCLES"/>
- <value value="140" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_SAMPLES"/>
- <value value="141" name="A7XX_PERF_SP_STCHE_MISS_INC_VS"/>
- <value value="142" name="A7XX_PERF_SP_STCHE_MISS_INC_FS"/>
- <value value="143" name="A7XX_PERF_SP_STCHE_MISS_INC_BV"/>
- <value value="144" name="A7XX_PERF_SP_STCHE_MISS_INC_LPAC"/>
- <value value="145" name="A7XX_PERF_SP_VGPR_ACTIVE_CONTEXTS"/>
- <value value="146" name="A7XX_PERF_SP_PGPR_ALLOC_CONTEXTS"/>
- <value value="147" name="A7XX_PERF_SP_VGPR_ALLOC_CONTEXTS"/>
- <value value="148" name="A7XX_PERF_SP_RTU_RAY_BOX_INTERSECTIONS"/>
- <value value="149" name="A7XX_PERF_SP_RTU_RAY_TRIANGLE_INTERSECTIONS"/>
- <value value="150" name="A7XX_PERF_SP_SCH_STALL_CYCLES_RTU"/>
-</enum>
-
-<enum name="a7xx_rb_perfcounter_select">
- <value value="0" name="A7XX_PERF_RB_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_RB_STALL_CYCLES_HLSQ"/>
- <value value="2" name="A7XX_PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
- <value value="3" name="A7XX_PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
- <value value="4" name="A7XX_PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
- <value value="5" name="A7XX_PERF_RB_STARVE_CYCLES_SP"/>
- <value value="6" name="A7XX_PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
- <value value="7" name="A7XX_PERF_RB_STARVE_CYCLES_CCU"/>
- <value value="8" name="A7XX_PERF_RB_STARVE_CYCLES_Z_PLANE"/>
- <value value="9" name="A7XX_PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
- <value value="10" name="A7XX_PERF_RB_Z_WORKLOAD"/>
- <value value="11" name="A7XX_PERF_RB_HLSQ_ACTIVE"/>
- <value value="12" name="A7XX_PERF_RB_Z_READ"/>
- <value value="13" name="A7XX_PERF_RB_Z_WRITE"/>
- <value value="14" name="A7XX_PERF_RB_C_READ"/>
- <value value="15" name="A7XX_PERF_RB_C_WRITE"/>
- <value value="16" name="A7XX_PERF_RB_TOTAL_PASS"/>
- <value value="17" name="A7XX_PERF_RB_Z_PASS"/>
- <value value="18" name="A7XX_PERF_RB_Z_FAIL"/>
- <value value="19" name="A7XX_PERF_RB_S_FAIL"/>
- <value value="20" name="A7XX_PERF_RB_BLENDED_FXP_COMPONENTS"/>
- <value value="21" name="A7XX_PERF_RB_BLENDED_FP16_COMPONENTS"/>
- <value value="22" name="A7XX_PERF_RB_PS_INVOCATIONS"/>
- <value value="23" name="A7XX_PERF_RB_2D_ALIVE_CYCLES"/>
- <value value="24" name="A7XX_PERF_RB_2D_STALL_CYCLES_A2D"/>
- <value value="25" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SRC"/>
- <value value="26" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SP"/>
- <value value="27" name="A7XX_PERF_RB_2D_STARVE_CYCLES_DST"/>
- <value value="28" name="A7XX_PERF_RB_2D_VALID_PIXELS"/>
- <value value="29" name="A7XX_PERF_RB_3D_PIXELS"/>
- <value value="30" name="A7XX_PERF_RB_BLENDER_WORKING_CYCLES"/>
- <value value="31" name="A7XX_PERF_RB_ZPROC_WORKING_CYCLES"/>
- <value value="32" name="A7XX_PERF_RB_CPROC_WORKING_CYCLES"/>
- <value value="33" name="A7XX_PERF_RB_SAMPLER_WORKING_CYCLES"/>
- <value value="34" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
- <value value="35" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
- <value value="36" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
- <value value="37" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
- <value value="38" name="A7XX_PERF_RB_STALL_CYCLES_VPC"/>
- <value value="39" name="A7XX_PERF_RB_2D_INPUT_TRANS"/>
- <value value="40" name="A7XX_PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
- <value value="41" name="A7XX_PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
- <value value="42" name="A7XX_PERF_RB_BLENDED_FP32_COMPONENTS"/>
- <value value="43" name="A7XX_PERF_RB_COLOR_PIX_TILES"/>
- <value value="44" name="A7XX_PERF_RB_STALL_CYCLES_CCU"/>
- <value value="45" name="A7XX_PERF_RB_EARLY_Z_ARB3_GRANT"/>
- <value value="46" name="A7XX_PERF_RB_LATE_Z_ARB3_GRANT"/>
- <value value="47" name="A7XX_PERF_RB_EARLY_Z_SKIP_GRANT"/>
- <value value="48" name="A7XX_PERF_RB_VRS_1x1_QUADS"/>
- <value value="49" name="A7XX_PERF_RB_VRS_2x1_QUADS"/>
- <value value="50" name="A7XX_PERF_RB_VRS_1x2_QUADS"/>
- <value value="51" name="A7XX_PERF_RB_VRS_2x2_QUADS"/>
- <value value="52" name="A7XX_PERF_RB_VRS_4x2_QUADS"/>
- <value value="53" name="A7XX_PERF_RB_VRS_4x4_QUADS"/>
-</enum>
-
-<enum name="a7xx_vsc_perfcounter_select">
- <value value="0" name="A7XX_PERF_VSC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_VSC_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_VSC_STALL_CYCLES_UCHE"/>
- <value value="3" name="A7XX_PERF_VSC_EOT_NUM"/>
- <value value="4" name="A7XX_PERF_VSC_INPUT_TILES"/>
-</enum>
-
-<enum name="a7xx_ccu_perfcounter_select">
- <value value="0" name="A7XX_PERF_CCU_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
- <value value="2" name="A7XX_PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
- <value value="3" name="A7XX_PERF_CCU_DEPTH_BLOCKS"/>
- <value value="4" name="A7XX_PERF_CCU_COLOR_BLOCKS"/>
- <value value="5" name="A7XX_PERF_CCU_DEPTH_BLOCK_HIT"/>
- <value value="6" name="A7XX_PERF_CCU_COLOR_BLOCK_HIT"/>
- <value value="7" name="A7XX_PERF_CCU_PARTIAL_BLOCK_READ"/>
- <value value="8" name="A7XX_PERF_CCU_GMEM_READ"/>
- <value value="9" name="A7XX_PERF_CCU_GMEM_WRITE"/>
- <value value="10" name="A7XX_PERF_CCU_2D_RD_REQ"/>
- <value value="11" name="A7XX_PERF_CCU_2D_WR_REQ"/>
- <value value="12" name="A7XX_PERF_CCU_UBWC_COLOR_BLOCKS_CONCURRENT"/>
- <value value="13" name="A7XX_PERF_CCU_UBWC_DEPTH_BLOCKS_CONCURRENT"/>
- <value value="14" name="A7XX_PERF_CCU_COLOR_RESOLVE_DROPPED"/>
- <value value="15" name="A7XX_PERF_CCU_DEPTH_RESOLVE_DROPPED"/>
- <value value="16" name="A7XX_PERF_CCU_COLOR_RENDER_CONCURRENT"/>
- <value value="17" name="A7XX_PERF_CCU_DEPTH_RENDER_CONCURRENT"/>
- <value value="18" name="A7XX_PERF_CCU_COLOR_RESOLVE_AFTER_RENDER"/>
- <value value="19" name="A7XX_PERF_CCU_DEPTH_RESOLVE_AFTER_RENDER"/>
- <value value="20" name="A7XX_PERF_CCU_GMEM_EXTRA_DEPTH_READ"/>
- <value value="21" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA"/>
- <value value="22" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA_FULL"/>
-</enum>
-
-<enum name="a7xx_lrz_perfcounter_select">
- <value value="0" name="A7XX_PERF_LRZ_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_LRZ_STARVE_CYCLES_RAS"/>
- <value value="2" name="A7XX_PERF_LRZ_STALL_CYCLES_RB"/>
- <value value="3" name="A7XX_PERF_LRZ_STALL_CYCLES_VSC"/>
- <value value="4" name="A7XX_PERF_LRZ_STALL_CYCLES_VPC"/>
- <value value="5" name="A7XX_PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
- <value value="6" name="A7XX_PERF_LRZ_STALL_CYCLES_UCHE"/>
- <value value="7" name="A7XX_PERF_LRZ_LRZ_READ"/>
- <value value="8" name="A7XX_PERF_LRZ_LRZ_WRITE"/>
- <value value="9" name="A7XX_PERF_LRZ_READ_LATENCY"/>
- <value value="10" name="A7XX_PERF_LRZ_MERGE_CACHE_UPDATING"/>
- <value value="11" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
- <value value="12" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
- <value value="13" name="A7XX_PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
- <value value="14" name="A7XX_PERF_LRZ_FULL_8X8_TILES"/>
- <value value="15" name="A7XX_PERF_LRZ_PARTIAL_8X8_TILES"/>
- <value value="16" name="A7XX_PERF_LRZ_TILE_KILLED"/>
- <value value="17" name="A7XX_PERF_LRZ_TOTAL_PIXEL"/>
- <value value="18" name="A7XX_PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
- <value value="19" name="A7XX_PERF_LRZ_FEEDBACK_ACCEPT"/>
- <value value="20" name="A7XX_PERF_LRZ_FEEDBACK_DISCARD"/>
- <value value="21" name="A7XX_PERF_LRZ_FEEDBACK_STALL"/>
- <value value="22" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
- <value value="23" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
- <value value="24" name="A7XX_PERF_LRZ_RAS_MASK_TRANS"/>
- <value value="25" name="A7XX_PERF_LRZ_STALL_CYCLES_MVC"/>
- <value value="26" name="A7XX_PERF_LRZ_TILE_KILLED_BY_IMAGE_VRS"/>
- <value value="27" name="A7XX_PERF_LRZ_TILE_KILLED_BY_Z"/>
-</enum>
-
-<enum name="a7xx_cmp_perfcounter_select">
- <value value="0" name="A7XX_PERF_CMPDECMP_STALL_CYCLES_ARB"/>
- <value value="1" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
- <value value="2" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
- <value value="3" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
- <value value="4" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
- <value value="5" name="A7XX_PERF_CMPDECMP_VBIF_READ_REQUEST"/>
- <value value="6" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
- <value value="7" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA"/>
- <value value="8" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA"/>
- <value value="9" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
- <value value="10" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
- <value value="11" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
- <value value="12" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
- <value value="13" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
- <value value="14" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
- <value value="15" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
- <value value="16" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
- <value value="17" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
- <value value="18" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
- <value value="19" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
- <value value="20" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
- <value value="21" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
- <value value="22" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
- <value value="23" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
- <value value="24" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
- <value value="25" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
- <value value="26" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
- <value value="27" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
- <value value="28" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
- <value value="29" name="A7XX_PERF_CMPDECMP_RESOLVE_EVENTS"/>
- <value value="30" name="A7XX_PERF_CMPDECMP_CONCURRENT_RESOLVE_EVENTS"/>
- <value value="31" name="A7XX_PERF_CMPDECMP_DROPPED_CLEAR_EVENTS"/>
- <value value="32" name="A7XX_PERF_CMPDECMP_ST_BLOCKS_CONCURRENT"/>
- <value value="33" name="A7XX_PERF_CMPDECMP_LRZ_ST_BLOCKS_CONCURRENT"/>
- <value value="34" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG0_COUNT"/>
- <value value="35" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG1_COUNT"/>
- <value value="36" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG2_COUNT"/>
- <value value="37" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG3_COUNT"/>
- <value value="38" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG4_COUNT"/>
- <value value="39" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG5_COUNT"/>
- <value value="40" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG6_COUNT"/>
- <value value="41" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG8_COUNT"/>
- <value value="42" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG0_COUNT"/>
- <value value="43" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG1_COUNT"/>
- <value value="44" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG2_COUNT"/>
- <value value="45" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG3_COUNT"/>
- <value value="46" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG4_COUNT"/>
- <value value="47" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG5_COUNT"/>
- <value value="48" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG6_COUNT"/>
- <value value="49" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG8_COUNT"/>
-</enum>
-
-<enum name="a7xx_gbif_perfcounter_select">
- <value value="0" name="A7XX_PERF_GBIF_RESERVED_0"/>
- <value value="1" name="A7XX_PERF_GBIF_RESERVED_1"/>
- <value value="2" name="A7XX_PERF_GBIF_RESERVED_2"/>
- <value value="3" name="A7XX_PERF_GBIF_RESERVED_3"/>
- <value value="4" name="A7XX_PERF_GBIF_RESERVED_4"/>
- <value value="5" name="A7XX_PERF_GBIF_RESERVED_5"/>
- <value value="6" name="A7XX_PERF_GBIF_RESERVED_6"/>
- <value value="7" name="A7XX_PERF_GBIF_RESERVED_7"/>
- <value value="8" name="A7XX_PERF_GBIF_RESERVED_8"/>
- <value value="9" name="A7XX_PERF_GBIF_RESERVED_9"/>
- <value value="10" name="A7XX_PERF_GBIF_AXI0_READ_REQUESTS_TOTAL"/>
- <value value="11" name="A7XX_PERF_GBIF_AXI1_READ_REQUESTS_TOTAL"/>
- <value value="12" name="A7XX_PERF_GBIF_RESERVED_12"/>
- <value value="13" name="A7XX_PERF_GBIF_RESERVED_13"/>
- <value value="14" name="A7XX_PERF_GBIF_RESERVED_14"/>
- <value value="15" name="A7XX_PERF_GBIF_RESERVED_15"/>
- <value value="16" name="A7XX_PERF_GBIF_RESERVED_16"/>
- <value value="17" name="A7XX_PERF_GBIF_RESERVED_17"/>
- <value value="18" name="A7XX_PERF_GBIF_RESERVED_18"/>
- <value value="19" name="A7XX_PERF_GBIF_RESERVED_19"/>
- <value value="20" name="A7XX_PERF_GBIF_RESERVED_20"/>
- <value value="21" name="A7XX_PERF_GBIF_RESERVED_21"/>
- <value value="22" name="A7XX_PERF_GBIF_AXI0_WRITE_REQUESTS_TOTAL"/>
- <value value="23" name="A7XX_PERF_GBIF_AXI1_WRITE_REQUESTS_TOTAL"/>
- <value value="24" name="A7XX_PERF_GBIF_RESERVED_24"/>
- <value value="25" name="A7XX_PERF_GBIF_RESERVED_25"/>
- <value value="26" name="A7XX_PERF_GBIF_RESERVED_26"/>
- <value value="27" name="A7XX_PERF_GBIF_RESERVED_27"/>
- <value value="28" name="A7XX_PERF_GBIF_RESERVED_28"/>
- <value value="29" name="A7XX_PERF_GBIF_RESERVED_29"/>
- <value value="30" name="A7XX_PERF_GBIF_RESERVED_30"/>
- <value value="31" name="A7XX_PERF_GBIF_RESERVED_31"/>
- <value value="32" name="A7XX_PERF_GBIF_RESERVED_32"/>
- <value value="33" name="A7XX_PERF_GBIF_RESERVED_33"/>
- <value value="34" name="A7XX_PERF_GBIF_AXI0_READ_DATA_BEATS_TOTAL"/>
- <value value="35" name="A7XX_PERF_GBIF_AXI1_READ_DATA_BEATS_TOTAL"/>
- <value value="36" name="A7XX_PERF_GBIF_RESERVED_36"/>
- <value value="37" name="A7XX_PERF_GBIF_RESERVED_37"/>
- <value value="38" name="A7XX_PERF_GBIF_RESERVED_38"/>
- <value value="39" name="A7XX_PERF_GBIF_RESERVED_39"/>
- <value value="40" name="A7XX_PERF_GBIF_RESERVED_40"/>
- <value value="41" name="A7XX_PERF_GBIF_RESERVED_41"/>
- <value value="42" name="A7XX_PERF_GBIF_RESERVED_42"/>
- <value value="43" name="A7XX_PERF_GBIF_RESERVED_43"/>
- <value value="44" name="A7XX_PERF_GBIF_RESERVED_44"/>
- <value value="45" name="A7XX_PERF_GBIF_RESERVED_45"/>
- <value value="46" name="A7XX_PERF_GBIF_AXI0_WRITE_DATA_BEATS_TOTAL"/>
- <value value="47" name="A7XX_PERF_GBIF_AXI1_WRITE_DATA_BEATS_TOTAL"/>
- <value value="48" name="A7XX_PERF_GBIF_RESERVED_48"/>
- <value value="49" name="A7XX_PERF_GBIF_RESERVED_49"/>
- <value value="50" name="A7XX_PERF_GBIF_RESERVED_50"/>
- <value value="51" name="A7XX_PERF_GBIF_RESERVED_51"/>
- <value value="52" name="A7XX_PERF_GBIF_RESERVED_52"/>
- <value value="53" name="A7XX_PERF_GBIF_RESERVED_53"/>
- <value value="54" name="A7XX_PERF_GBIF_RESERVED_54"/>
- <value value="55" name="A7XX_PERF_GBIF_RESERVED_55"/>
- <value value="56" name="A7XX_PERF_GBIF_RESERVED_56"/>
- <value value="57" name="A7XX_PERF_GBIF_RESERVED_57"/>
- <value value="58" name="A7XX_PERF_GBIF_RESERVED_58"/>
- <value value="59" name="A7XX_PERF_GBIF_RESERVED_59"/>
- <value value="60" name="A7XX_PERF_GBIF_RESERVED_60"/>
- <value value="61" name="A7XX_PERF_GBIF_RESERVED_61"/>
- <value value="62" name="A7XX_PERF_GBIF_RESERVED_62"/>
- <value value="63" name="A7XX_PERF_GBIF_RESERVED_63"/>
- <value value="64" name="A7XX_PERF_GBIF_RESERVED_64"/>
- <value value="65" name="A7XX_PERF_GBIF_RESERVED_65"/>
- <value value="66" name="A7XX_PERF_GBIF_RESERVED_66"/>
- <value value="67" name="A7XX_PERF_GBIF_RESERVED_67"/>
- <value value="68" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_RD_ALL"/>
- <value value="69" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_RD_ALL"/>
- <value value="70" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_WR_ALL"/>
- <value value="71" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_WR_ALL"/>
- <value value="72" name="A7XX_PERF_GBIF_AXI_CH0_REQUEST_HELD_OFF"/>
- <value value="73" name="A7XX_PERF_GBIF_AXI_CH1_REQUEST_HELD_OFF"/>
- <value value="74" name="A7XX_PERF_GBIF_AXI_REQUEST_HELD_OFF"/>
- <value value="75" name="A7XX_PERF_GBIF_AXI_CH0_WRITE_DATA_HELD_OFF"/>
- <value value="76" name="A7XX_PERF_GBIF_AXI_CH1_WRITE_DATA_HELD_OFF"/>
- <value value="77" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_DATA_HELD_OFF"/>
- <value value="78" name="A7XX_PERF_GBIF_AXI_ALL_READ_BEATS"/>
- <value value="79" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_BEATS"/>
- <value value="80" name="A7XX_PERF_GBIF_AXI_ALL_BEATS"/>
-</enum>
-
-<enum name="a7xx_ufc_perfcounter_select">
- <value value="0" name="A7XX_PERF_UFC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_UFC_READ_DATA_VBIF"/>
- <value value="2" name="A7XX_PERF_UFC_WRITE_DATA_VBIF"/>
- <value value="3" name="A7XX_PERF_UFC_READ_REQUEST_VBIF"/>
- <value value="4" name="A7XX_PERF_UFC_WRITE_REQUEST_VBIF"/>
- <value value="5" name="A7XX_PERF_UFC_LRZ_FILTER_HIT"/>
- <value value="6" name="A7XX_PERF_UFC_LRZ_FILTER_MISS"/>
- <value value="7" name="A7XX_PERF_UFC_CRE_FILTER_HIT"/>
- <value value="8" name="A7XX_PERF_UFC_CRE_FILTER_MISS"/>
- <value value="9" name="A7XX_PERF_UFC_SP_FILTER_HIT"/>
- <value value="10" name="A7XX_PERF_UFC_SP_FILTER_MISS"/>
- <value value="11" name="A7XX_PERF_UFC_SP_REQUESTS"/>
- <value value="12" name="A7XX_PERF_UFC_TP_FILTER_HIT"/>
- <value value="13" name="A7XX_PERF_UFC_TP_FILTER_MISS"/>
- <value value="14" name="A7XX_PERF_UFC_TP_REQUESTS"/>
- <value value="15" name="A7XX_PERF_UFC_MAIN_HIT_LRZ_PREFETCH"/>
- <value value="16" name="A7XX_PERF_UFC_MAIN_HIT_CRE_PREFETCH"/>
- <value value="17" name="A7XX_PERF_UFC_MAIN_HIT_SP_PREFETCH"/>
- <value value="18" name="A7XX_PERF_UFC_MAIN_HIT_TP_PREFETCH"/>
- <value value="19" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_READ"/>
- <value value="20" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_WRITE"/>
- <value value="21" name="A7XX_PERF_UFC_MAIN_MISS_LRZ_PREFETCH"/>
- <value value="22" name="A7XX_PERF_UFC_MAIN_MISS_CRE_PREFETCH"/>
- <value value="23" name="A7XX_PERF_UFC_MAIN_MISS_SP_PREFETCH"/>
- <value value="24" name="A7XX_PERF_UFC_MAIN_MISS_TP_PREFETCH"/>
- <value value="25" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_READ"/>
- <value value="26" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_WRITE"/>
- <value value="27" name="A7XX_PERF_UFC_UBWC_READ_UFC_TRANS"/>
- <value value="28" name="A7XX_PERF_UFC_UBWC_WRITE_UFC_TRANS"/>
- <value value="29" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_CMD"/>
- <value value="30" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_RDATA"/>
- <value value="31" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_WDATA"/>
- <value value="32" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_WR_FLAG"/>
- <value value="33" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_FLAG_RTN"/>
- <value value="34" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_EVENT"/>
- <value value="35" name="A7XX_PERF_UFC_LRZ_PREFETCH_STALLED_CYCLES"/>
- <value value="36" name="A7XX_PERF_UFC_CRE_PREFETCH_STALLED_CYCLES"/>
- <value value="37" name="A7XX_PERF_UFC_SPTP_PREFETCH_STALLED_CYCLES"/>
- <value value="38" name="A7XX_PERF_UFC_UBWC_RD_STALLED_CYCLES"/>
- <value value="39" name="A7XX_PERF_UFC_UBWC_WR_STALLED_CYCLES"/>
- <value value="40" name="A7XX_PERF_UFC_PREFETCH_STALLED_CYCLES"/>
- <value value="41" name="A7XX_PERF_UFC_EVICTION_STALLED_CYCLES"/>
- <value value="42" name="A7XX_PERF_UFC_LOCK_STALLED_CYCLES"/>
- <value value="43" name="A7XX_PERF_UFC_MISS_LATENCY_CYCLES"/>
- <value value="44" name="A7XX_PERF_UFC_MISS_LATENCY_SAMPLES"/>
- <value value="45" name="A7XX_PERF_UFC_UBWC_REQ_STALLED_CYCLES"/>
- <value value="46" name="A7XX_PERF_UFC_TP_HINT_TAG_MISS"/>
- <value value="47" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_RDY"/>
- <value value="48" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_NRDY"/>
- <value value="49" name="A7XX_PERF_UFC_TP_HINT_IS_FCLEAR"/>
- <value value="50" name="A7XX_PERF_UFC_TP_HINT_IS_ALPHA0"/>
- <value value="51" name="A7XX_PERF_UFC_SP_L1_FILTER_HIT"/>
- <value value="52" name="A7XX_PERF_UFC_SP_L1_FILTER_MISS"/>
- <value value="53" name="A7XX_PERF_UFC_SP_L1_FILTER_REQUESTS"/>
- <value value="54" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_RDY"/>
- <value value="55" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_NRDY"/>
- <value value="56" name="A7XX_PERF_UFC_TP_L1_TAG_MISS"/>
- <value value="57" name="A7XX_PERF_UFC_TP_L1_FILTER_REQUESTS"/>
-</enum>
-
<domain name="A6XX" width="32" prefix="variant" varset="chip">
<bitset name="A6XX_RBBM_INT_0_MASK" inline="no" varset="chip">
<bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
@@ -2371,7 +177,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x08ab" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A7XX-"/>
<array offset="0x08D0" name="CP_PERFCTR_CP_SEL" stride="1" length="14"/>
<array offset="0x08e0" name="CP_BV_PERFCTR_CP_SEL" stride="1" length="7" variants="A7XX-"/>
- <reg64 offset="0x0900" name="CP_CRASH_SCRIPT_BASE"/>
+ <reg64 offset="0x0900" name="CP_CRASH_DUMP_SCRIPT_BASE"/>
<reg32 offset="0x0902" name="CP_CRASH_DUMP_CNTL"/>
<reg32 offset="0x0903" name="CP_CRASH_DUMP_STATUS"/>
<reg32 offset="0x0908" name="CP_SQE_STAT_ADDR"/>
@@ -2400,22 +206,22 @@ to upconvert to 32b float internally?
-->
<reg64 offset="0x0934" name="CP_VSD_BASE"/>
- <bitset name="a6xx_roq_stat" inline="yes">
+ <bitset name="a6xx_roq_status" inline="yes">
<bitfield name="RPTR" low="0" high="9"/>
<bitfield name="WPTR" low="16" high="25"/>
</bitset>
- <reg32 offset="0x0939" name="CP_ROQ_RB_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093a" name="CP_ROQ_IB1_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093b" name="CP_ROQ_IB2_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093c" name="CP_ROQ_SDS_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093d" name="CP_ROQ_MRB_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093e" name="CP_ROQ_VSD_STAT" type="a6xx_roq_stat"/>
-
- <reg32 offset="0x0943" name="CP_IB1_DWORDS"/>
- <reg32 offset="0x0944" name="CP_IB2_DWORDS"/>
- <reg32 offset="0x0945" name="CP_SDS_DWORDS"/>
- <reg32 offset="0x0946" name="CP_MRB_DWORDS"/>
- <reg32 offset="0x0947" name="CP_VSD_DWORDS"/>
+ <reg32 offset="0x0939" name="CP_ROQ_RB_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093a" name="CP_ROQ_IB1_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093b" name="CP_ROQ_IB2_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093c" name="CP_ROQ_SDS_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093d" name="CP_ROQ_MRB_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093e" name="CP_ROQ_VSD_STATUS" type="a6xx_roq_status"/>
+
+ <reg32 offset="0x0943" name="CP_IB1_INIT_SIZE"/>
+ <reg32 offset="0x0944" name="CP_IB2_INIT_SIZE"/>
+ <reg32 offset="0x0945" name="CP_SDS_INIT_SIZE"/>
+ <reg32 offset="0x0946" name="CP_MRB_INIT_SIZE"/>
+ <reg32 offset="0x0947" name="CP_VSD_INIT_SIZE"/>
<reg32 offset="0x0948" name="CP_ROQ_AVAIL_RB">
<doc>number of remaining dwords, incl. the current dword being consumed?</doc>
@@ -2451,6 +257,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x098D" name="CP_AHB_CNTL"/>
<reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" variants="A6XX"/>
<reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" type="a7xx_aperture_cntl" variants="A7XX-"/>
+ <reg32 offset="0x0A01" name="CP_APERTURE_CNTL_SQE" variants="A6XX"/>
<reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" variants="A6XX"/>
<reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" type="a7xx_aperture_cntl" variants="A7XX-"/>
@@ -2468,8 +275,8 @@ to upconvert to 32b float internally?
<reg32 offset="0x0a97" name="CP_BV_MEM_POOL_DBG_DATA" variants="A7XX-"/>
<reg64 offset="0x0a98" name="CP_BV_RB_RPTR_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a9a" name="CP_RESOURCE_TBL_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a9b" name="CP_RESOURCE_TBL_DBG_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0a9a" name="CP_RESOURCE_TABLE_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0a9b" name="CP_RESOURCE_TABLE_DBG_DATA" variants="A7XX-"/>
<reg32 offset="0x0ad0" name="CP_BV_APRIV_CNTL" variants="A7XX-"/>
<reg32 offset="0x0ada" name="CP_BV_CHICKEN_DBG" variants="A7XX-"/>
@@ -2619,28 +426,17 @@ to upconvert to 32b float internally?
vertices in, number of primitives assembled, etc.
-->
- <reg32 offset="0x0540" name="RBBM_PRIMCTR_0_LO"/> <!-- vs vertices in -->
- <reg32 offset="0x0541" name="RBBM_PRIMCTR_0_HI"/>
- <reg32 offset="0x0542" name="RBBM_PRIMCTR_1_LO"/> <!-- vs primitives out -->
- <reg32 offset="0x0543" name="RBBM_PRIMCTR_1_HI"/>
- <reg32 offset="0x0544" name="RBBM_PRIMCTR_2_LO"/> <!-- hs vertices in -->
- <reg32 offset="0x0545" name="RBBM_PRIMCTR_2_HI"/>
- <reg32 offset="0x0546" name="RBBM_PRIMCTR_3_LO"/> <!-- hs patches out -->
- <reg32 offset="0x0547" name="RBBM_PRIMCTR_3_HI"/>
- <reg32 offset="0x0548" name="RBBM_PRIMCTR_4_LO"/> <!-- dss vertices in -->
- <reg32 offset="0x0549" name="RBBM_PRIMCTR_4_HI"/>
- <reg32 offset="0x054a" name="RBBM_PRIMCTR_5_LO"/> <!-- ds primitives out -->
- <reg32 offset="0x054b" name="RBBM_PRIMCTR_5_HI"/>
- <reg32 offset="0x054c" name="RBBM_PRIMCTR_6_LO"/> <!-- gs primitives in -->
- <reg32 offset="0x054d" name="RBBM_PRIMCTR_6_HI"/>
- <reg32 offset="0x054e" name="RBBM_PRIMCTR_7_LO"/> <!-- gs primitives out -->
- <reg32 offset="0x054f" name="RBBM_PRIMCTR_7_HI"/>
- <reg32 offset="0x0550" name="RBBM_PRIMCTR_8_LO"/> <!-- gs primitives out -->
- <reg32 offset="0x0551" name="RBBM_PRIMCTR_8_HI"/>
- <reg32 offset="0x0552" name="RBBM_PRIMCTR_9_LO"/> <!-- raster primitives in -->
- <reg32 offset="0x0553" name="RBBM_PRIMCTR_9_HI"/>
- <reg32 offset="0x0554" name="RBBM_PRIMCTR_10_LO"/>
- <reg32 offset="0x0555" name="RBBM_PRIMCTR_10_HI"/>
+ <reg64 offset="0x0540" name="RBBM_PIPESTAT_IAVERTICES"/>
+ <reg64 offset="0x0542" name="RBBM_PIPESTAT_IAPRIMITIVES"/>
+ <reg64 offset="0x0544" name="RBBM_PIPESTAT_VSINVOCATIONS"/>
+ <reg64 offset="0x0546" name="RBBM_PIPESTAT_HSINVOCATIONS"/>
+ <reg64 offset="0x0548" name="RBBM_PIPESTAT_DSINVOCATIONS"/>
+ <reg64 offset="0x054a" name="RBBM_PIPESTAT_GSINVOCATIONS"/>
+ <reg64 offset="0x054c" name="RBBM_PIPESTAT_GSPRIMITIVES"/>
+ <reg64 offset="0x054e" name="RBBM_PIPESTAT_CINVOCATIONS"/>
+ <reg64 offset="0x0550" name="RBBM_PIPESTAT_CPRIMITIVES"/>
+ <reg64 offset="0x0552" name="RBBM_PIPESTAT_PSINVOCATIONS"/>
+ <reg64 offset="0x0554" name="RBBM_PIPESTAT_CSINVOCATIONS"/>
<reg32 offset="0xF400" name="RBBM_SECVID_TRUST_CNTL"/>
<reg64 offset="0xF800" name="RBBM_SECVID_TSB_TRUSTED_BASE"/>
@@ -2779,7 +575,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x0011f" name="RBBM_CGC_P2S_TRIG_CMD" variants="A7XX-"/>
<reg32 offset="0x00120" name="RBBM_CLOCK_CNTL_TEX_FCHE"/>
<reg32 offset="0x00121" name="RBBM_CLOCK_DELAY_TEX_FCHE"/>
- <reg32 offset="0x00122" name="RBBM_CLOCK_HYST_TEX_FCHE"/>
+ <reg32 offset="0x00122" name="RBBM_CLOCK_HYST_TEX_FCHE" variants="A6XX"/>
<reg32 offset="0x00122" name="RBBM_CGC_P2S_STATUS" variants="A7XX-">
<bitfield name="TXDONE" pos="0" type="boolean"/>
</reg32>
@@ -2840,7 +636,7 @@ to upconvert to 32b float internally?
</reg32>
<reg32 offset="0x062f" name="DBGC_CFG_DBGBUS_TRACE_BUF1"/>
<reg32 offset="0x0630" name="DBGC_CFG_DBGBUS_TRACE_BUF2"/>
- <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2"/>
+ <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2" variants="A6XX"/>
<reg32 offset="0x0CD8" name="VSC_UNKNOWN_0CD8" variants="A7XX">
<doc>
Set to true when binning; not changed afterwards
@@ -2936,8 +732,8 @@ to upconvert to 32b float internally?
<bitfield name="WIDTH" low="0" high="7" shr="5" type="uint"/>
<bitfield name="HEIGHT" low="8" high="16" shr="4" type="uint"/>
</reg32>
- <reg64 offset="0x0c03" name="VSC_DRAW_STRM_SIZE_ADDRESS" type="waddress" usage="cmd"/>
- <reg32 offset="0x0c06" name="VSC_BIN_COUNT" usage="rp_blit">
+ <reg64 offset="0x0c03" name="VSC_SIZE_BASE" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c06" name="VSC_EXPANDED_BIN_CNTL" usage="rp_blit">
<bitfield name="NX" low="1" high="10" type="uint"/>
<bitfield name="NY" low="11" high="20" type="uint"/>
</reg32>
@@ -2967,14 +763,14 @@ to upconvert to 32b float internally?
LIMIT is set to PITCH - 64, to make room for a bit of overflow
-->
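<!--
    A minimal sketch (not from this file; the helper name is made up) of
    deriving LIMIT from a chosen PITCH per the rule above:

        #include <stdint.h>

        /* leave 64 bytes of slack so a small overflow past the end of a
           pipe's stream does not clobber the next one */
        static uint32_t vsc_prim_strm_limit(uint32_t pitch)
        {
            return pitch - 64;
        }
-->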
- <reg64 offset="0x0c30" name="VSC_PRIM_STRM_ADDRESS" type="waddress" usage="cmd"/>
- <reg32 offset="0x0c32" name="VSC_PRIM_STRM_PITCH" usage="cmd"/>
- <reg32 offset="0x0c33" name="VSC_PRIM_STRM_LIMIT" usage="cmd"/>
- <reg64 offset="0x0c34" name="VSC_DRAW_STRM_ADDRESS" type="waddress" usage="cmd"/>
- <reg32 offset="0x0c36" name="VSC_DRAW_STRM_PITCH" usage="cmd"/>
- <reg32 offset="0x0c37" name="VSC_DRAW_STRM_LIMIT" usage="cmd"/>
-
- <array offset="0x0c38" name="VSC_STATE" stride="1" length="32" usage="rp_blit">
+ <reg64 offset="0x0c30" name="VSC_PIPE_DATA_PRIM_BASE" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c32" name="VSC_PIPE_DATA_PRIM_STRIDE" usage="cmd"/>
+ <reg32 offset="0x0c33" name="VSC_PIPE_DATA_PRIM_LENGTH" usage="cmd"/>
+ <reg64 offset="0x0c34" name="VSC_PIPE_DATA_DRAW_BASE" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c36" name="VSC_PIPE_DATA_DRAW_STRIDE" usage="cmd"/>
+ <reg32 offset="0x0c37" name="VSC_PIPE_DATA_DRAW_LENGTH" usage="cmd"/>
+
+ <array offset="0x0c38" name="VSC_CHANNEL_VISIBILITY" stride="1" length="32" usage="rp_blit">
<doc>
Seems to be a bitmap of which tiles mapped to the VSC
pipe contain geometry.
@@ -2985,7 +781,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x0" name="REG"/>
</array>
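<!--
    An illustrative sketch, not from the source, of how this bitmap might
    be consumed; the helper name and the 32-entry layout are assumptions
    based on the array declaration above:

        #include <stdbool.h>
        #include <stdint.h>

        /* true if tile 'slot' (0..31) mapped to VSC pipe 'pipe' contains
           geometry, per VSC_CHANNEL_VISIBILITY[pipe] read back from hw */
        static bool vsc_tile_visible(const uint32_t visibility[32],
                                     unsigned pipe, unsigned slot)
        {
            return (visibility[pipe] >> slot) & 1;
        }
-->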
- <array offset="0x0c58" name="VSC_PRIM_STRM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
+ <array offset="0x0c58" name="VSC_PIPE_DATA_PRIM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
<doc>
Holds the size of the data written to the corresponding VSC_PRIM_STRM
buffer.
@@ -2993,10 +789,10 @@ to upconvert to 32b float internally?
<reg32 offset="0x0" name="REG"/>
</array>
- <array offset="0x0c78" name="VSC_DRAW_STRM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
+ <array offset="0x0c78" name="VSC_PIPE_DATA_DRAW_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
<doc>
Holds the size of the data written to the corresponding VSC pipe, i.e.
- same thing that is written out to VSC_DRAW_STRM_SIZE_ADDRESS_LO/HI
+ same thing that is written out to VSC_SIZE_BASE
</doc>
<reg32 offset="0x0" name="REG"/>
</array>
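<!--
    One plausible use of these per-pipe sizes (a hedged sketch, not a
    documented mechanism): comparing the amount written against the
    configured stream LENGTH to detect overflow before trusting the
    stream contents:

        #include <stdbool.h>
        #include <stdint.h>

        /* size_written from VSC_PIPE_DATA_DRAW_SIZE[pipe], length from
           VSC_PIPE_DATA_DRAW_LENGTH */
        static bool vsc_pipe_overflowed(uint32_t size_written, uint32_t length)
        {
            return size_written > length;
        }
-->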
@@ -3028,17 +824,17 @@ to upconvert to 32b float internally?
<bitfield name="PERSP_DIVISION_DISABLE" pos="9" type="boolean"/>
</reg32>
- <bitset name="a6xx_gras_xs_cl_cntl" inline="yes">
+ <bitset name="a6xx_gras_xs_clip_cull_distance" inline="yes">
<bitfield name="CLIP_MASK" low="0" high="7"/>
<bitfield name="CULL_MASK" low="8" high="15"/>
</bitset>
- <reg32 offset="0x8001" name="GRAS_VS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
- <reg32 offset="0x8002" name="GRAS_DS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
- <reg32 offset="0x8003" name="GRAS_GS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
- <reg32 offset="0x8004" name="GRAS_MAX_LAYER_INDEX" low="0" high="10" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8001" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
+ <reg32 offset="0x8002" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
+ <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
+ <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit"/>
- <reg32 offset="0x8005" name="GRAS_CNTL" usage="rp_blit">
- <!-- see also RB_RENDER_CONTROL0 -->
+ <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" usage="rp_blit">
+ <!-- see also RB_INTERP_CNTL -->
<bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
<bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
<bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
@@ -3067,7 +863,7 @@ to upconvert to 32b float internally?
<!-- <reg32 offset="0x80f0" name="GRAS_UNKNOWN_80F0" type="a6xx_reg_xy"/> -->
<!-- 0x8006-0x800f invalid -->
- <array offset="0x8010" name="GRAS_CL_VPORT" stride="6" length="16" usage="rp_blit">
+ <array offset="0x8010" name="GRAS_CL_VIEWPORT" stride="6" length="16" usage="rp_blit">
<reg32 offset="0" name="XOFFSET" type="float"/>
<reg32 offset="1" name="XSCALE" type="float"/>
<reg32 offset="2" name="YOFFSET" type="float"/>
@@ -3075,7 +871,7 @@ to upconvert to 32b float internally?
<reg32 offset="4" name="ZOFFSET" type="float"/>
<reg32 offset="5" name="ZSCALE" type="float"/>
</array>
- <array offset="0x8070" name="GRAS_CL_Z_CLAMP" stride="2" length="16" usage="rp_blit">
+ <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" usage="rp_blit">
<reg32 offset="0" name="MIN" type="float"/>
<reg32 offset="1" name="MAX" type="float"/>
</array>
@@ -3124,7 +920,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" usage="cmd">
<bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
- <bitfield name="SHIFTAMOUNT" low="1" high="2"/>
+ <enum name="a6xx_shift_amount">
+ <value value="0" name="NO_SHIFT"/>
+ <value value="1" name="HALF_PIXEL_SHIFT"/>
+ <value value="2" name="FULL_PIXEL_SHIFT"/>
+ </enum>
+ <bitfield name="SHIFTAMOUNT" low="1" high="2" type="a6xx_shift_amount"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="5"/>
</reg32>
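<!--
    A small sketch (assumption, with a made-up helper name) of packing
    this register from the bitfields declared above:

        #include <stdbool.h>
        #include <stdint.h>

        enum a6xx_shift_amount { NO_SHIFT, HALF_PIXEL_SHIFT, FULL_PIXEL_SHIFT };

        static uint32_t conservative_ras_cntl(bool enable,
                                              enum a6xx_shift_amount shift)
        {
            /* CONSERVATIVERASEN is bit 0, SHIFTAMOUNT occupies bits 1..2 */
            return (enable ? 1u : 0u) | ((uint32_t)shift << 1);
        }
-->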
@@ -3133,13 +934,13 @@ to upconvert to 32b float internally?
<bitfield name="LINELENGTHEN" pos="1" type="boolean"/>
</reg32>
- <bitset name="a6xx_gras_layer_cntl" inline="yes">
+ <bitset name="a6xx_gras_us_xs_siv_cntl" inline="yes">
<bitfield name="WRITES_LAYER" pos="0" type="boolean"/>
<bitfield name="WRITES_VIEW" pos="1" type="boolean"/>
</bitset>
- <reg32 offset="0x809b" name="GRAS_VS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x809c" name="GRAS_GS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x809d" name="GRAS_DS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x809b" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
<!-- 0x809e/0x809f invalid -->
<enum name="a6xx_sequenced_thread_dist">
@@ -3213,13 +1014,13 @@ to upconvert to 32b float internally?
<enum name="a6xx_lrz_feedback_mask">
<value value="0x0" name="LRZ_FEEDBACK_NONE"/>
<value value="0x1" name="LRZ_FEEDBACK_EARLY_Z"/>
- <value value="0x2" name="LRZ_FEEDBACK_EARLY_LRZ_LATE_Z"/>
+ <value value="0x2" name="LRZ_FEEDBACK_EARLY_Z_LATE_Z"/>
<!-- We don't have a flag type and this flags combination is often used -->
- <value value="0x3" name="LRZ_FEEDBACK_EARLY_Z_OR_EARLY_LRZ_LATE_Z"/>
+ <value value="0x3" name="LRZ_FEEDBACK_EARLY_Z_OR_EARLY_Z_LATE_Z"/>
<value value="0x4" name="LRZ_FEEDBACK_LATE_Z"/>
</enum>
- <reg32 offset="0x80a1" name="GRAS_BIN_CONTROL" usage="rp_blit">
+ <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -3235,22 +1036,22 @@ to upconvert to 32b float internally?
<bitfield name="UNK27" pos="27"/>
</reg32>
- <reg32 offset="0x80a2" name="GRAS_RAS_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" pos="2"/>
<bitfield name="UNK3" pos="3"/>
</reg32>
- <reg32 offset="0x80a3" name="GRAS_DEST_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
</reg32>
- <bitset name="a6xx_sample_config" inline="yes">
+ <bitset name="a6xx_msaa_sample_pos_cntl" inline="yes">
<bitfield name="UNK0" pos="0"/>
<bitfield name="LOCATION_ENABLE" pos="1" type="boolean"/>
</bitset>
- <bitset name="a6xx_sample_locations" inline="yes">
+ <bitset name="a6xx_programmable_msaa_pos" inline="yes">
<bitfield name="SAMPLE_0_X" low="0" high="3" radix="4" type="fixed"/>
<bitfield name="SAMPLE_0_Y" low="4" high="7" radix="4" type="fixed"/>
<bitfield name="SAMPLE_1_X" low="8" high="11" radix="4" type="fixed"/>
@@ -3261,9 +1062,9 @@ to upconvert to 32b float internally?
<bitfield name="SAMPLE_3_Y" low="28" high="31" radix="4" type="fixed"/>
</bitset>
- <reg32 offset="0x80a4" name="GRAS_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
- <reg32 offset="0x80a5" name="GRAS_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0x80a6" name="GRAS_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0x80a4" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<reg32 offset="0x80a7" name="GRAS_UNKNOWN_80A7" variants="A7XX-" usage="cmd"/>
@@ -3286,13 +1087,36 @@ to upconvert to 32b float internally?
<reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
- <!-- 0x80f4 - 0x80fa are used for VK_KHR_fragment_shading_rate -->
- <reg64 offset="0x80f4" name="GRAS_UNKNOWN_80F4" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f5" name="GRAS_UNKNOWN_80F5" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f6" name="GRAS_UNKNOWN_80F6" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f8" name="GRAS_UNKNOWN_80F8" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f9" name="GRAS_UNKNOWN_80F9" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80fa" name="GRAS_UNKNOWN_80FA" variants="A7XX-" usage="cmd"/>
+ <enum name="a6xx_fsr_combiner">
+ <value value="0" name="FSR_COMBINER_OP_KEEP"/>
+ <value value="1" name="FSR_COMBINER_OP_REPLACE"/>
+ <value value="2" name="FSR_COMBINER_OP_MIN"/>
+ <value value="3" name="FSR_COMBINER_OP_MAX"/>
+ <value value="4" name="FSR_COMBINER_OP_MUL"/>
+ </enum>
+
+ <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PIPELINE_FSR_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="FRAG_SIZE_X" low="1" high="2" type="uint"/>
+ <bitfield name="FRAG_SIZE_Y" low="3" high="4" type="uint"/>
+ <bitfield name="COMBINER_OP_1" low="5" high="7" type="a6xx_fsr_combiner"/>
+ <bitfield name="COMBINER_OP_2" low="8" high="10" type="a6xx_fsr_combiner"/>
+ <bitfield name="ATTACHMENT_FSR_ENABLE" pos="13" type="boolean"/>
+ <bitfield name="PRIMITIVE_FSR_ENABLE" pos="20" type="boolean"/>
+ </reg32>
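<!--
    Illustrative only: these combiner op names mirror the Vulkan fragment
    shading rate combiners, which merge two fragment-size inputs roughly
    as below. This is a sketch of the API-level semantics, not a claim
    about the hardware implementation:

        #include <stdint.h>

        static uint32_t fsr_combine(uint32_t op, uint32_t a, uint32_t b)
        {
            switch (op) {
            case 0:  return a;              /* FSR_COMBINER_OP_KEEP    */
            case 1:  return b;              /* FSR_COMBINER_OP_REPLACE */
            case 2:  return a < b ? a : b;  /* FSR_COMBINER_OP_MIN     */
            case 3:  return a > b ? a : b;  /* FSR_COMBINER_OP_MAX     */
            case 4:  return a * b;          /* FSR_COMBINER_OP_MUL     */
            default: return a;
            }
        }
-->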
+ <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ <bitfield name="LAYERED" pos="0" type="boolean"/>
+ <bitfield name="TILE_MODE" low="1" high="2" type="a6xx_tile_mode"/>
+ </reg32>
+ <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" variants="A7XX-" usage="rp_blit">
+ <bitfield name="WIDTH" low="0" high="15" type="uint"/>
+ <bitfield name="HEIGHT" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX-" type="waddress" usage="rp_blit"/>
+ <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PITCH" shr="6" low="0" high="7" type="uint"/>
+ <bitfield name="ARRAY_PITCH" shr="6" low="10" high="28" type="uint"/>
+ </reg32>
<enum name="a6xx_lrz_dir_status">
<value value="0x1" name="LRZ_DIR_LE"/>
@@ -3313,7 +1137,7 @@ to upconvert to 32b float internally?
</doc>
<bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/>
<!-- set when depth-test + depth-write enabled -->
- <bitfield name="Z_TEST_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="Z_WRITE_ENABLE" pos="4" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="5" type="boolean"/>
<bitfield name="DIR" low="6" high="7" type="a6xx_lrz_dir_status"/>
<doc>
@@ -3339,14 +1163,13 @@ to upconvert to 32b float internally?
<bitfield name="FRAGCOORDSAMPLEMODE" low="1" high="2" type="a6xx_fragcoord_sample_mode"/>
</reg32>
- <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUF_INFO_0" usage="rp_blit">
+ <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" usage="rp_blit">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
</reg32>
<reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit"/>
<reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" usage="rp_blit">
- <!-- TODO: fix the shr fields -->
<bitfield name="PITCH" low="0" high="7" shr="5" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="10" high="28" shr="4" type="uint"/>
+ <bitfield name="ARRAY_PITCH" low="10" high="28" shr="8" type="uint"/>
</reg32>
<!--
@@ -3381,18 +1204,18 @@ to upconvert to 32b float internally?
-->
<reg64 offset="0x8106" name="GRAS_LRZ_FAST_CLEAR_BUFFER_BASE" align="64" type="waddress" usage="rp_blit"/>
<!-- 0x8108 invalid -->
- <reg32 offset="0x8109" name="GRAS_SAMPLE_CNTL" usage="rp_blit">
+ <reg32 offset="0x8109" name="GRAS_LRZ_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
<!--
LRZ buffer represents a single array layer + mip level, and there is
a single buffer per depth image. Thus to reuse LRZ between renderpasses
	 it is necessary to track the depth view used in the previous renderpass, which
- GRAS_LRZ_DEPTH_VIEW is for.
- GRAS_LRZ_CNTL checks if current value of GRAS_LRZ_DEPTH_VIEW is equal to
+ GRAS_LRZ_VIEW_INFO is for.
+	 GRAS_LRZ_CNTL checks whether the current value of GRAS_LRZ_VIEW_INFO is equal to
	 the value stored in the LRZ buffer; if not, LRZ is disabled.
-->
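	<!--
	 In other words, conceptually (a sketch, not confirmed HW behaviour):
	   if (GRAS_LRZ_VIEW_INFO != view_info_stored_in_lrz_buffer)
	      disable_lrz_for_this_pass();
	 so the driver only programs the packed view and lets the comparison
	 invalidate stale LRZ data.
	-->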
- <reg32 offset="0x810a" name="GRAS_LRZ_DEPTH_VIEW" usage="cmd">
+ <reg32 offset="0x810a" name="GRAS_LRZ_VIEW_INFO" usage="cmd">
<bitfield name="BASE_LAYER" low="0" high="10" type="uint"/>
<bitfield name="LAYER_COUNT" low="16" high="26" type="uint"/>
<bitfield name="BASE_MIP_LEVEL" low="28" high="31" type="uint"/>
@@ -3408,7 +1231,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x8110" name="GRAS_UNKNOWN_8110" low="0" high="1" usage="cmd"/>
<!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
- <reg32 offset="0x8111" name="GRAS_LRZ_CLEAR_DEPTH_F32" type="float" variants="A7XX-"/>
+ <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX-"/>
<reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
@@ -3430,7 +1253,7 @@ to upconvert to 32b float internally?
<value value="0x5" name="ROTATE_VFLIP"/>
</enum>
- <bitset name="a6xx_2d_blit_cntl" inline="yes">
+ <bitset name="a6xx_a2d_bit_cntl" inline="yes">
<bitfield name="ROTATE" low="0" high="2" type="a6xx_rotation"/>
<bitfield name="OVERWRITEEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="6"/>
@@ -3447,22 +1270,22 @@ to upconvert to 32b float internally?
<bitfield name="UNK30" pos="30" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x8400" name="GRAS_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
<!-- note: the low 8 bits for src coords are valid, probably fixed point
it would be a bit weird though, since we subtract 1 from BR coords
apparently signed, gallium driver uses negative coords and it works?
-->
- <reg32 offset="0x8401" name="GRAS_2D_SRC_TL_X" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8402" name="GRAS_2D_SRC_BR_X" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8403" name="GRAS_2D_SRC_TL_Y" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8404" name="GRAS_2D_SRC_BR_Y" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8405" name="GRAS_2D_DST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x8406" name="GRAS_2D_DST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8401" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8402" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8403" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8404" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8405" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8406" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x8407" name="GRAS_2D_UNKNOWN_8407" low="0" high="31"/>
<reg32 offset="0x8408" name="GRAS_2D_UNKNOWN_8408" low="0" high="31"/>
<reg32 offset="0x8409" name="GRAS_2D_UNKNOWN_8409" low="0" high="31"/>
- <reg32 offset="0x840a" name="GRAS_2D_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x840b" name="GRAS_2D_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
<!-- 0x840c-0x85ff invalid -->
<!-- always 0x880 ? (and 0 in a640/a650 traces?) -->
@@ -3481,7 +1304,7 @@ to upconvert to 32b float internally?
-->
<!-- same as GRAS_BIN_CONTROL, but without bit 27: -->
- <reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -3490,7 +1313,7 @@ to upconvert to 32b float internally?
<bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
</reg32>
- <reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -3501,8 +1324,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="CCUSINGLECACHELINESIZE" low="3" high="5"/>
<bitfield name="EARLYVIZOUTEN" pos="6" type="boolean"/>
- <!-- set during binning pass: -->
- <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
<bitfield name="UNK8" low="8" high="10"/>
<bitfield name="RASTER_MODE" pos="8" type="a6xx_raster_mode"/>
<bitfield name="RASTER_DIRECTION" low="9" high="10" type="a6xx_raster_direction"/>
@@ -3515,15 +1337,14 @@ to upconvert to 32b float internally?
</reg32>
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="EARLYVIZOUTEN" pos="6" type="boolean"/>
- <!-- set during binning pass: -->
- <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
<bitfield name="RASTER_MODE" pos="8" type="a6xx_raster_mode"/>
<bitfield name="RASTER_DIRECTION" low="9" high="10" type="a6xx_raster_direction"/>
<bitfield name="CONSERVATIVERASEN" pos="11" type="boolean"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="12" type="boolean"/>
</reg32>
<reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
</reg32>
<reg32 offset="0x8802" name="RB_RAS_MSAA_CNTL" usage="rp_blit">
@@ -3536,16 +1357,16 @@ to upconvert to 32b float internally?
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x8804" name="RB_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
- <reg32 offset="0x8805" name="RB_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0x8806" name="RB_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0x8804" name="RB_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8805" name="RB_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0x8806" name="RB_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<!-- 0x8807-0x8808 invalid -->
<!--
note: maybe not actually called RB_RENDER_CONTROLn (since RB_RENDER_CNTL
name comes from kernel and is probably right)
-->
- <reg32 offset="0x8809" name="RB_RENDER_CONTROL0" usage="rp_blit">
- <!-- see also GRAS_CNTL -->
+ <reg32 offset="0x8809" name="RB_INTERP_CNTL" usage="rp_blit">
+ <!-- see also GRAS_CL_INTERP_CNTL -->
<bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
<bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
<bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
@@ -3555,7 +1376,7 @@ to upconvert to 32b float internally?
<bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
<bitfield name="UNK10" pos="10" type="boolean"/>
</reg32>
- <reg32 offset="0x880a" name="RB_RENDER_CONTROL1" usage="rp_blit">
+ <reg32 offset="0x880a" name="RB_PS_INPUT_CNTL" usage="rp_blit">
<!-- enable bits for various FS sysvalue regs: -->
<bitfield name="SAMPLEMASK" pos="0" type="boolean"/>
<bitfield name="POSTDEPTHCOVERAGE" pos="1" type="boolean"/>
@@ -3567,16 +1388,16 @@ to upconvert to 32b float internally?
<bitfield name="FOVEATION" pos="8" type="boolean"/>
</reg32>
- <reg32 offset="0x880b" name="RB_FS_OUTPUT_CNTL0" usage="rp_blit">
+ <reg32 offset="0x880b" name="RB_PS_OUTPUT_CNTL" usage="rp_blit">
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="0" type="boolean"/>
<bitfield name="FRAG_WRITES_Z" pos="1" type="boolean"/>
<bitfield name="FRAG_WRITES_SAMPMASK" pos="2" type="boolean"/>
<bitfield name="FRAG_WRITES_STENCILREF" pos="3" type="boolean"/>
</reg32>
- <reg32 offset="0x880c" name="RB_FS_OUTPUT_CNTL1" usage="rp_blit">
+ <reg32 offset="0x880c" name="RB_PS_MRT_CNTL" usage="rp_blit">
<bitfield name="MRT" low="0" high="3" type="uint"/>
</reg32>
- <reg32 offset="0x880d" name="RB_RENDER_COMPONENTS" usage="rp_blit">
+ <reg32 offset="0x880d" name="RB_PS_OUTPUT_MASK" usage="rp_blit">
<bitfield name="RT0" low="0" high="3"/>
<bitfield name="RT1" low="4" high="7"/>
<bitfield name="RT2" low="8" high="11"/>
@@ -3608,7 +1429,7 @@ to upconvert to 32b float internally?
<bitfield name="SRGB_MRT7" pos="7" type="boolean"/>
</reg32>
- <reg32 offset="0x8810" name="RB_SAMPLE_CNTL" usage="rp_blit">
+ <reg32 offset="0x8810" name="RB_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
<reg32 offset="0x8811" name="RB_UNKNOWN_8811" low="4" high="6" usage="cmd"/>
@@ -3672,18 +1493,18 @@ to upconvert to 32b float internally?
<reg32 offset="0x7" name="BASE_GMEM" low="12" high="31" shr="12"/>
</array>
- <reg32 offset="0x8860" name="RB_BLEND_RED_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8861" name="RB_BLEND_GREEN_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8862" name="RB_BLEND_BLUE_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8863" name="RB_BLEND_ALPHA_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8864" name="RB_ALPHA_CONTROL" usage="cmd">
+ <reg32 offset="0x8860" name="RB_BLEND_CONSTANT_RED_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8861" name="RB_BLEND_CONSTANT_GREEN_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8862" name="RB_BLEND_CONSTANT_BLUE_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8863" name="RB_BLEND_CONSTANT_ALPHA_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8864" name="RB_ALPHA_TEST_CNTL" usage="cmd">
<bitfield name="ALPHA_REF" low="0" high="7" type="hex"/>
<bitfield name="ALPHA_TEST" pos="8" type="boolean"/>
<bitfield name="ALPHA_TEST_FUNC" low="9" high="11" type="adreno_compare_func"/>
</reg32>
<reg32 offset="0x8865" name="RB_BLEND_CNTL" usage="rp_blit">
<!-- per-mrt enable bit -->
- <bitfield name="ENABLE_BLEND" low="0" high="7"/>
+ <bitfield name="BLEND_READS_DEST" low="0" high="7"/>
<bitfield name="INDEPENDENT_BLEND" pos="8" type="boolean"/>
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="9" type="boolean"/>
<bitfield name="ALPHA_TO_COVERAGE" pos="10" type="boolean"/>
@@ -3726,12 +1547,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x8873" name="RB_DEPTH_BUFFER_PITCH" low="0" high="13" shr="6" type="uint" usage="rp_blit"/>
<reg32 offset="0x8874" name="RB_DEPTH_BUFFER_ARRAY_PITCH" low="0" high="27" shr="6" type="uint" usage="rp_blit"/>
<reg64 offset="0x8875" name="RB_DEPTH_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8877" name="RB_DEPTH_BUFFER_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x8877" name="RB_DEPTH_GMEM_BASE" low="12" high="31" shr="12" usage="rp_blit"/>
- <reg32 offset="0x8878" name="RB_Z_BOUNDS_MIN" type="float" usage="rp_blit"/>
- <reg32 offset="0x8879" name="RB_Z_BOUNDS_MAX" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8878" name="RB_DEPTH_BOUND_MIN" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8879" name="RB_DEPTH_BOUND_MAX" type="float" usage="rp_blit"/>
<!-- 0x887a-0x887f invalid -->
- <reg32 offset="0x8880" name="RB_STENCIL_CONTROL" usage="rp_blit">
+ <reg32 offset="0x8880" name="RB_STENCIL_CNTL" usage="rp_blit">
<bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
<bitfield name="STENCIL_ENABLE_BF" pos="1" type="boolean"/>
<!--
@@ -3753,11 +1574,11 @@ to upconvert to 32b float internally?
<reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" usage="rp_blit">
<bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0x8881" name="RB_STENCIL_INFO" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x8881" name="RB_STENCIL_BUFFER_INFO" variants="A6XX" usage="rp_blit">
<bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
<bitfield name="UNK1" pos="1" type="boolean"/>
</reg32>
- <reg32 offset="0x8881" name="RB_STENCIL_INFO" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x8881" name="RB_STENCIL_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
<bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
<bitfield name="UNK1" pos="1" type="boolean"/>
<bitfield name="TILEMODE" low="2" high="3" type="a6xx_tile_mode"/>
@@ -3765,22 +1586,22 @@ to upconvert to 32b float internally?
<reg32 offset="0x8882" name="RB_STENCIL_BUFFER_PITCH" low="0" high="11" shr="6" type="uint" usage="rp_blit"/>
<reg32 offset="0x8883" name="RB_STENCIL_BUFFER_ARRAY_PITCH" low="0" high="23" shr="6" type="uint" usage="rp_blit"/>
<reg64 offset="0x8884" name="RB_STENCIL_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8886" name="RB_STENCIL_BUFFER_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
- <reg32 offset="0x8887" name="RB_STENCILREF" usage="rp_blit">
+ <reg32 offset="0x8886" name="RB_STENCIL_GMEM_BASE" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x8887" name="RB_STENCIL_REF_CNTL" usage="rp_blit">
<bitfield name="REF" low="0" high="7"/>
<bitfield name="BFREF" low="8" high="15"/>
</reg32>
- <reg32 offset="0x8888" name="RB_STENCILMASK" usage="rp_blit">
+ <reg32 offset="0x8888" name="RB_STENCIL_MASK" usage="rp_blit">
<bitfield name="MASK" low="0" high="7"/>
<bitfield name="BFMASK" low="8" high="15"/>
</reg32>
- <reg32 offset="0x8889" name="RB_STENCILWRMASK" usage="rp_blit">
+ <reg32 offset="0x8889" name="RB_STENCIL_WRITE_MASK" usage="rp_blit">
<bitfield name="WRMASK" low="0" high="7"/>
<bitfield name="BFWRMASK" low="8" high="15"/>
</reg32>
<!-- 0x888a-0x888f invalid -->
<reg32 offset="0x8890" name="RB_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x8891" name="RB_SAMPLE_COUNT_CONTROL" usage="cmd">
+ <reg32 offset="0x8891" name="RB_SAMPLE_COUNTER_CNTL" usage="cmd">
<bitfield name="DISABLE" pos="0" type="boolean"/>
<bitfield name="COPY" pos="1" type="boolean"/>
</reg32>
@@ -3791,27 +1612,27 @@ to upconvert to 32b float internally?
<reg32 offset="0x8899" name="RB_UNKNOWN_8899" variants="A7XX-" usage="cmd"/>
<!-- 0x8899-0x88bf invalid -->
<!-- clamps depth value for depth test/write -->
- <reg32 offset="0x88c0" name="RB_Z_CLAMP_MIN" type="float" usage="rp_blit"/>
- <reg32 offset="0x88c1" name="RB_Z_CLAMP_MAX" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit"/>
	<!-- 0x88c2-0x88cf invalid -->
- <reg32 offset="0x88d0" name="RB_UNKNOWN_88D0" usage="rp_blit">
+ <reg32 offset="0x88d0" name="RB_RESOLVE_CNTL_0" usage="rp_blit">
<bitfield name="UNK0" low="0" high="12"/>
<bitfield name="UNK16" low="16" high="26"/>
</reg32>
- <reg32 offset="0x88d1" name="RB_BLIT_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x88d2" name="RB_BLIT_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d1" name="RB_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d2" name="RB_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
<!-- weird to duplicate other regs from same block?? -->
- <reg32 offset="0x88d3" name="RB_BIN_CONTROL2" usage="rp_blit">
+ <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
</reg32>
- <reg32 offset="0x88d4" name="RB_WINDOW_OFFSET2" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x88d5" name="RB_BLIT_GMEM_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0x88d4" name="RB_RESOLVE_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d5" name="RB_RESOLVE_GMEM_BUFFER_INFO" usage="rp_blit">
<bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/>
</reg32>
- <reg32 offset="0x88d6" name="RB_BLIT_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x88d6" name="RB_RESOLVE_GMEM_BUFFER_BASE" low="12" high="31" shr="12" usage="rp_blit"/>
<!-- s/DST_FORMAT/DST_INFO/ probably: -->
- <reg32 offset="0x88d7" name="RB_BLIT_DST_INFO" usage="rp_blit">
+ <reg32 offset="0x88d7" name="RB_RESOLVE_SYSTEM_BUFFER_INFO" usage="rp_blit">
<bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
<bitfield name="FLAGS" pos="2" type="boolean"/>
<bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/>
@@ -3820,25 +1641,31 @@ to upconvert to 32b float internally?
<bitfield name="UNK15" pos="15" type="boolean"/>
<bitfield name="MUTABLEEN" pos="16" type="boolean" variants="A7XX-"/>
</reg32>
- <reg64 offset="0x88d8" name="RB_BLIT_DST" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x88da" name="RB_BLIT_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x88d8" name="RB_RESOLVE_SYSTEM_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x88da" name="RB_RESOLVE_SYSTEM_BUFFER_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
<!-- array-pitch is size of layer -->
- <reg32 offset="0x88db" name="RB_BLIT_DST_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
- <reg64 offset="0x88dc" name="RB_BLIT_FLAG_DST" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x88de" name="RB_BLIT_FLAG_DST_PITCH" usage="rp_blit">
+ <reg32 offset="0x88db" name="RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x88dc" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x88de" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH" usage="rp_blit">
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
<bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
</reg32>
- <reg32 offset="0x88df" name="RB_BLIT_CLEAR_COLOR_DW0" usage="rp_blit"/>
- <reg32 offset="0x88e0" name="RB_BLIT_CLEAR_COLOR_DW1" usage="rp_blit"/>
- <reg32 offset="0x88e1" name="RB_BLIT_CLEAR_COLOR_DW2" usage="rp_blit"/>
- <reg32 offset="0x88e2" name="RB_BLIT_CLEAR_COLOR_DW3" usage="rp_blit"/>
+ <reg32 offset="0x88df" name="RB_RESOLVE_CLEAR_COLOR_DW0" usage="rp_blit"/>
+ <reg32 offset="0x88e0" name="RB_RESOLVE_CLEAR_COLOR_DW1" usage="rp_blit"/>
+ <reg32 offset="0x88e1" name="RB_RESOLVE_CLEAR_COLOR_DW2" usage="rp_blit"/>
+ <reg32 offset="0x88e2" name="RB_RESOLVE_CLEAR_COLOR_DW3" usage="rp_blit"/>
+
+ <enum name="a6xx_blit_event_type">
+ <value value="0x0" name="BLIT_EVENT_STORE"/>
+ <value value="0x1" name="BLIT_EVENT_STORE_AND_CLEAR"/>
+ <value value="0x2" name="BLIT_EVENT_CLEAR"/>
+ <value value="0x3" name="BLIT_EVENT_LOAD"/>
+ </enum>
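+	<!--
+	 Presumably these map onto the attachment load/store ops of a tiled
+	 renderpass, roughly:
+	   loadOp=LOAD   -> BLIT_EVENT_LOAD  (sysmem->GMEM unresolve at bin start)
+	   loadOp=CLEAR  -> BLIT_EVENT_CLEAR
+	   storeOp=STORE -> BLIT_EVENT_STORE (GMEM->sysmem resolve at bin end)
+	 (a guess from the event names, not a confirmed mapping)
+	-->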
<!-- seems somewhat similar to what we called RB_CLEAR_CNTL on a5xx: -->
- <reg32 offset="0x88e3" name="RB_BLIT_INFO" usage="rp_blit">
- <bitfield name="UNK0" pos="0" type="boolean"/> <!-- s8 stencil restore/clear? But also color restore? -->
- <bitfield name="GMEM" pos="1" type="boolean"/> <!-- set for restore and clear to gmem? -->
+ <reg32 offset="0x88e3" name="RB_RESOLVE_OPERATION" usage="rp_blit">
+ <bitfield name="TYPE" low="0" high="1" type="a6xx_blit_event_type"/>
<bitfield name="SAMPLE_0" pos="2" type="boolean"/> <!-- takes sample 0 instead of averaging -->
<bitfield name="DEPTH" pos="3" type="boolean"/> <!-- z16/z32/z24s8/x24x8 clear or resolve? -->
<doc>
@@ -3853,16 +1680,20 @@ to upconvert to 32b float internally?
<!-- set when this is the last resolve on a650+ -->
<bitfield name="LAST" low="8" high="9"/>
<!--
- a618 GLES: color render target number being resolved for RM6_RESOLVE, 0x8 for depth, 0x9 for separate stencil.
- a618 VK: 0x8 for depth RM6_RESOLVE, 0x9 for separate stencil, 0 otherwise.
-
- We believe this is related to concurrent resolves
+ a618 GLES: color render target number being resolved for CCU_RESOLVE, 0x8 for depth, 0x9 for separate stencil.
+ a618 VK: 0x8 for depth CCU_RESOLVE, 0x9 for separate stencil, 0 otherwise.
+ a7xx VK: 0x8 for depth, 0x9 for separate stencil, 0x0 to 0x7 used for concurrent resolves of color render
+ targets inside a given resolve group.
-->
<bitfield name="BUFFER_ID" low="12" high="15"/>
</reg32>
- <reg32 offset="0x88e4" name="RB_UNKNOWN_88E4" variants="A7XX-" usage="rp_blit">
- <!-- Value conditioned based on predicate, changed before blits -->
- <bitfield name="UNK0" pos="0" type="boolean"/>
+
+ <enum name="a7xx_blit_clear_mode">
+ <value value="0x0" name="CLEAR_MODE_SYSMEM"/>
+ <value value="0x1" name="CLEAR_MODE_GMEM"/>
+ </enum>
+ <reg32 offset="0x88e4" name="RB_CLEAR_TARGET" variants="A7XX-" usage="rp_blit">
+ <bitfield name="CLEAR_MODE" pos="0" type="a7xx_blit_clear_mode"/>
</reg32>
<enum name="a6xx_ccu_cache_size">
@@ -3871,7 +1702,7 @@ to upconvert to 32b float internally?
<value value="0x2" name="CCU_CACHE_SIZE_QUARTER"/>
<value value="0x3" name="CCU_CACHE_SIZE_EIGHTH"/>
</enum>
- <reg32 offset="0x88e5" name="RB_CCU_CNTL2" variants="A7XX-" usage="cmd">
+ <reg32 offset="0x88e5" name="RB_CCU_CACHE_CNTL" variants="A7XX-" usage="cmd">
<bitfield name="DEPTH_OFFSET_HI" pos="0" type="hex"/>
<bitfield name="COLOR_OFFSET_HI" pos="2" type="hex"/>
<bitfield name="DEPTH_CACHE_SIZE" low="10" high="11" type="a6xx_ccu_cache_size"/>
@@ -3895,7 +1726,13 @@ to upconvert to 32b float internally?
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
<bitfield name="ARRAY_PITCH" low="11" high="23" shr="7" type="uint"/>
</reg32>
- <reg32 offset="0x88f4" name="RB_UNKNOWN_88F4" low="0" high="2"/>
+
+ <reg32 offset="0x88f4" name="RB_VRS_CONFIG" usage="rp_blit">
+ <bitfield name="UNK2" pos="2" type="boolean"/>
+ <bitfield name="PIPELINE_FSR_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="ATTACHMENT_FSR_ENABLE" pos="5" type="boolean"/>
+ <bitfield name="PRIMITIVE_FSR_ENABLE" pos="18" type="boolean"/>
+ </reg32>
<!-- Connected to VK_EXT_fragment_density_map? -->
<reg32 offset="0x88f5" name="RB_UNKNOWN_88F5" variants="A7XX-"/>
<!-- 0x88f6-0x88ff invalid -->
@@ -3906,7 +1743,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK8" low="8" high="10"/>
<bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
</reg32>
- <array offset="0x8903" name="RB_MRT_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
+ <array offset="0x8903" name="RB_COLOR_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
<reg64 offset="0" name="ADDR" type="waddress" align="64"/>
<reg32 offset="2" name="PITCH">
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
@@ -3915,10 +1752,10 @@ to upconvert to 32b float internally?
</array>
<!-- 0x891b-0x8926 invalid -->
<doc>
- RB_SAMPLE_COUNT_ADDR register is used up to (and including) a730. After that
+ RB_SAMPLE_COUNTER_BASE register is used up to (and including) a730. After that
the address is specified through CP_EVENT_WRITE7::WRITE_SAMPLE_COUNT.
</doc>
- <reg64 offset="0x8927" name="RB_SAMPLE_COUNT_ADDR" type="waddress" align="16" usage="cmd"/>
+ <reg64 offset="0x8927" name="RB_SAMPLE_COUNTER_BASE" type="waddress" align="16" usage="cmd"/>
<!-- 0x8929-0x89ff invalid -->
<!-- TODO: there are some registers in the 0x8a00-0x8bff range -->
@@ -3932,10 +1769,10 @@ to upconvert to 32b float internally?
<reg32 offset="0x8a20" name="RB_UNKNOWN_8A20" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0x8a30" name="RB_UNKNOWN_8A30" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0x8c00" name="RB_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
- <reg32 offset="0x8c01" name="RB_2D_UNKNOWN_8C01" low="0" high="31" usage="rp_blit"/>
+ <reg32 offset="0x8c00" name="RB_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8c01" name="RB_A2D_PIXEL_CNTL" low="0" high="31" usage="rp_blit"/>
- <bitset name="a6xx_2d_src_surf_info" inline="yes">
+ <bitset name="a6xx_a2d_src_texture_info" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
<bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
@@ -3954,7 +1791,7 @@ to upconvert to 32b float internally?
<bitfield name="MUTABLEEN" pos="29" type="boolean" variants="A7XX-"/>
</bitset>
- <bitset name="a6xx_2d_dst_surf_info" inline="yes">
+ <bitset name="a6xx_a2d_dest_buffer_info" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
<bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
@@ -3965,26 +1802,26 @@ to upconvert to 32b float internally?
</bitset>
<!-- 0x8c02-0x8c16 invalid -->
- <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_dst_surf_info" usage="rp_blit"/>
- <reg64 offset="0x8c18" name="RB_2D_DST" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c1a" name="RB_2D_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8c17" name="RB_A2D_DEST_BUFFER_INFO" type="a6xx_a2d_dest_buffer_info" usage="rp_blit"/>
+ <reg64 offset="0x8c18" name="RB_A2D_DEST_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c1a" name="RB_A2D_DEST_BUFFER_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12/IYUV): -->
- <reg64 offset="0x8c1b" name="RB_2D_DST_PLANE1" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c1d" name="RB_2D_DST_PLANE_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
- <reg64 offset="0x8c1e" name="RB_2D_DST_PLANE2" type="waddress" align="64" usage="rp_blit"/>
+ <reg64 offset="0x8c1b" name="RB_A2D_DEST_BUFFER_BASE_1" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c1d" name="RB_A2D_DEST_BUFFER_PITCH_1" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c1e" name="RB_A2D_DEST_BUFFER_BASE_2" type="waddress" align="64" usage="rp_blit"/>
- <reg64 offset="0x8c20" name="RB_2D_DST_FLAGS" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c22" name="RB_2D_DST_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c20" name="RB_A2D_DEST_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c22" name="RB_A2D_DEST_FLAG_BUFFER_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12 with UBWC): -->
- <reg64 offset="0x8c23" name="RB_2D_DST_FLAGS_PLANE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c25" name="RB_2D_DST_FLAGS_PLANE_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c23" name="RB_A2D_DEST_FLAG_BUFFER_BASE_1" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c25" name="RB_A2D_DEST_FLAG_BUFFER_PITCH_1" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
<!-- TODO: 0x8c26-0x8c33 are all full 32-bit registers -->
	<!-- unlike a5xx, these are per-channel values rather than packed -->
- <reg32 offset="0x8c2c" name="RB_2D_SRC_SOLID_C0" usage="rp_blit"/>
- <reg32 offset="0x8c2d" name="RB_2D_SRC_SOLID_C1" usage="rp_blit"/>
- <reg32 offset="0x8c2e" name="RB_2D_SRC_SOLID_C2" usage="rp_blit"/>
- <reg32 offset="0x8c2f" name="RB_2D_SRC_SOLID_C3" usage="rp_blit"/>
+ <reg32 offset="0x8c2c" name="RB_A2D_CLEAR_COLOR_DW0" usage="rp_blit"/>
+ <reg32 offset="0x8c2d" name="RB_A2D_CLEAR_COLOR_DW1" usage="rp_blit"/>
+ <reg32 offset="0x8c2e" name="RB_A2D_CLEAR_COLOR_DW2" usage="rp_blit"/>
+ <reg32 offset="0x8c2f" name="RB_A2D_CLEAR_COLOR_DW3" usage="rp_blit"/>
<reg32 offset="0x8c34" name="RB_UNKNOWN_8C34" variants="A7XX-" usage="cmd"/>
@@ -3996,7 +1833,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x8e04" name="RB_DBG_ECO_CNTL" usage="cmd"/> <!-- TODO: valid mask 0xfffffeff -->
<reg32 offset="0x8e05" name="RB_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
<!-- 0x02080000 in GMEM, zero otherwise? -->
- <reg32 offset="0x8e06" name="RB_UNKNOWN_8E06" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8e06" name="RB_CCU_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0x8e07" name="RB_CCU_CNTL" usage="cmd" variants="A6XX">
<bitfield name="GMEM_FAST_CLEAR_DISABLE" pos="0" type="boolean"/>
@@ -4017,10 +1854,21 @@ to upconvert to 32b float internally?
<bitfield name="COLOR_OFFSET" low="23" high="31" shr="12" type="hex"/>
<!--TODO: valid mask 0xfffffc1f -->
</reg32>
+ <enum name="a7xx_concurrent_resolve_mode">
+ <value value="0x0" name="CONCURRENT_RESOLVE_MODE_DISABLED"/>
+ <value value="0x1" name="CONCURRENT_RESOLVE_MODE_1"/>
+ <value value="0x2" name="CONCURRENT_RESOLVE_MODE_2"/>
+ </enum>
+ <enum name="a7xx_concurrent_unresolve_mode">
+ <value value="0x0" name="CONCURRENT_UNRESOLVE_MODE_DISABLED"/>
+ <value value="0x1" name="CONCURRENT_UNRESOLVE_MODE_PARTIAL"/>
+ <value value="0x3" name="CONCURRENT_UNRESOLVE_MODE_FULL"/>
+ </enum>
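+	<!--
+	 These presumably pair with RB_RESOLVE_OPERATION::BUFFER_ID above, which
+	 numbers the color targets resolved concurrently within a resolve group
+	 on a7xx; what distinguishes MODE_1 from MODE_2 (and PARTIAL from FULL
+	 unresolve) is not known.
+	-->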
<reg32 offset="0x8e07" name="RB_CCU_CNTL" usage="cmd" variants="A7XX-">
<bitfield name="GMEM_FAST_CLEAR_DISABLE" pos="0" type="boolean"/>
- <bitfield name="CONCURRENT_RESOLVE" pos="2" type="boolean"/>
- <!-- rest of the bits were moved to RB_CCU_CNTL2 -->
+ <bitfield name="CONCURRENT_RESOLVE_MODE" low="2" high="3" type="a7xx_concurrent_resolve_mode"/>
+ <bitfield name="CONCURRENT_UNRESOLVE_MODE" low="5" high="6" type="a7xx_concurrent_unresolve_mode"/>
+ <!-- rest of the bits were moved to RB_CCU_CACHE_CNTL -->
</reg32>
<reg32 offset="0x8e08" name="RB_NC_MODE_CNTL">
<bitfield name="MODE" pos="0" type="boolean"/>
@@ -4046,9 +1894,9 @@ to upconvert to 32b float internally?
<reg32 offset="0x8e3d" name="RB_RB_SUB_BLOCK_SEL_CNTL_CD"/>
<!-- 0x8e3e-0x8e4f invalid -->
<!-- GMEM save/restore for preemption: -->
- <reg32 offset="0x8e50" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE" pos="0" type="boolean"/>
+ <reg32 offset="0x8e50" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE" pos="0" type="boolean"/>
<!-- address for GMEM save/restore? -->
- <reg32 offset="0x8e51" name="RB_UNKNOWN_8E51" type="waddress" align="1"/>
+ <reg32 offset="0x8e51" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ADDR" type="waddress" align="1"/>
<!-- 0x8e53-0x8e7f invalid -->
<reg32 offset="0x8e79" name="RB_UNKNOWN_8E79" variants="A7XX-" usage="cmd"/>
<!-- 0x8e80-0x8e83 are valid -->
@@ -4069,38 +1917,38 @@ to upconvert to 32b float internally?
<bitfield name="CLIP_DIST_03_LOC" low="8" high="15" type="uint"/>
<bitfield name="CLIP_DIST_47_LOC" low="16" high="23" type="uint"/>
</bitset>
- <reg32 offset="0x9101" name="VPC_VS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9102" name="VPC_GS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9103" name="VPC_DS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9101" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9311" name="VPC_VS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9312" name="VPC_GS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9313" name="VPC_DS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <bitset name="a6xx_vpc_xs_layer_cntl" inline="yes">
+ <bitset name="a6xx_vpc_xs_siv_cntl" inline="yes">
<bitfield name="LAYERLOC" low="0" high="7" type="uint"/>
<bitfield name="VIEWLOC" low="8" high="15" type="uint"/>
<bitfield name="SHADINGRATELOC" low="16" high="23" type="uint" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9104" name="VPC_VS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9105" name="VPC_GS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9106" name="VPC_DS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9104" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9314" name="VPC_VS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9315" name="VPC_GS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9316" name="VPC_DS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9316" name="VPC_DS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
<reg32 offset="0x9107" name="VPC_UNKNOWN_9107" variants="A6XX" usage="rp_blit">
- <!-- this mirrors PC_RASTER_CNTL::DISCARD, although it seems it's unused -->
+ <!-- this mirrors VPC_RAST_STREAM_CNTL::DISCARD, although it seems it's unused -->
<bitfield name="RASTER_DISCARD" pos="0" type="boolean"/>
<bitfield name="UNK2" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x9108" name="VPC_POLYGON_MODE" usage="rp_blit">
+ <reg32 offset="0x9108" name="VPC_RAST_CNTL" usage="rp_blit">
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <bitset name="a6xx_primitive_cntl_0" inline="yes">
+ <bitset name="a6xx_pc_cntl" inline="yes">
<bitfield name="PRIMITIVE_RESTART" pos="0" type="boolean"/>
<bitfield name="PROVOKING_VTX_LAST" pos="1" type="boolean"/>
<bitfield name="D3D_VERTEX_ORDERING" pos="2" type="boolean">
@@ -4113,7 +1961,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK3" pos="3" type="boolean"/>
</bitset>
- <bitset name="a6xx_primitive_cntl_5" inline="yes">
+ <bitset name="a6xx_gs_param_0" inline="yes">
<doc>
geometry shader
</doc>
@@ -4125,7 +1973,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK18" pos="18"/>
</bitset>
- <bitset name="a6xx_multiview_cntl" inline="yes">
+ <bitset name="a6xx_stereo_rendering_cntl" inline="yes">
<bitfield name="ENABLE" pos="0" type="boolean"/>
<bitfield name="DISABLEMULTIPOS" pos="1" type="boolean">
<doc>
@@ -4139,10 +1987,10 @@ to upconvert to 32b float internally?
<bitfield name="VIEWS" low="2" high="6" type="uint"/>
</bitset>
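	<!--
	 For example, assuming the usual multiview programming model, a
	 VK_KHR_multiview pass with viewMask = 0x3 would plausibly use
	   ENABLE=1, VIEWS=2
	 together with 0x3 in the *_STEREO_RENDERING_VIEWMASK registers.
	-->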
- <reg32 offset="0x9109" name="VPC_PRIMITIVE_CNTL_0" type="a6xx_primitive_cntl_0" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910a" name="VPC_PRIMITIVE_CNTL_5" type="a6xx_primitive_cntl_5" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910b" name="VPC_MULTIVIEW_MASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910c" name="VPC_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX-" usage="rp_blit"/>
<enum name="a6xx_varying_interp_mode">
<value value="0" name="INTERP_SMOOTH"/>
@@ -4159,11 +2007,11 @@ to upconvert to 32b float internally?
</enum>
<!-- 0x9109-0x91ff invalid -->
- <array offset="0x9200" name="VPC_VARYING_INTERP" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9200" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" usage="rp_blit">
<doc>Packed array of a6xx_varying_interp_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
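	<!--
	 Each MODE register packs sixteen 2-bit a6xx_varying_interp_mode values,
	 so, assuming linear packing, varying component c is configured via
	   regn  = c / 16;
	   shift = (c % 16) * 2;
	 giving the 8 registers room for 32 vec4 varyings' worth of components.
	-->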
- <array offset="0x9208" name="VPC_VARYING_PS_REPL" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9208" name="VPC_VARYING_REPLACE_MODE_0" stride="1" length="8" usage="rp_blit">
<doc>Packed array of a6xx_varying_ps_repl_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
@@ -4172,12 +2020,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x9210" name="VPC_UNKNOWN_9210" low="0" high="31" variants="A6XX" usage="cmd"/>
<reg32 offset="0x9211" name="VPC_UNKNOWN_9211" low="0" high="31" variants="A6XX" usage="cmd"/>
- <array offset="0x9212" name="VPC_VAR" stride="1" length="4" usage="rp_blit">
+ <array offset="0x9212" name="VPC_VARYING_LM_TRANSFER_CNTL_0" stride="1" length="4" usage="rp_blit">
<!-- one bit per varying component: -->
<reg32 offset="0" name="DISABLE"/>
</array>
- <reg32 offset="0x9216" name="VPC_SO_CNTL" usage="rp_blit">
+ <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" usage="rp_blit">
<!--
Choose which DWORD to write to. There is an array of
	 (4 * 64) DWORDs, dumped in the devcoredump at
@@ -4198,7 +2046,7 @@ to upconvert to 32b float internally?
When EmitStreamVertex(N) happens, the HW goes to DWORD
	 64 * N and then "executes" the next 64 DWORDs.
- This field is auto-incremented when VPC_SO_PROG is
+ This field is auto-incremented when VPC_SO_MAPPING_PORT is
written to.
-->
<bitfield name="ADDR" low="0" high="7" type="hex"/>
@@ -4206,7 +2054,7 @@ to upconvert to 32b float internally?
<bitfield name="RESET" pos="16" type="boolean"/>
</reg32>
<!-- special register, write multiple times to load SO program (not readable) -->
- <reg32 offset="0x9217" name="VPC_SO_PROG" usage="rp_blit">
+ <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" usage="rp_blit">
<bitfield name="A_BUF" low="0" high="1" type="uint"/>
<bitfield name="A_OFF" low="2" high="10" shr="2" type="uint"/>
<bitfield name="A_EN" pos="11" type="boolean"/>
@@ -4215,7 +2063,7 @@ to upconvert to 32b float internally?
<bitfield name="B_EN" pos="23" type="boolean"/>
</reg32>
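	<!--
	 So loading the mapping is plausibly a sequence along the lines of
	   VPC_SO_MAPPING_WPTR = RESET | ADDR(0)
	   VPC_SO_MAPPING_PORT = A_EN | A_BUF(buf) | A_OFF(off) | ...   (DWORD 0)
	   VPC_SO_MAPPING_PORT = ...          (DWORD 1, WPTR auto-increments)
	 with each PORT write consuming the next DWORD of the (4 * 64) DWORD
	 array described above.
	-->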
- <reg64 offset="0x9218" name="VPC_SO_STREAM_COUNTS" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" usage="cmd"/>
<array offset="0x921a" name="VPC_SO" stride="7" length="4" usage="cmd">
<reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/>
@@ -4225,14 +2073,14 @@ to upconvert to 32b float internally?
<reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/>
</array>
- <reg32 offset="0x9236" name="VPC_POINT_COORD_INVERT" usage="cmd">
+ <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" usage="cmd">
<bitfield name="INVERT" pos="0" type="boolean"/>
</reg32>
<!-- 0x9237-0x92ff invalid -->
<!-- always 0x0 ? -->
<reg32 offset="0x9300" name="VPC_UNKNOWN_9300" low="0" high="2" usage="cmd"/>
- <bitset name="a6xx_vpc_xs_pack" inline="yes">
+ <bitset name="a6xx_vpc_xs_cntl" inline="yes">
<doc>
num of varyings plus four for gl_Position (plus one if gl_PointSize)
plus # of transform-feedback (streamout) varyings if using the
@@ -4249,11 +2097,11 @@ to upconvert to 32b float internally?
</doc>
</bitfield>
</bitset>
- <reg32 offset="0x9301" name="VPC_VS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
- <reg32 offset="0x9302" name="VPC_GS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
- <reg32 offset="0x9303" name="VPC_DS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
+ <reg32 offset="0x9301" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9304" name="VPC_CNTL_0" usage="rp_blit">
+ <reg32 offset="0x9304" name="VPC_PS_CNTL" usage="rp_blit">
<bitfield name="NUMNONPOSVAR" low="0" high="7" type="uint"/>
<!-- for fixed-function (i.e. no GS) gl_PrimitiveID in FS -->
<bitfield name="PRIMIDLOC" low="8" high="15" type="uint"/>
@@ -4272,7 +2120,7 @@ to upconvert to 32b float internally?
</bitfield>
</reg32>
- <reg32 offset="0x9305" name="VPC_SO_STREAM_CNTL" usage="rp_blit">
+ <reg32 offset="0x9305" name="VPC_SO_CNTL" usage="rp_blit">
<!--
It's offset by 1, and 0 means "disabled"
-->
@@ -4282,19 +2130,19 @@ to upconvert to 32b float internally?
<bitfield name="BUF3_STREAM" low="9" high="11" type="uint"/>
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
</reg32>
- <reg32 offset="0x9306" name="VPC_SO_DISABLE" usage="rp_blit">
+ <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" usage="rp_blit">
<bitfield name="DISABLE" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0x9307" name="VPC_POLYGON_MODE2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" variants="A6XX-" usage="rp_blit"> <!-- A702 + A7xx -->
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <reg32 offset="0x9308" name="VPC_ATTR_BUF_SIZE_GMEM" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
<bitfield name="SIZE_GMEM" low="0" high="31"/>
</reg32>
- <reg32 offset="0x9309" name="VPC_ATTR_BUF_BASE_GMEM" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX-" usage="rp_blit">
<bitfield name="BASE_GMEM" low="0" high="31"/>
</reg32>
- <reg32 offset="0x9b09" name="PC_ATTR_BUF_SIZE_GMEM" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
<bitfield name="SIZE_GMEM" low="0" high="31"/>
</reg32>
@@ -4311,15 +2159,15 @@ to upconvert to 32b float internally?
<!-- TODO: regs from 0x9624-0x963a -->
<!-- 0x963b-0x97ff invalid -->
- <reg32 offset="0x9800" name="PC_TESS_NUM_VERTEX" low="0" high="5" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" usage="rp_blit"/>
<!-- always 0x0 ? -->
- <reg32 offset="0x9801" name="PC_HS_INPUT_SIZE" usage="rp_blit">
+ <reg32 offset="0x9801" name="PC_HS_PARAM_1" usage="rp_blit">
<bitfield name="SIZE" low="0" high="10" type="uint"/>
<bitfield name="UNK13" pos="13"/>
</reg32>
- <reg32 offset="0x9802" name="PC_TESS_CNTL" usage="rp_blit">
+ <reg32 offset="0x9802" name="PC_DS_PARAM" usage="rp_blit">
<bitfield name="SPACING" low="0" high="1" type="a6xx_tess_spacing"/>
<bitfield name="OUTPUT" low="2" high="3" type="a6xx_tess_output"/>
</reg32>
@@ -4334,7 +2182,7 @@ to upconvert to 32b float internally?
</reg32>
<!-- New in a6xx gen3+ -->
- <reg32 offset="0x9808" name="PC_SO_STREAM_CNTL" usage="rp_blit">
+ <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" usage="rp_blit">
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
</reg32>
@@ -4344,15 +2192,15 @@ to upconvert to 32b float internally?
<!-- 0x980b-0x983f invalid -->
<!-- 0x9840 - 0x9842 are not readable -->
- <reg32 offset="0x9840" name="PC_DRAW_CMD">
+ <reg32 offset="0x9840" name="PC_DRAW_INITIATOR">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0x9841" name="PC_DISPATCH_CMD">
+ <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0x9842" name="PC_EVENT_CMD">
+ <reg32 offset="0x9842" name="PC_EVENT_INITIATOR">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
@@ -4367,27 +2215,27 @@ to upconvert to 32b float internally?
<!-- 0x9843-0x997f invalid -->
- <reg32 offset="0x9981" name="PC_POLYGON_MODE" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <reg32 offset="0x9809" name="PC_POLYGON_MODE" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <reg32 offset="0x9980" name="PC_RASTER_CNTL" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" variants="A6XX" usage="rp_blit">
<!-- which stream to send to GRAS -->
<bitfield name="STREAM" low="0" high="1" type="uint"/>
<!-- discard primitives before rasterization -->
<bitfield name="DISCARD" pos="2" type="boolean"/>
</reg32>
- <!-- VPC_RASTER_CNTL -->
- <reg32 offset="0x9107" name="PC_RASTER_CNTL" variants="A7XX-" usage="rp_blit">
+ <!-- VPC_RAST_STREAM_CNTL -->
+ <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" variants="A7XX-" usage="rp_blit">
<!-- which stream to send to GRAS -->
<bitfield name="STREAM" low="0" high="1" type="uint"/>
<!-- discard primitives before rasterization -->
<bitfield name="DISCARD" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x9317" name="PC_RASTER_CNTL_V2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" variants="A7XX-" usage="rp_blit">
<!-- which stream to send to GRAS -->
<bitfield name="STREAM" low="0" high="1" type="uint"/>
<!-- discard primitives before rasterization -->
@@ -4397,17 +2245,17 @@ to upconvert to 32b float internally?
<!-- Both are a750+.
Probably needed to correctly overlap execution of several draws.
-->
- <reg32 offset="0x9885" name="PC_TESS_PARAM_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
	<!-- The blob adds a bit more space ({0x10, 0x20, 0x30, 0x40} bytes), but the
	 meaning of this additional space is not known.
-->
- <reg32 offset="0x9886" name="PC_TESS_FACTOR_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
<!-- 0x9982-0x9aff invalid -->
- <reg32 offset="0x9b00" name="PC_PRIMITIVE_CNTL_0" type="a6xx_primitive_cntl_0" usage="rp_blit"/>
+ <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" usage="rp_blit"/>
- <bitset name="a6xx_xs_out_cntl" inline="yes">
+ <bitset name="a6xx_pc_xs_cntl" inline="yes">
<doc>
num of varyings plus four for gl_Position (plus one if gl_PointSize)
plus # of transform-feedback (streamout) varyings if using the
@@ -4417,19 +2265,19 @@ to upconvert to 32b float internally?
<bitfield name="PSIZE" pos="8" type="boolean"/>
<bitfield name="LAYER" pos="9" type="boolean"/>
<bitfield name="VIEW" pos="10" type="boolean"/>
- <!-- note: PC_VS_OUT_CNTL doesn't have the PRIMITIVE_ID bit -->
+ <!-- note: PC_VS_CNTL doesn't have the PRIMITIVE_ID bit -->
<bitfield name="PRIMITIVE_ID" pos="11" type="boolean"/>
<bitfield name="CLIP_MASK" low="16" high="23" type="uint"/>
<bitfield name="SHADINGRATE" pos="24" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9b01" name="PC_VS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b02" name="PC_GS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b01" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b02" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
<!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
- <reg32 offset="0x9b03" name="PC_HS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b04" name="PC_DS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b05" name="PC_PRIMITIVE_CNTL_5" type="a6xx_primitive_cntl_5" usage="rp_blit"/>
+ <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" usage="rp_blit"/>
<reg32 offset="0x9b06" name="PC_PRIMITIVE_CNTL_6" variants="A6XX" usage="rp_blit">
<doc>
@@ -4438,9 +2286,9 @@ to upconvert to 32b float internally?
<bitfield name="STRIDE_IN_VPC" low="0" high="10" type="uint"/>
</reg32>
- <reg32 offset="0x9b07" name="PC_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" usage="rp_blit"/>
<!-- mask of enabled views, doesn't exist on A630 -->
- <reg32 offset="0x9b08" name="PC_MULTIVIEW_MASK" type="hex" low="0" high="15" usage="rp_blit"/>
+ <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" usage="rp_blit"/>
<!-- 0x9b09-0x9bff invalid -->
<reg32 offset="0x9c00" name="PC_2D_EVENT_CMD">
<!-- special register (but note first 8 bits can be written/read) -->
@@ -4451,31 +2299,31 @@ to upconvert to 32b float internally?
<!-- TODO: 0x9e00-0xa000 range incomplete -->
<reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL"/>
<reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg64 offset="0x9e04" name="PC_DRAW_INDX_BASE"/>
- <reg32 offset="0x9e06" name="PC_DRAW_FIRST_INDX" type="uint"/>
- <reg32 offset="0x9e07" name="PC_DRAW_MAX_INDICES" type="uint"/>
- <reg64 offset="0x9e08" name="PC_TESSFACTOR_ADDR" variants="A6XX" type="waddress" align="32" usage="cmd"/>
- <reg64 offset="0x9810" name="PC_TESSFACTOR_ADDR" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9e04" name="PC_DMA_BASE"/>
+ <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint"/>
+ <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint"/>
+ <reg64 offset="0x9e08" name="PC_TESS_BASE" variants="A6XX" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
- <reg32 offset="0x9e0b" name="PC_DRAW_INITIATOR" type="vgt_draw_initiator_a4xx">
+ <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx">
<doc>
Possibly not really "initiating" the draw but the layout is similar
to VGT_DRAW_INITIATOR on older gens
</doc>
</reg32>
- <reg32 offset="0x9e0c" name="PC_DRAW_NUM_INSTANCES" type="uint"/>
- <reg32 offset="0x9e0d" name="PC_DRAW_NUM_INDICES" type="uint"/>
+ <reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint"/>
+ <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint"/>
<!-- These match the contents of CP_SET_BIN_DATA (not written directly) -->
- <reg32 offset="0x9e11" name="PC_VSTREAM_CONTROL">
+ <reg32 offset="0x9e11" name="PC_VIS_STREAM_CNTL">
<bitfield name="UNK0" low="0" high="15"/>
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
</reg32>
- <reg64 offset="0x9e12" name="PC_BIN_PRIM_STRM" type="waddress" align="32"/>
- <reg64 offset="0x9e14" name="PC_BIN_DRAW_STRM" type="waddress" align="32"/>
+ <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
+ <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
- <reg32 offset="0x9e1c" name="PC_VISIBILITY_OVERRIDE">
+ <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE">
<doc>Written by CP_SET_VISIBILITY_OVERRIDE handler</doc>
<bitfield name="OVERRIDE" pos="0" type="boolean"/>
</reg32>
@@ -4488,18 +2336,18 @@ to upconvert to 32b float internally?
<!-- always 0x0 -->
<reg32 offset="0x9e72" name="PC_UNKNOWN_9E72" usage="cmd"/>
- <reg32 offset="0xa000" name="VFD_CONTROL_0" usage="rp_blit">
+ <reg32 offset="0xa000" name="VFD_CNTL_0" usage="rp_blit">
<bitfield name="FETCH_CNT" low="0" high="5" type="uint"/>
<bitfield name="DECODE_CNT" low="8" high="13" type="uint"/>
</reg32>
- <reg32 offset="0xa001" name="VFD_CONTROL_1" usage="rp_blit">
+ <reg32 offset="0xa001" name="VFD_CNTL_1" usage="rp_blit">
<bitfield name="REGID4VTX" low="0" high="7" type="a3xx_regid"/>
<bitfield name="REGID4INST" low="8" high="15" type="a3xx_regid"/>
<bitfield name="REGID4PRIMID" low="16" high="23" type="a3xx_regid"/>
<!-- only used for VS in non-multi-position-output case -->
<bitfield name="REGID4VIEWID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa002" name="VFD_CONTROL_2" usage="rp_blit">
+ <reg32 offset="0xa002" name="VFD_CNTL_2" usage="rp_blit">
<bitfield name="REGID_HSRELPATCHID" low="0" high="7" type="a3xx_regid">
<doc>
This is the ID of the current patch within the
@@ -4512,32 +2360,32 @@ to upconvert to 32b float internally?
</bitfield>
<bitfield name="REGID_INVOCATIONID" low="8" high="15" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa003" name="VFD_CONTROL_3" usage="rp_blit">
+ <reg32 offset="0xa003" name="VFD_CNTL_3" usage="rp_blit">
<bitfield name="REGID_DSPRIMID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="REGID_DSRELPATCHID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="REGID_TESSX" low="16" high="23" type="a3xx_regid"/>
<bitfield name="REGID_TESSY" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa004" name="VFD_CONTROL_4" usage="rp_blit">
+ <reg32 offset="0xa004" name="VFD_CNTL_4" usage="rp_blit">
<bitfield name="UNK0" low="0" high="7" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa005" name="VFD_CONTROL_5" usage="rp_blit">
+ <reg32 offset="0xa005" name="VFD_CNTL_5" usage="rp_blit">
<bitfield name="REGID_GSHEADER" low="0" high="7" type="a3xx_regid"/>
<bitfield name="UNK8" low="8" high="15" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa006" name="VFD_CONTROL_6" usage="rp_blit">
+ <reg32 offset="0xa006" name="VFD_CNTL_6" usage="rp_blit">
<!--
True if gl_PrimitiveID is read via the FS
-->
<bitfield name="PRIMID4PSEN" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0xa007" name="VFD_MODE_CNTL" usage="cmd">
+ <reg32 offset="0xa007" name="VFD_RENDER_MODE" usage="cmd">
<bitfield name="RENDER_MODE" low="0" high="2" type="a6xx_render_mode"/>
</reg32>
- <reg32 offset="0xa008" name="VFD_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" usage="rp_blit"/>
- <reg32 offset="0xa009" name="VFD_ADD_OFFSET" usage="cmd">
+ <reg32 offset="0xa008" name="VFD_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" usage="rp_blit"/>
+ <reg32 offset="0xa009" name="VFD_MODE_CNTL" usage="cmd">
<!-- add VFD_INDEX_OFFSET to REGID4VTX -->
<bitfield name="VERTEX" pos="0" type="boolean"/>
<!-- add VFD_INSTANCE_START_OFFSET to REGID4INST -->
@@ -4546,14 +2394,14 @@ to upconvert to 32b float internally?
<reg32 offset="0xa00e" name="VFD_INDEX_OFFSET" usage="rp_blit"/>
<reg32 offset="0xa00f" name="VFD_INSTANCE_START_OFFSET" usage="rp_blit"/>
- <array offset="0xa010" name="VFD_FETCH" stride="4" length="32" usage="rp_blit">
+ <array offset="0xa010" name="VFD_VERTEX_BUFFER" stride="4" length="32" usage="rp_blit">
<reg64 offset="0x0" name="BASE" type="address" align="1"/>
<reg32 offset="0x2" name="SIZE" type="uint"/>
<reg32 offset="0x3" name="STRIDE" low="0" high="11" type="uint"/>
</array>
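	<!--
	 The fetch instructions below index back into this table, so e.g. an
	 attribute sourced from vertex buffer 2 at byte offset 16 would
	 plausibly be encoded as INSTR with IDX=2, OFFSET=16 (plus INSTANCED
	 set for per-instance data).
	-->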
- <array offset="0xa090" name="VFD_DECODE" stride="2" length="32" usage="rp_blit">
+ <array offset="0xa090" name="VFD_FETCH_INSTR" stride="2" length="32" usage="rp_blit">
<reg32 offset="0x0" name="INSTR">
- <!-- IDX and byte OFFSET into VFD_FETCH -->
+ <!-- IDX and byte OFFSET into VFD_VERTEX_BUFFER -->
<bitfield name="IDX" low="0" high="4" type="uint"/>
<bitfield name="OFFSET" low="5" high="16"/>
<bitfield name="INSTANCED" pos="17" type="boolean"/>
@@ -4573,7 +2421,7 @@ to upconvert to 32b float internally?
<reg32 offset="0xa0f8" name="VFD_POWER_CNTL" low="0" high="2" usage="rp_blit"/>
- <reg32 offset="0xa600" name="VFD_UNKNOWN_A600" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa600" name="VFD_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xa601" name="VFD_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
<array offset="0xa610" name="VFD_PERFCTR_VFD_SEL" stride="1" length="8" variants="A6XX"/>
@@ -4588,7 +2436,7 @@ to upconvert to 32b float internally?
<value value="1" name="THREAD128"/>
</enum>
- <bitset name="a6xx_sp_xs_ctrl_reg0" inline="yes">
+ <bitset name="a6xx_sp_xs_cntl_0" inline="yes">
<!-- if set to SINGLE, only use 1 concurrent wave on each SP -->
<bitfield name="THREADMODE" pos="0" type="a3xx_threadmode"/>
<!--
@@ -4620,7 +2468,7 @@ to upconvert to 32b float internally?
-->
<bitfield name="BINDLESS_TEX" pos="0" type="boolean"/>
<bitfield name="BINDLESS_SAMP" pos="1" type="boolean"/>
- <bitfield name="BINDLESS_IBO" pos="2" type="boolean"/>
+ <bitfield name="BINDLESS_UAV" pos="2" type="boolean"/>
<bitfield name="BINDLESS_UBO" pos="3" type="boolean"/>
<bitfield name="ENABLED" pos="8" type="boolean"/>
@@ -4630,17 +2478,17 @@ to upconvert to 32b float internally?
-->
<bitfield name="NTEX" low="9" high="16" type="uint"/>
<bitfield name="NSAMP" low="17" high="21" type="uint"/>
- <bitfield name="NIBO" low="22" high="28" type="uint"/>
+ <bitfield name="NUAV" low="22" high="28" type="uint"/>
</bitset>
- <bitset name="a6xx_sp_xs_prim_cntl" inline="yes">
+ <bitset name="a6xx_sp_xs_output_cntl" inline="yes">
<!-- # of VS outputs including pos/psize -->
<bitfield name="OUT" low="0" high="5" type="uint"/>
<!-- FLAGS_REGID only for GS -->
<bitfield name="FLAGS_REGID" low="6" high="13" type="a3xx_regid"/>
</bitset>
- <reg32 offset="0xa800" name="SP_VS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa800" name="SP_VS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!--
This field actually controls all geometry stages. TCS, TES, and
GS must have the same mergedregs setting as VS.
@@ -4665,10 +2513,10 @@ to upconvert to 32b float internally?
</reg32>
<!-- bitmask of true/false conditions for VS brac.N instructions,
bit N corresponds to brac.N -->
- <reg32 offset="0xa801" name="SP_VS_BRANCH_COND" type="hex"/>
+ <reg32 offset="0xa801" name="SP_VS_BOOLEAN_CF_MASK" type="hex"/>
<!-- # of VS outputs including pos/psize -->
- <reg32 offset="0xa802" name="SP_VS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
- <array offset="0xa803" name="SP_VS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0xa802" name="SP_VS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <array offset="0xa803" name="SP_VS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
@@ -4678,12 +2526,12 @@ to upconvert to 32b float internally?
</array>
<!--
Starting with a5xx, position/psize outputs from shader end up in the
- SP_VS_OUT map, with highest OUTLOCn position. (Generally they are
+ SP_VS_OUTPUT map, with highest OUTLOCn position. (Generally they are
the last entries too, except when gl_PointCoord is used, in which
case the blob inserts an extra varying after, but with a lower
OUTLOC position.) If present, psize is last, preceded by position.
-->
- <array offset="0xa813" name="SP_VS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa813" name="SP_VS_VPC_DEST" stride="1" length="8" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
<bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
@@ -4752,7 +2600,7 @@ to upconvert to 32b float internally?
</bitfield>
</bitset>
- <bitset name="a6xx_sp_xs_pvt_mem_hw_stack_offset" inline="yes">
+ <bitset name="a6xx_sp_xs_pvt_mem_stack_offset" inline="yes">
<doc>
This seems to be the equivalent of HWSTACKOFFSET in
a3xx. The ldp/stp offset formula above isn't affected by
@@ -4763,18 +2611,18 @@ to upconvert to 32b float internally?
<bitfield name="OFFSET" low="0" high="18" shr="11"/>
</bitset>
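+ <!--
+ Encoding note, assuming rnndb shr semantics (the stored field is the
+ value shifted right by 11): OFFSET is programmed in 2 KiB granules,
+ e.g. a 64 KiB offset would be written as 32.
+ -->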
- <reg32 offset="0xa81b" name="SP_VS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa81c" name="SP_VS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa81b" name="SP_VS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa81c" name="SP_VS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa81e" name="SP_VS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa81f" name="SP_VS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa81f" name="SP_VS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa821" name="SP_VS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa822" name="SP_VS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa822" name="SP_VS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa823" name="SP_VS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa824" name="SP_VS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa825" name="SP_VS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa82d" name="SP_VS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa824" name="SP_VS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa825" name="SP_VS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa82d" name="SP_VS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa830" name="SP_HS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa830" name="SP_HS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!-- There is no mergedregs bit, that comes from the VS. -->
<bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
</reg32>
@@ -4782,32 +2630,32 @@ to upconvert to 32b float internally?
Total size of local storage in dwords divided by the wave size.
The maximum value is 64. Since the wave size is always 64 for HS,
the maximum size of local storage is:
- 64 (wavesize) * 64 (SP_HS_WAVE_INPUT_SIZE) * 4 = 16k
+ 64 (wavesize) * 64 (SP_HS_CNTL_1) * 4 = 16k
-->
- <reg32 offset="0xa831" name="SP_HS_WAVE_INPUT_SIZE" low="0" high="7" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa832" name="SP_HS_BRANCH_COND" type="hex" usage="rp_blit"/>
+ <reg32 offset="0xa831" name="SP_HS_CNTL_1" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa832" name="SP_HS_BOOLEAN_CF_MASK" type="hex" usage="rp_blit"/>
<!-- TODO: exact same layout as 0xa81b-0xa825 -->
- <reg32 offset="0xa833" name="SP_HS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa834" name="SP_HS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa833" name="SP_HS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa834" name="SP_HS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa836" name="SP_HS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa837" name="SP_HS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa837" name="SP_HS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa839" name="SP_HS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa83a" name="SP_HS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa83a" name="SP_HS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa83b" name="SP_HS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa83c" name="SP_HS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa83d" name="SP_HS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa82f" name="SP_HS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa83c" name="SP_HS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa83d" name="SP_HS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa82f" name="SP_HS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa840" name="SP_DS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa840" name="SP_DS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!-- There is no mergedregs bit, that comes from the VS. -->
<bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
</reg32>
- <reg32 offset="0xa841" name="SP_DS_BRANCH_COND" type="hex"/>
+ <reg32 offset="0xa841" name="SP_DS_BOOLEAN_CF_MASK" type="hex"/>
<!-- TODO: exact same layout as 0xa802-0xa81a -->
- <reg32 offset="0xa842" name="SP_DS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
- <array offset="0xa843" name="SP_DS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0xa842" name="SP_DS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <array offset="0xa843" name="SP_DS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
@@ -4815,7 +2663,7 @@ to upconvert to 32b float internally?
<bitfield name="B_COMPMASK" low="24" high="27" type="hex"/>
</reg32>
</array>
- <array offset="0xa853" name="SP_DS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa853" name="SP_DS_VPC_DEST" stride="1" length="8" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
<bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
@@ -4825,22 +2673,22 @@ to upconvert to 32b float internally?
</array>
<!-- TODO: exact same layout as 0xa81b-0xa825 -->
- <reg32 offset="0xa85b" name="SP_DS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa85c" name="SP_DS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa85b" name="SP_DS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa85c" name="SP_DS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa85e" name="SP_DS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa85f" name="SP_DS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa85f" name="SP_DS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa861" name="SP_DS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa862" name="SP_DS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa862" name="SP_DS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa863" name="SP_DS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa864" name="SP_DS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa865" name="SP_DS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa868" name="SP_DS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa864" name="SP_DS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa865" name="SP_DS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa868" name="SP_DS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa870" name="SP_GS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa870" name="SP_GS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!-- There is no mergedregs bit, that comes from the VS. -->
<bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
</reg32>
- <reg32 offset="0xa871" name="SP_GS_PRIM_SIZE" low="0" high="7" type="uint" usage="rp_blit">
+ <reg32 offset="0xa871" name="SP_GS_CNTL_1" low="0" high="7" type="uint" usage="rp_blit">
<doc>
Normally the size of the output of the last stage in
dwords. It should be programmed as follows:
@@ -4854,11 +2702,11 @@ to upconvert to 32b float internally?
doesn't matter in practice.
</doc>
</reg32>
- <reg32 offset="0xa872" name="SP_GS_BRANCH_COND" type="hex" usage="rp_blit"/>
+ <reg32 offset="0xa872" name="SP_GS_BOOLEAN_CF_MASK" type="hex" usage="rp_blit"/>
<!-- TODO: exact same layout as 0xa802-0xa81a -->
- <reg32 offset="0xa873" name="SP_GS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
- <array offset="0xa874" name="SP_GS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0xa873" name="SP_GS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <array offset="0xa874" name="SP_GS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
@@ -4867,7 +2715,7 @@ to upconvert to 32b float internally?
</reg32>
</array>
- <array offset="0xa884" name="SP_GS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa884" name="SP_GS_VPC_DEST" stride="1" length="8" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
<bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
@@ -4877,29 +2725,29 @@ to upconvert to 32b float internally?
</array>
<!-- TODO: exact same layout as 0xa81b-0xa825 -->
- <reg32 offset="0xa88c" name="SP_GS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa88d" name="SP_GS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa88c" name="SP_GS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa88d" name="SP_GS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa88f" name="SP_GS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa890" name="SP_GS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa890" name="SP_GS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa892" name="SP_GS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa893" name="SP_GS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa893" name="SP_GS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa894" name="SP_GS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa895" name="SP_GS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa896" name="SP_GS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa899" name="SP_GS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
-
- <reg64 offset="0xa8a0" name="SP_VS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a2" name="SP_HS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a4" name="SP_DS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a6" name="SP_GS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a8" name="SP_VS_TEX_CONST" type="address" align="64" usage="cmd"/>
- <reg64 offset="0xa8aa" name="SP_HS_TEX_CONST" type="address" align="64" usage="cmd"/>
- <reg64 offset="0xa8ac" name="SP_DS_TEX_CONST" type="address" align="64" usage="cmd"/>
- <reg64 offset="0xa8ae" name="SP_GS_TEX_CONST" type="address" align="64" usage="cmd"/>
+ <reg32 offset="0xa895" name="SP_GS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa896" name="SP_GS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa899" name="SP_GS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
+
+ <reg64 offset="0xa8a0" name="SP_VS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a2" name="SP_HS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a4" name="SP_DS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a6" name="SP_GS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a8" name="SP_VS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8aa" name="SP_HS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8ac" name="SP_DS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8ae" name="SP_GS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
<!-- TODO: 4 unknown bool registers 0xa8c0-0xa8c3 -->
- <reg32 offset="0xa980" name="SP_FS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa980" name="SP_PS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/>
<bitfield name="UNK21" pos="21" type="boolean"/>
<bitfield name="VARYING" pos="22" type="boolean"/>
@@ -4909,8 +2757,7 @@ to upconvert to 32b float internally?
fine derivatives and quad subgroup ops.
</doc>
</bitfield>
- <!-- note: vk blob uses bit24 -->
- <bitfield name="UNK24" pos="24" type="boolean"/>
+ <bitfield name="INOUTREGOVERLAP" pos="24" type="boolean"/>
<bitfield name="UNK25" pos="25" type="boolean"/>
<bitfield name="PIXLODENABLE" pos="26" type="boolean">
<doc>
@@ -4923,12 +2770,12 @@ to upconvert to 32b float internally?
<bitfield name="EARLYPREAMBLE" pos="28" type="boolean"/>
<bitfield name="MERGEDREGS" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="0xa981" name="SP_FS_BRANCH_COND" type="hex"/>
- <reg32 offset="0xa982" name="SP_FS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa983" name="SP_FS_OBJ_START" type="address" align="32" usage="rp_blit"/>
- <reg32 offset="0xa985" name="SP_FS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa986" name="SP_FS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
- <reg32 offset="0xa988" name="SP_FS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
+ <reg32 offset="0xa981" name="SP_PS_BOOLEAN_CF_MASK" type="hex"/>
+ <reg32 offset="0xa982" name="SP_PS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa983" name="SP_PS_BASE" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa985" name="SP_PS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
+ <reg64 offset="0xa986" name="SP_PS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa988" name="SP_PS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
<reg32 offset="0xa989" name="SP_BLEND_CNTL" usage="rp_blit">
<!-- per-mrt enable bit -->
@@ -4948,7 +2795,7 @@ to upconvert to 32b float internally?
<bitfield name="SRGB_MRT6" pos="6" type="boolean"/>
<bitfield name="SRGB_MRT7" pos="7" type="boolean"/>
</reg32>
- <reg32 offset="0xa98b" name="SP_FS_RENDER_COMPONENTS" usage="rp_blit">
+ <reg32 offset="0xa98b" name="SP_PS_OUTPUT_MASK" usage="rp_blit">
<bitfield name="RT0" low="0" high="3"/>
<bitfield name="RT1" low="4" high="7"/>
<bitfield name="RT2" low="8" high="11"/>
@@ -4958,17 +2805,17 @@ to upconvert to 32b float internally?
<bitfield name="RT6" low="24" high="27"/>
<bitfield name="RT7" low="28" high="31"/>
</reg32>
- <reg32 offset="0xa98c" name="SP_FS_OUTPUT_CNTL0" usage="rp_blit">
+ <reg32 offset="0xa98c" name="SP_PS_OUTPUT_CNTL" usage="rp_blit">
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="0" type="boolean"/>
<bitfield name="DEPTH_REGID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="SAMPMASK_REGID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="STENCILREF_REGID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa98d" name="SP_FS_OUTPUT_CNTL1" usage="rp_blit">
+ <reg32 offset="0xa98d" name="SP_PS_MRT_CNTL" usage="rp_blit">
<bitfield name="MRT" low="0" high="3" type="uint"/>
</reg32>
- <array offset="0xa98e" name="SP_FS_OUTPUT" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa98e" name="SP_PS_OUTPUT" stride="1" length="8" usage="rp_blit">
<doc>per MRT</doc>
<reg32 offset="0x0" name="REG">
<bitfield name="REGID" low="0" high="7" type="a3xx_regid"/>
@@ -4976,7 +2823,7 @@ to upconvert to 32b float internally?
</reg32>
</array>
- <array offset="0xa996" name="SP_FS_MRT" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa996" name="SP_PS_MRT" stride="1" length="8" usage="rp_blit">
<reg32 offset="0" name="REG">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="COLOR_SINT" pos="8" type="boolean"/>
@@ -4985,7 +2832,7 @@ to upconvert to 32b float internally?
</reg32>
</array>
- <reg32 offset="0xa99e" name="SP_FS_PREFETCH_CNTL" usage="rp_blit">
+ <reg32 offset="0xa99e" name="SP_PS_INITIAL_TEX_LOAD_CNTL" usage="rp_blit">
<bitfield name="COUNT" low="0" high="2" type="uint"/>
<bitfield name="IJ_WRITE_DISABLE" pos="3" type="boolean"/>
<doc>
@@ -5002,7 +2849,7 @@ to upconvert to 32b float internally?
<!-- Blob never uses it -->
<bitfield name="CONSTSLOTID4COORD" low="16" high="24" type="uint" variants="A7XX-"/>
</reg32>
- <array offset="0xa99f" name="SP_FS_PREFETCH" stride="1" length="4" variants="A6XX" usage="rp_blit">
+ <array offset="0xa99f" name="SP_PS_INITIAL_TEX_LOAD" stride="1" length="4" variants="A6XX" usage="rp_blit">
<reg32 offset="0" name="CMD" variants="A6XX">
<bitfield name="SRC" low="0" high="6" type="uint"/>
<bitfield name="SAMP_ID" low="7" high="10" type="uint"/>
@@ -5016,7 +2863,7 @@ to upconvert to 32b float internally?
<bitfield name="CMD" low="29" high="31" type="a6xx_tex_prefetch_cmd"/>
</reg32>
</array>
- <array offset="0xa99f" name="SP_FS_PREFETCH" stride="1" length="4" variants="A7XX-" usage="rp_blit">
+ <array offset="0xa99f" name="SP_PS_INITIAL_TEX_LOAD" stride="1" length="4" variants="A7XX-" usage="rp_blit">
<reg32 offset="0" name="CMD" variants="A7XX-">
<bitfield name="SRC" low="0" high="6" type="uint"/>
<bitfield name="SAMP_ID" low="7" high="9" type="uint"/>
@@ -5028,22 +2875,23 @@ to upconvert to 32b float internally?
<bitfield name="CMD" low="26" high="29" type="a6xx_tex_prefetch_cmd"/>
</reg32>
</array>
- <array offset="0xa9a3" name="SP_FS_BINDLESS_PREFETCH" stride="1" length="4" usage="rp_blit">
+ <array offset="0xa9a3" name="SP_PS_INITIAL_TEX_INDEX" stride="1" length="4" usage="rp_blit">
<reg32 offset="0" name="CMD">
<bitfield name="SAMP_ID" low="0" high="15" type="uint"/>
<bitfield name="TEX_ID" low="16" high="31" type="uint"/>
</reg32>
</array>
- <reg32 offset="0xa9a7" name="SP_FS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa9a7" name="SP_PS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa9a8" name="SP_UNKNOWN_A9A8" low="0" high="16" usage="cmd"/> <!-- always 0x0 ? -->
- <reg32 offset="0xa9a9" name="SP_FS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa9a9" name="SP_PS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa9ab" name="SP_PS_UNKNOWN_A9AB" variants="A7XX-" usage="cmd"/>
<!-- TODO: unknown bool register at 0xa9aa, likely same as 0xa8c0-0xa8c3 but for FS -->
- <reg32 offset="0xa9b0" name="SP_CS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="cmd">
+ <reg32 offset="0xa9b0" name="SP_CS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="cmd">
<bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/>
<!-- seems to make SP use fewer concurrent threads when possible? -->
<bitfield name="UNK21" pos="21" type="boolean"/>
@@ -5053,8 +2901,15 @@ to upconvert to 32b float internally?
<bitfield name="MERGEDREGS" pos="31" type="boolean"/>
</reg32>
+ <enum name="a6xx_const_ram_mode">
+ <value value="0x0" name="CONSTLEN_128"/>
+ <value value="0x1" name="CONSTLEN_192"/>
+ <value value="0x2" name="CONSTLEN_256"/>
+ <value value="0x3" name="CONSTLEN_512"/> <!-- a7xx only -->
+ </enum>
+
<!-- set for compute shaders -->
- <reg32 offset="0xa9b1" name="SP_CS_UNKNOWN_A9B1" usage="cmd">
+ <reg32 offset="0xa9b1" name="SP_CS_CNTL_1" usage="cmd">
<bitfield name="SHARED_SIZE" low="0" high="4" type="uint">
<doc>
If 0 - all 32k of shared storage is enabled, otherwise
@@ -5065,32 +2920,36 @@ to upconvert to 32b float internally?
always return 0)
</doc>
</bitfield>
- <bitfield name="UNK5" pos="5" type="boolean"/>
- <!-- always 1 ? -->
- <bitfield name="UNK6" pos="6" type="boolean"/>
+ <bitfield name="CONSTANTRAMMODE" low="5" high="6" type="a6xx_const_ram_mode">
+ <doc>
+ This defines the split between consts and local
+ memory in the Local Buffer. The programmed value
+ must be at least the actual CONSTLEN.
+ </doc>
+ </bitfield>
</reg32>
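+ <!--
+ A minimal sketch (assumed helper, same units as CONSTLEN) of picking
+ the smallest CONSTANTRAMMODE that still covers a shader's constlen,
+ per the rule above that the programmed value must be at least the
+ actual CONSTLEN:
+   enum a6xx_const_ram_mode pick_const_ram_mode(unsigned constlen)
+   {
+       if (constlen <= 128) return CONSTLEN_128;
+       if (constlen <= 192) return CONSTLEN_192;
+       if (constlen <= 256) return CONSTLEN_256;
+       return CONSTLEN_512; /* a7xx only, per the enum above */
+   }
+ -->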
- <reg32 offset="0xa9b2" name="SP_CS_BRANCH_COND" type="hex" usage="cmd"/>
- <reg32 offset="0xa9b3" name="SP_CS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="cmd"/>
- <reg64 offset="0xa9b4" name="SP_CS_OBJ_START" type="address" align="32" usage="cmd"/>
+ <reg32 offset="0xa9b2" name="SP_CS_BOOLEAN_CF_MASK" type="hex" usage="cmd"/>
+ <reg32 offset="0xa9b3" name="SP_CS_PROGRAM_COUNTER_OFFSET" type="uint" usage="cmd"/>
+ <reg64 offset="0xa9b4" name="SP_CS_BASE" type="address" align="32" usage="cmd"/>
<reg32 offset="0xa9b6" name="SP_CS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="cmd"/>
- <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_ADDR" align="32" usage="cmd"/>
+ <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_BASE" align="32" usage="cmd"/>
<reg32 offset="0xa9b9" name="SP_CS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="cmd"/>
- <reg32 offset="0xa9ba" name="SP_CS_TEX_COUNT" low="0" high="7" type="uint" usage="cmd"/>
+ <reg32 offset="0xa9ba" name="SP_CS_TSIZE" low="0" high="7" type="uint" usage="cmd"/>
<reg32 offset="0xa9bb" name="SP_CS_CONFIG" type="a6xx_sp_xs_config" usage="cmd"/>
- <reg32 offset="0xa9bc" name="SP_CS_INSTRLEN" low="0" high="27" type="uint" usage="cmd"/>
- <reg32 offset="0xa9bd" name="SP_CS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="cmd"/>
+ <reg32 offset="0xa9bc" name="SP_CS_INSTR_SIZE" low="0" high="27" type="uint" usage="cmd"/>
+ <reg32 offset="0xa9bd" name="SP_CS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="cmd"/>
<reg32 offset="0xa9be" name="SP_CS_UNKNOWN_A9BE" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa9c5" name="SP_CS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9c5" name="SP_CS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <!-- new in a6xx gen4, matches HLSQ_CS_CNTL_0 -->
- <reg32 offset="0xa9c2" name="SP_CS_CNTL_0" usage="cmd">
+ <!-- new in a6xx gen4, matches SP_CS_CONST_CONFIG_0 -->
+ <reg32 offset="0xa9c2" name="SP_CS_WIE_CNTL_0" usage="cmd">
<bitfield name="WGIDCONSTID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="WGSIZECONSTID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="WGOFFSETCONSTID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <!-- new in a6xx gen4, matches HLSQ_CS_CNTL_1 -->
- <reg32 offset="0xa9c3" name="SP_CS_CNTL_1" variants="A6XX" usage="cmd">
+ <!-- new in a6xx gen4, matches SP_CS_WGE_CNTL -->
+ <reg32 offset="0xa9c3" name="SP_CS_WIE_CNTL_1" variants="A6XX" usage="cmd">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- a650 has 6 "SP cores" (but 3 "SP"). this makes it use only
@@ -5102,7 +2961,18 @@ to upconvert to 32b float internally?
<bitfield name="THREADSIZE_SCALAR" pos="10" type="boolean"/>
</reg32>
- <reg32 offset="0xa9c3" name="SP_CS_CNTL_1" variants="A7XX-" usage="cmd">
+ <enum name="a7xx_workitem_rast_order">
+ <value value="0x0" name="WORKITEMRASTORDER_LINEAR"/>
+ <doc>
+ This is a fixed tiling, with 4x4 invocation outer tiles
+ containing 2x2 invocation inner tiles. The intent is to
+ improve cache locality with textures and images accessed
+ using gl_LocalInvocationID.
+ </doc>
+ <value value="0x1" name="WORKITEMRASTORDER_TILED"/>
+ </enum>
+
+ <reg32 offset="0xa9c3" name="SP_CS_WIE_CNTL_1" variants="A7XX-" usage="cmd">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- Must match SP_CS_CNTL_0 -->
@@ -5110,18 +2980,16 @@ to upconvert to 32b float internally?
<!-- 1 thread per wave (would hang if THREAD128 is also set) -->
<bitfield name="THREADSIZE_SCALAR" pos="9" type="boolean"/>
- <!-- Affects getone. If enabled, getone sometimes executed 1? less times
- than there are subgroups.
- -->
- <bitfield name="UNK15" pos="15" type="boolean"/>
+ <doc>How invocations/fibers within a workgroup are tiled.</doc>
+ <bitfield name="WORKITEMRASTORDER" pos="15" type="a7xx_workitem_rast_order"/>
</reg32>
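+ <!--
+ One plausible reading of WORKITEMRASTORDER_TILED, for illustration
+ only; the exact bit interleaving is an assumption and has not been
+ confirmed. Within one 4x4 outer tile, consecutive invocation indices
+ would first fill a 2x2 inner tile, then move between inner tiles:
+   static void tiled_xy(unsigned i, unsigned *x, unsigned *y)
+   {
+       *x = ((i >> 0) & 1) | (((i >> 2) & 1) << 1);
+       *y = ((i >> 1) & 1) | (((i >> 3) & 1) << 1);
+   }
+ -->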
<!-- TODO: two 64kb aligned addresses at a9d0/a9d2 -->
- <reg64 offset="0xa9e0" name="SP_FS_TEX_SAMP" type="address" align="16" usage="rp_blit"/>
- <reg64 offset="0xa9e2" name="SP_CS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa9e4" name="SP_FS_TEX_CONST" type="address" align="64" usage="rp_blit"/>
- <reg64 offset="0xa9e6" name="SP_CS_TEX_CONST" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa9e0" name="SP_PS_SAMPLER_BASE" type="address" align="16" usage="rp_blit"/>
+ <reg64 offset="0xa9e2" name="SP_CS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa9e4" name="SP_PS_TEXMEMOBJ_BASE" type="address" align="64" usage="rp_blit"/>
+ <reg64 offset="0xa9e6" name="SP_CS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
<enum name="a6xx_bindless_descriptor_size">
<doc>
@@ -5146,18 +3014,19 @@ to upconvert to 32b float internally?
</array>
<!--
- IBO state for compute shader:
+ UAV state for compute shader:
-->
- <reg64 offset="0xa9f2" name="SP_CS_IBO" type="address" align="16"/>
- <reg32 offset="0xaa00" name="SP_CS_IBO_COUNT" low="0" high="6" type="uint"/>
+ <reg64 offset="0xa9f2" name="SP_CS_UAV_BASE" type="address" align="16" variants="A6XX"/>
+ <reg64 offset="0xa9f8" name="SP_CS_UAV_BASE" type="address" align="16" variants="A7XX"/>
+ <reg32 offset="0xaa00" name="SP_CS_USIZE" low="0" high="6" type="uint"/>
<!-- Correlated with avgs/uvgs usage in FS -->
- <reg32 offset="0xaa01" name="SP_FS_VGPR_CONFIG" type="uint" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xaa01" name="SP_PS_VGS_CNTL" type="uint" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xaa02" name="SP_PS_ALIASED_COMPONENTS_CONTROL" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xaa02" name="SP_PS_OUTPUT_CONST_CNTL" variants="A7XX-" usage="cmd">
<bitfield name="ENABLED" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0xaa03" name="SP_PS_ALIASED_COMPONENTS" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xaa03" name="SP_PS_OUTPUT_CONST_MASK" variants="A7XX-" usage="cmd">
<doc>
Specify for which components the output color should be read
from alias, e.g. for:
@@ -5167,7 +3036,7 @@ to upconvert to 32b float internally?
alias.1.b32.0 r1.x, c4.x
alias.1.b32.0 r0.x, c0.x
- the SP_PS_ALIASED_COMPONENTS would be 0x00001111
+ the SP_PS_OUTPUT_CONST_MASK would be 0x00001111
</doc>
<bitfield name="RT0" low="0" high="3"/>
@@ -5193,7 +3062,7 @@ to upconvert to 32b float internally?
<value value="0x2" name="ISAMMODE_GL"/>
</enum>
- <reg32 offset="0xab00" name="SP_MODE_CONTROL" usage="rp_blit">
+ <reg32 offset="0xab00" name="SP_MODE_CNTL" usage="rp_blit">
<!--
When set, half register loads from the constant file will
load a 32-bit value (so hc0.y loads the same value as c0.y)
@@ -5210,16 +3079,16 @@ to upconvert to 32b float internally?
<reg32 offset="0xab01" name="SP_UNKNOWN_AB01" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xab02" name="SP_UNKNOWN_AB02" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xab04" name="SP_FS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xab05" name="SP_FS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xab04" name="SP_PS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
+ <reg32 offset="0xab05" name="SP_PS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
- <array offset="0xab10" name="SP_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit">
+ <array offset="0xab10" name="SP_GFX_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit">
<reg64 offset="0" name="DESCRIPTOR" variants="A6XX">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
<bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
</reg64>
</array>
- <array offset="0xab0a" name="SP_BINDLESS_BASE" stride="2" length="8" variants="A7XX-" usage="rp_blit">
+ <array offset="0xab0a" name="SP_GFX_BINDLESS_BASE" stride="2" length="8" variants="A7XX-" usage="rp_blit">
<reg64 offset="0" name="DESCRIPTOR" variants="A7XX-">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
<bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
@@ -5227,15 +3096,15 @@ to upconvert to 32b float internally?
</array>
<!--
- Combined IBO state for 3d pipe, used for Image and SSBO write/atomic
- instructions VS/HS/DS/GS/FS. See SP_CS_IBO_* for compute shaders.
+ Combined UAV state for 3d pipe, used for Image and SSBO write/atomic
+ instructions VS/HS/DS/GS/FS. See SP_CS_UAV_BASE_* for compute shaders.
-->
- <reg64 offset="0xab1a" name="SP_IBO" type="address" align="16" usage="cmd"/>
- <reg32 offset="0xab20" name="SP_IBO_COUNT" low="0" high="6" type="uint" usage="cmd"/>
+ <reg64 offset="0xab1a" name="SP_GFX_UAV_BASE" type="address" align="16" usage="cmd"/>
+ <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" usage="cmd"/>
<reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX-" usage="cmd"/>
- <bitset name="a6xx_sp_2d_dst_format" inline="yes">
+ <bitset name="a6xx_sp_a2d_output_info" inline="yes">
<bitfield name="NORM" pos="0" type="boolean"/>
<bitfield name="SINT" pos="1" type="boolean"/>
<bitfield name="UINT" pos="2" type="boolean"/>
@@ -5248,8 +3117,8 @@ to upconvert to 32b float internally?
<bitfield name="MASK" low="12" high="15"/>
</bitset>
- <reg32 offset="0xacc0" name="SP_2D_DST_FORMAT" type="a6xx_sp_2d_dst_format" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xa9bf" name="SP_2D_DST_FORMAT" type="a6xx_sp_2d_dst_format" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xacc0" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xa9bf" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A7XX-" usage="rp_blit"/>
<reg32 offset="0xae00" name="SP_DBG_ECO_CNTL" usage="cmd"/>
<reg32 offset="0xae01" name="SP_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
@@ -5257,16 +3126,16 @@ to upconvert to 32b float internally?
<!-- TODO: valid bits 0x3c3f, see kernel -->
</reg32>
<reg32 offset="0xae03" name="SP_CHICKEN_BITS" usage="cmd"/>
- <reg32 offset="0xae04" name="SP_FLOAT_CNTL" usage="cmd">
+ <reg32 offset="0xae04" name="SP_NC_MODE_CNTL_2" usage="cmd">
<bitfield name="F16_NO_INF" pos="3" type="boolean"/>
</reg32>
<reg32 offset="0xae06" name="SP_UNKNOWN_AE06" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae08" name="SP_UNKNOWN_AE08" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae09" name="SP_UNKNOWN_AE09" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae0a" name="SP_UNKNOWN_AE0A" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae08" name="SP_CHICKEN_BITS_1" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae09" name="SP_CHICKEN_BITS_2" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae0a" name="SP_CHICKEN_BITS_3" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae0f" name="SP_PERFCTR_ENABLE" usage="cmd">
+ <reg32 offset="0xae0f" name="SP_PERFCTR_SHADER_MASK" usage="cmd">
<!-- some perfcntrs are affected by a per-stage enable bit
(PERF_SP_ALU_WORKING_CYCLES for example)
TODO: verify position of HS/DS/GS bits -->
@@ -5281,7 +3150,7 @@ to upconvert to 32b float internally?
<array offset="0xae60" name="SP_PERFCTR_HLSQ_SEL" stride="1" length="6" variants="A7XX-"/>
<reg32 offset="0xae6a" name="SP_UNKNOWN_AE6A" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6b" name="SP_UNKNOWN_AE6B" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae6c" name="SP_UNKNOWN_AE6C" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae6c" name="SP_HLSQ_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6d" name="SP_READ_SEL" variants="A7XX-">
<bitfield name="LOCATION" low="18" high="19" type="a7xx_state_location"/>
<bitfield name="PIPE" low="16" high="17" type="a7xx_pipe"/>
@@ -5301,33 +3170,44 @@ to upconvert to 32b float internally?
"a6xx_sp_ps_tp_cluster" but this actually specifies the border
color base for compute shaders.
-->
- <reg64 offset="0xb180" name="SP_PS_TP_BORDER_COLOR_BASE_ADDR" type="address" align="128" usage="cmd"/>
+ <reg64 offset="0xb180" name="TPL1_CS_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
<reg32 offset="0xb182" name="SP_UNKNOWN_B182" low="0" high="2" usage="cmd"/>
<reg32 offset="0xb183" name="SP_UNKNOWN_B183" low="0" high="23" usage="cmd"/>
<reg32 offset="0xb190" name="SP_UNKNOWN_B190"/>
<reg32 offset="0xb191" name="SP_UNKNOWN_B191"/>
- <!-- could be all the stuff below here is actually TPL1?? -->
-
- <reg32 offset="0xb300" name="SP_TP_RAS_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0xb300" name="TPL1_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" low="2" high="3"/>
</reg32>
- <reg32 offset="0xb301" name="SP_TP_DEST_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0xb301" name="TPL1_DEST_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
</reg32>
<!-- looks to work in the same way as a5xx: -->
- <reg64 offset="0xb302" name="SP_TP_BORDER_COLOR_BASE_ADDR" type="address" align="128" usage="cmd"/>
- <reg32 offset="0xb304" name="SP_TP_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
- <reg32 offset="0xb305" name="SP_TP_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0xb306" name="SP_TP_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0xb307" name="SP_TP_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0xb309" name="SP_TP_MODE_CNTL" usage="cmd">
+ <reg64 offset="0xb302" name="TPL1_GFX_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
+ <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0xb305" name="TPL1_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0xb306" name="TPL1_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0xb307" name="TPL1_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
+
+ <enum name="a6xx_coord_round">
+ <value value="0" name="COORD_TRUNCATE"/>
+ <value value="1" name="COORD_ROUND_NEAREST_EVEN"/>
+ </enum>
+
+ <enum name="a6xx_nearest_mode">
+ <value value="0" name="ROUND_CLAMP_TRUNCATE"/>
+ <value value="1" name="CLAMP_ROUND_TRUNCATE"/>
+ </enum>
+
+ <reg32 offset="0xb309" name="TPL1_MODE_CNTL" usage="cmd">
<bitfield name="ISAMMODE" low="0" high="1" type="a6xx_isam_mode"/>
- <bitfield name="UNK3" low="2" high="7"/>
+ <bitfield name="TEXCOORDROUNDMODE" pos="2" type="a6xx_coord_round"/>
+ <bitfield name="NEARESTMIPSNAP" pos="5" type="a6xx_nearest_mode"/>
+ <bitfield name="DESTDATATYPEOVERRIDE" pos="7" type="boolean"/>
</reg32>
<reg32 offset="0xb310" name="SP_UNKNOWN_B310" variants="A7XX-" usage="cmd"/>
@@ -5336,42 +3216,45 @@ to upconvert to 32b float internally?
badly named or the functionality moved in a6xx. But downstream kernel
calls this "a6xx_sp_ps_tp_2d_cluster"
-->
- <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb4c1" name="SP_PS_2D_SRC_SIZE" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb4c0" name="TPL1_A2D_SRC_TEXTURE_INFO" type="a6xx_a2d_src_texture_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A6XX" usage="rp_blit">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
</reg32>
- <reg64 offset="0xb4c2" name="SP_PS_2D_SRC" type="address" align="16" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb4c4" name="SP_PS_2D_SRC_PITCH" variants="A6XX" usage="rp_blit">
+ <reg64 offset="0xb4c2" name="TPL1_A2D_SRC_TEXTURE_BASE" type="address" align="16" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A6XX" usage="rp_blit">
<bitfield name="UNK0" low="0" high="8"/>
<bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
</reg32>
- <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c1" name="SP_PS_2D_SRC_SIZE" variants="A7XX">
+ <reg32 offset="0xb2c0" name="TPL1_A2D_SRC_TEXTURE_INFO" type="a6xx_a2d_src_texture_info" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A7XX">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
</reg32>
- <reg64 offset="0xb2c2" name="SP_PS_2D_SRC" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c4" name="SP_PS_2D_SRC_PITCH" variants="A7XX">
- <bitfield name="UNK0" low="0" high="8"/>
- <bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
+ <reg64 offset="0xb2c2" name="TPL1_A2D_SRC_TEXTURE_BASE" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A7XX">
+ <!--
+ Bits from 3..9 must be zero unless 'TPL1_A2D_BLT_CNTL::TYPE'
+ is A6XX_TEX_IMG_BUFFER, which allows for lower alignment.
+ -->
+ <bitfield name="PITCH" low="3" high="23" type="uint"/>
</reg32>
<!-- planes for NV12, etc. (TODO: not tested) -->
- <reg64 offset="0xb4c5" name="SP_PS_2D_SRC_PLANE1" type="address" align="16" variants="A6XX"/>
- <reg32 offset="0xb4c7" name="SP_PS_2D_SRC_PLANE_PITCH" low="0" high="11" shr="6" type="uint" variants="A6XX"/>
- <reg64 offset="0xb4c8" name="SP_PS_2D_SRC_PLANE2" type="address" align="16" variants="A6XX"/>
+ <reg64 offset="0xb4c5" name="TPL1_A2D_SRC_TEXTURE_BASE_1" type="address" align="16" variants="A6XX"/>
+ <reg32 offset="0xb4c7" name="TPL1_A2D_SRC_TEXTURE_PITCH_1" low="0" high="11" shr="6" type="uint" variants="A6XX"/>
+ <reg64 offset="0xb4c8" name="TPL1_A2D_SRC_TEXTURE_BASE_2" type="address" align="16" variants="A6XX"/>
- <reg64 offset="0xb2c5" name="SP_PS_2D_SRC_PLANE1" type="address" align="16" variants="A7XX-"/>
- <reg32 offset="0xb2c7" name="SP_PS_2D_SRC_PLANE_PITCH" low="0" high="11" shr="6" type="uint" variants="A7XX-"/>
- <reg64 offset="0xb2c8" name="SP_PS_2D_SRC_PLANE2" type="address" align="16" variants="A7XX-"/>
+ <reg64 offset="0xb2c5" name="TPL1_A2D_SRC_TEXTURE_BASE_1" type="address" align="16" variants="A7XX-"/>
+ <reg32 offset="0xb2c7" name="TPL1_A2D_SRC_TEXTURE_PITCH_1" low="0" high="11" shr="6" type="uint" variants="A7XX-"/>
+ <reg64 offset="0xb2c8" name="TPL1_A2D_SRC_TEXTURE_BASE_2" type="address" align="16" variants="A7XX-"/>
- <reg64 offset="0xb4ca" name="SP_PS_2D_SRC_FLAGS" type="address" align="16" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb4cc" name="SP_PS_2D_SRC_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" variants="A6XX" usage="rp_blit"/>
+ <reg64 offset="0xb4ca" name="TPL1_A2D_SRC_TEXTURE_FLAG_BASE" type="address" align="16" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4cc" name="TPL1_A2D_SRC_TEXTURE_FLAG_PITCH" low="0" high="7" shr="6" type="uint" variants="A6XX" usage="rp_blit"/>
- <reg64 offset="0xb2ca" name="SP_PS_2D_SRC_FLAGS" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2cc" name="SP_PS_2D_SRC_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" variants="A7XX-" usage="rp_blit"/>
+ <reg64 offset="0xb2ca" name="TPL1_A2D_SRC_TEXTURE_FLAG_BASE" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2cc" name="TPL1_A2D_SRC_TEXTURE_FLAG_PITCH" low="0" high="7" shr="6" type="uint" variants="A7XX-" usage="rp_blit"/>
<reg32 offset="0xb4cd" name="SP_PS_UNKNOWN_B4CD" low="6" high="31" variants="A6XX"/>
<reg32 offset="0xb4ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A6XX"/>
@@ -5383,8 +3266,12 @@ to upconvert to 32b float internally?
<reg32 offset="0xb2ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A7XX"/>
<reg32 offset="0xb2cf" name="SP_PS_UNKNOWN_B4CF" low="0" high="30" variants="A7XX"/>
<reg32 offset="0xb2d0" name="SP_PS_UNKNOWN_B4D0" low="0" high="29" variants="A7XX"/>
- <reg32 offset="0xb2d1" name="SP_PS_2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
- <reg32 offset="0xb2d2" name="SP_PS_UNKNOWN_B2D2" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2d1" name="TPL1_A2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
+ <reg32 offset="0xb2d2" name="TPL1_A2D_BLT_CNTL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="RAW_COPY" pos="0" type="boolean"/>
+ <bitfield name="START_OFFSET_TEXELS" low="16" high="21"/>
+ <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
+ </reg32>
<reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-" usage="rp_blit"/>
<!-- always 0x100000 or 0x1000000? -->
@@ -5422,34 +3309,44 @@ to upconvert to 32b float internally?
<!-- TODO: 4 more perfcntr sel at 0xb620 ? -->
- <bitset name="a6xx_hlsq_xs_cntl" inline="yes">
+ <bitset name="a6xx_xs_const_config" inline="yes">
<bitfield name="CONSTLEN" low="0" high="7" shr="2" type="uint"/>
<bitfield name="ENABLED" pos="8" type="boolean"/>
<bitfield name="READ_IMM_SHARED_CONSTS" pos="9" type="boolean" variants="A7XX-"/>
</bitset>
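+ <!--
+ Encoding note, assuming rnndb shr semantics: the CONSTLEN field
+ stores the value shifted right by 2, so e.g. a CONSTLEN of 128 is
+ programmed as 32 and effective values are multiples of 4.
+ -->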
- <reg32 offset="0xb800" name="HLSQ_VS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb801" name="HLSQ_HS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb802" name="HLSQ_DS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb803" name="HLSQ_GS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb800" name="SP_VS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb801" name="SP_HS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb802" name="SP_DS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb803" name="SP_GS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xa827" name="HLSQ_VS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa83f" name="HLSQ_HS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa867" name="HLSQ_DS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa898" name="HLSQ_GS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa827" name="SP_VS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa83f" name="SP_HS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa867" name="SP_DS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa898" name="SP_GS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9aa" name="HLSQ_FS_UNKNOWN_A9AA" variants="A7XX-" usage="rp_blit">
- <!-- Tentatively named, appears to disable consts being loaded via CP_LOAD_STATE6_FRAG -->
- <bitfield name="CONSTS_LOAD_DISABLE" pos="0" type="boolean"/>
+ <reg32 offset="0xa9aa" name="SP_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="FS_DISABLE" pos="0" type="boolean"/>
</reg32>
- <!-- Always 0 -->
- <reg32 offset="0xa9ac" name="HLSQ_UNKNOWN_A9AC" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9ac" name="SP_DITHER_CNTL" variants="A7XX-" usage="cmd">
+ <bitfield name="DITHER_MODE_MRT0" low="0" high="1" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT1" low="2" high="3" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT2" low="4" high="5" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT3" low="6" high="7" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT4" low="8" high="9" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT5" low="10" high="11" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT6" low="12" high="13" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT7" low="14" high="15" type="adreno_rb_dither_mode"/>
+ </reg32>
- <!-- Used in VK_KHR_fragment_shading_rate -->
- <reg32 offset="0xa9ad" name="HLSQ_UNKNOWN_A9AD" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9ad" name="SP_VRS_CONFIG" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PIPELINE_FSR_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="ATTACHMENT_FSR_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="PRIMITIVE_FSR_ENABLE" pos="3" type="boolean"/>
+ </reg32>
- <reg32 offset="0xa9ae" name="HLSQ_UNKNOWN_A9AE" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9ae" name="SP_PS_CNTL_1" variants="A7XX-" usage="rp_blit">
<bitfield name="SYSVAL_REGS_COUNT" low="0" high="7" type="uint"/>
<!-- UNK8 is set on a730/a740 -->
<bitfield name="UNK8" pos="8" type="boolean"/>
@@ -5462,94 +3359,94 @@ to upconvert to 32b float internally?
<reg32 offset="0xb823" name="HLSQ_LOAD_STATE_GEOM_DATA"/>
- <bitset name="a6xx_hlsq_fs_cntl_0" inline="yes">
+ <bitset name="a6xx_sp_ps_wave_cntl" inline="yes">
<!-- must match SP_PS_CNTL_0 -->
<bitfield name="THREADSIZE" pos="0" type="a6xx_threadsize"/>
<bitfield name="VARYINGS" pos="1" type="boolean"/>
<bitfield name="UNK2" low="2" high="11"/>
</bitset>
- <bitset name="a6xx_hlsq_control_3_reg" inline="yes">
+ <bitset name="a6xx_sp_reg_prog_id_1" inline="yes">
<!-- register loaded with position (bary.f) -->
<bitfield name="IJ_PERSP_PIXEL" low="0" high="7" type="a3xx_regid"/>
<bitfield name="IJ_LINEAR_PIXEL" low="8" high="15" type="a3xx_regid"/>
<bitfield name="IJ_PERSP_CENTROID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="IJ_LINEAR_CENTROID" low="24" high="31" type="a3xx_regid"/>
</bitset>
- <bitset name="a6xx_hlsq_control_4_reg" inline="yes">
+ <bitset name="a6xx_sp_reg_prog_id_2" inline="yes">
<bitfield name="IJ_PERSP_SAMPLE" low="0" high="7" type="a3xx_regid"/>
<bitfield name="IJ_LINEAR_SAMPLE" low="8" high="15" type="a3xx_regid"/>
<bitfield name="XYCOORDREGID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="ZWCOORDREGID" low="24" high="31" type="a3xx_regid"/>
</bitset>
- <bitset name="a6xx_hlsq_control_5_reg" inline="yes">
+ <bitset name="a6xx_sp_reg_prog_id_3" inline="yes">
<bitfield name="LINELENGTHREGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="FOVEATIONQUALITYREGID" low="8" high="15" type="a3xx_regid"/>
</bitset>
- <reg32 offset="0xb980" type="a6xx_hlsq_fs_cntl_0" name="HLSQ_FS_CNTL_0" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb980" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xb981" name="HLSQ_UNKNOWN_B981" pos="0" type="boolean" variants="A6XX"/> <!-- never used by blob -->
- <reg32 offset="0xb982" name="HLSQ_CONTROL_1_REG" low="0" high="2" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb982" name="SP_LB_PARAM_LIMIT" low="0" high="2" variants="A6XX" usage="rp_blit">
<!-- Sets the maximum number of primitives allowed in one FS wave minus one, similarly to the
A3xx field, except that it's not necessary to set it to anything but the maximum, since
the hardware will simply emit smaller waves when it runs out of space. -->
<bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
</reg32>
- <reg32 offset="0xb983" name="HLSQ_CONTROL_2_REG" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb983" name="SP_REG_PROG_ID_0" variants="A6XX" usage="rp_blit">
<bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
<!-- SAMPLEID is loaded into a half-precision register: -->
<bitfield name="SAMPLEID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="SAMPLEMASK" low="16" high="23" type="a3xx_regid"/>
<bitfield name="CENTERRHW" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xb984" type="a6xx_hlsq_control_3_reg" name="HLSQ_CONTROL_3_REG" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb985" type="a6xx_hlsq_control_4_reg" name="HLSQ_CONTROL_4_REG" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb986" type="a6xx_hlsq_control_5_reg" name="HLSQ_CONTROL_5_REG" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb987" name="HLSQ_CS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="cmd"/>
- <reg32 offset="0xa9c6" type="a6xx_hlsq_fs_cntl_0" name="HLSQ_FS_CNTL_0" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9c7" name="HLSQ_CONTROL_1_REG" low="0" high="2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xb984" type="a6xx_sp_reg_prog_id_1" name="SP_REG_PROG_ID_1" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb985" type="a6xx_sp_reg_prog_id_2" name="SP_REG_PROG_ID_2" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb986" type="a6xx_sp_reg_prog_id_3" name="SP_REG_PROG_ID_3" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb987" name="SP_CS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="cmd"/>
+ <reg32 offset="0xa9c6" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9c7" name="SP_LB_PARAM_LIMIT" low="0" high="2" variants="A7XX-" usage="rp_blit">
<bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
</reg32>
- <reg32 offset="0xa9c8" name="HLSQ_CONTROL_2_REG" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9c8" name="SP_REG_PROG_ID_0" variants="A7XX-" usage="rp_blit">
<bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
<!-- SAMPLEID is loaded into a half-precision register: -->
<bitfield name="SAMPLEID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="SAMPLEMASK" low="16" high="23" type="a3xx_regid"/>
<bitfield name="CENTERRHW" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa9c9" type="a6xx_hlsq_control_3_reg" name="HLSQ_CONTROL_3_REG" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9ca" type="a6xx_hlsq_control_4_reg" name="HLSQ_CONTROL_4_REG" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9cb" type="a6xx_hlsq_control_5_reg" name="HLSQ_CONTROL_5_REG" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9cd" name="HLSQ_CS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9c9" type="a6xx_sp_reg_prog_id_1" name="SP_REG_PROG_ID_1" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9ca" type="a6xx_sp_reg_prog_id_2" name="SP_REG_PROG_ID_2" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9cb" type="a6xx_sp_reg_prog_id_3" name="SP_REG_PROG_ID_3" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9cd" name="SP_CS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="cmd"/>
<!-- TODO: what does KERNELDIM do exactly (blob sets it differently from turnip) -->
- <reg32 offset="0xb990" name="HLSQ_CS_NDRANGE_0" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb990" name="SP_CS_NDRANGE_0" variants="A6XX" usage="rp_blit">
<bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
<bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
<bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb991" name="HLSQ_CS_NDRANGE_1" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb991" name="SP_CS_NDRANGE_1" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALSIZE_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb992" name="HLSQ_CS_NDRANGE_2" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb992" name="SP_CS_NDRANGE_2" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALOFF_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb993" name="HLSQ_CS_NDRANGE_3" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb993" name="SP_CS_NDRANGE_3" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALSIZE_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb994" name="HLSQ_CS_NDRANGE_4" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb994" name="SP_CS_NDRANGE_4" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALOFF_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb995" name="HLSQ_CS_NDRANGE_5" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb995" name="SP_CS_NDRANGE_5" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALSIZE_Z" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb996" name="HLSQ_CS_NDRANGE_6" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb996" name="SP_CS_NDRANGE_6" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALOFF_Z" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb997" name="HLSQ_CS_CNTL_0" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb997" name="SP_CS_CONST_CONFIG_0" variants="A6XX" usage="rp_blit">
<!-- these are all vec3. first 3 need to be high regs
- WGSIZECONSTID is the local size (from HLSQ_CS_NDRANGE_0)
+ WGSIZECONSTID is the local size (from SP_CS_NDRANGE_0)
WGOFFSETCONSTID is WGIDCONSTID*WGSIZECONSTID
-->
<bitfield name="WGIDCONSTID" low="0" high="7" type="a3xx_regid"/>
@@ -5557,7 +3454,7 @@ to upconvert to 32b float internally?
<bitfield name="WGOFFSETCONSTID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
</reg32>
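        <!--
            To make the relationship above concrete (variable names assumed;
            these are the per-workgroup values the hw would load into the
            consts), for each component c:

                wgid[c]     = workgroup_coord[c];      /* WGIDCONSTID     */
                wgsize[c]   = local_size[c];           /* WGSIZECONSTID   */
                wgoffset[c] = wgid[c] * wgsize[c];     /* WGOFFSETCONSTID */
        -->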
- <reg32 offset="0xb998" name="HLSQ_CS_CNTL_1" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb998" name="SP_CS_WGE_CNTL" variants="A6XX" usage="rp_blit">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- a650 has 6 "SP cores" (but 3 "SP"). this makes it use only
@@ -5569,40 +3466,40 @@ to upconvert to 32b float internally?
<bitfield name="THREADSIZE_SCALAR" pos="10" type="boolean"/>
</reg32>
        <!-- note: vulkan blob doesn't use these -->
- <reg32 offset="0xb999" name="HLSQ_CS_KERNEL_GROUP_X" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb99a" name="HLSQ_CS_KERNEL_GROUP_Y" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb99b" name="HLSQ_CS_KERNEL_GROUP_Z" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb999" name="SP_CS_KERNEL_GROUP_X" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb99a" name="SP_CS_KERNEL_GROUP_Y" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb99b" name="SP_CS_KERNEL_GROUP_Z" variants="A6XX" usage="rp_blit"/>
<!-- TODO: what does KERNELDIM do exactly (blob sets it differently from turnip) -->
- <reg32 offset="0xa9d4" name="HLSQ_CS_NDRANGE_0" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d4" name="SP_CS_NDRANGE_0" variants="A7XX-" usage="rp_blit">
<bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
<bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
<bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d5" name="HLSQ_CS_NDRANGE_1" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d5" name="SP_CS_NDRANGE_1" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALSIZE_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d6" name="HLSQ_CS_NDRANGE_2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d6" name="SP_CS_NDRANGE_2" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALOFF_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d7" name="HLSQ_CS_NDRANGE_3" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d7" name="SP_CS_NDRANGE_3" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALSIZE_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d8" name="HLSQ_CS_NDRANGE_4" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d8" name="SP_CS_NDRANGE_4" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALOFF_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d9" name="HLSQ_CS_NDRANGE_5" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d9" name="SP_CS_NDRANGE_5" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALSIZE_Z" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9da" name="HLSQ_CS_NDRANGE_6" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9da" name="SP_CS_NDRANGE_6" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALOFF_Z" low="0" high="31" type="uint"/>
</reg32>
        <!-- note: vulkan blob doesn't use these -->
- <reg32 offset="0xa9dc" name="HLSQ_CS_KERNEL_GROUP_X" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9dd" name="HLSQ_CS_KERNEL_GROUP_Y" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9de" name="HLSQ_CS_KERNEL_GROUP_Z" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9dc" name="SP_CS_KERNEL_GROUP_X" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9dd" name="SP_CS_KERNEL_GROUP_Y" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9de" name="SP_CS_KERNEL_GROUP_Z" variants="A7XX-" usage="rp_blit"/>
<enum name="a7xx_cs_yalign">
<value name="CS_YALIGN_1" value="8"/>
@@ -5611,19 +3508,29 @@ to upconvert to 32b float internally?
<value name="CS_YALIGN_8" value="1"/>
</enum>
- <reg32 offset="0xa9db" name="HLSQ_CS_CNTL_1" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9db" name="SP_CS_WGE_CNTL" variants="A7XX-" usage="rp_blit">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- Must match SP_CS_CTRL -->
<bitfield name="THREADSIZE" pos="9" type="a6xx_threadsize"/>
- <bitfield name="UNK11" pos="11" type="boolean"/>
- <bitfield name="UNK22" pos="22" type="boolean"/>
- <bitfield name="UNK26" pos="26" type="boolean"/>
- <bitfield name="YALIGN" low="27" high="30" type="a7xx_cs_yalign"/>
+ <doc>
+ When this bit is enabled, the dispatch order interleaves
+ the z coordinate instead of launching all workgroups
+ with z=0, then all with z=1 and so on.
+ </doc>
+ <bitfield name="WORKGROUPRASTORDERZFIRSTEN" pos="11" type="boolean"/>
+ <doc>
+                When both fields are non-zero, the dispatcher uses
+                these tile sizes to launch workgroups in a tiled
+                order, provided the x and y workgroup counts are
+                both greater than 1.
+ </doc>
+ <bitfield name="WGTILEWIDTH" low="20" high="25"/>
+ <bitfield name="WGTILEHEIGHT" low="26" high="31"/>
</reg32>
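        <!--
            A sketch of the tiled launch order implied by the doc above
            (inferred behaviour, not from hw docs; MIN2 and
            launch_workgroup are assumed helpers):

                for (ty = 0; ty < num_wg_y; ty += tile_h)
                    for (tx = 0; tx < num_wg_x; tx += tile_w)
                        for (y = ty; y < MIN2(ty + tile_h, num_wg_y); y++)
                            for (x = tx; x < MIN2(tx + tile_w, num_wg_x); x++)
                                launch_workgroup(x, y, z);
        -->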
- <reg32 offset="0xa9df" name="HLSQ_CS_LOCAL_SIZE" variants="A7XX-" usage="cmd">
- <!-- localsize is value minus one: -->
+ <reg32 offset="0xa9df" name="SP_CS_NDRANGE_7" variants="A7XX-" usage="cmd">
+ <!-- The size of the last workgroup. localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
<bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
<bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
@@ -5641,29 +3548,27 @@ to upconvert to 32b float internally?
</reg64>
</array>
- <!-- new in a6xx gen4, mirror of SP_CS_UNKNOWN_A9B1? -->
- <reg32 offset="0xb9d0" name="HLSQ_CS_UNKNOWN_B9D0" variants="A6XX" usage="cmd">
+ <!-- new in a6xx gen4, mirror of SP_CS_CNTL_1? -->
+ <reg32 offset="0xb9d0" name="HLSQ_CS_CTRL_REG1" variants="A6XX" usage="cmd">
<bitfield name="SHARED_SIZE" low="0" high="4" type="uint"/>
- <bitfield name="UNK5" pos="5" type="boolean"/>
- <!-- always 1 ? -->
- <bitfield name="UNK6" pos="6" type="boolean"/>
+ <bitfield name="CONSTANTRAMMODE" low="5" high="6" type="a6xx_const_ram_mode"/>
</reg32>
- <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD" variants="A6XX">
+ <reg32 offset="0xbb00" name="SP_DRAW_INITIATOR" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD" variants="A6XX">
+ <reg32 offset="0xbb01" name="SP_KERNEL_INITIATOR" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD" variants="A6XX">
+ <reg32 offset="0xbb02" name="SP_EVENT_INITIATOR" variants="A6XX">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
</reg32>
- <reg32 offset="0xbb08" name="HLSQ_INVALIDATE_CMD" variants="A6XX" usage="cmd">
+ <reg32 offset="0xbb08" name="SP_UPDATE_CNTL" variants="A6XX" usage="cmd">
<doc>
This register clears pending loads queued up by
CP_LOAD_STATE6. Each bit resets a particular kind(s) of
@@ -5678,8 +3583,8 @@ to upconvert to 32b float internally?
<bitfield name="FS_STATE" pos="4" type="boolean"/>
<bitfield name="CS_STATE" pos="5" type="boolean"/>
- <bitfield name="CS_IBO" pos="6" type="boolean"/>
- <bitfield name="GFX_IBO" pos="7" type="boolean"/>
+ <bitfield name="CS_UAV" pos="6" type="boolean"/>
+ <bitfield name="GFX_UAV" pos="7" type="boolean"/>
<!-- Note: these only do something when HLSQ_SHARED_CONSTS is set to 1 -->
<bitfield name="CS_SHARED_CONST" pos="19" type="boolean"/>
@@ -5690,20 +3595,20 @@ to upconvert to 32b float internally?
<bitfield name="GFX_BINDLESS" low="14" high="18" type="hex"/>
</reg32>
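        <!--
            A minimal usage sketch (write_reg is a hypothetical helper): to
            invalidate the FS and CS state plus the CS UAV descriptors queued
            by CP_LOAD_STATE6, one would set the corresponding bits:

                uint32_t val = (1u << 4) |    /* FS_STATE */
                               (1u << 5) |    /* CS_STATE */
                               (1u << 6);     /* CS_UAV   */
                write_reg(0xbb08, val);       /* SP_UPDATE_CNTL (A6XX) */
        -->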
- <reg32 offset="0xab1c" name="HLSQ_DRAW_CMD" variants="A7XX-">
+ <reg32 offset="0xab1c" name="SP_DRAW_INITIATOR" variants="A7XX-">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xab1d" name="HLSQ_DISPATCH_CMD" variants="A7XX-">
+ <reg32 offset="0xab1d" name="SP_KERNEL_INITIATOR" variants="A7XX-">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xab1e" name="HLSQ_EVENT_CMD" variants="A7XX-">
+ <reg32 offset="0xab1e" name="SP_EVENT_INITIATOR" variants="A7XX-">
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
</reg32>
- <reg32 offset="0xab1f" name="HLSQ_INVALIDATE_CMD" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xab1f" name="SP_UPDATE_CNTL" variants="A7XX-" usage="cmd">
<doc>
This register clears pending loads queued up by
CP_LOAD_STATE6. Each bit resets a particular kind(s) of
@@ -5718,18 +3623,18 @@ to upconvert to 32b float internally?
<bitfield name="FS_STATE" pos="4" type="boolean"/>
<bitfield name="CS_STATE" pos="5" type="boolean"/>
- <bitfield name="CS_IBO" pos="6" type="boolean"/>
- <bitfield name="GFX_IBO" pos="7" type="boolean"/>
+ <bitfield name="CS_UAV" pos="6" type="boolean"/>
+ <bitfield name="GFX_UAV" pos="7" type="boolean"/>
<!-- SS6_BINDLESS: one bit per bindless base -->
<bitfield name="CS_BINDLESS" low="9" high="16" type="hex"/>
<bitfield name="GFX_BINDLESS" low="17" high="24" type="hex"/>
</reg32>
- <reg32 offset="0xbb10" name="HLSQ_FS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xab03" name="HLSQ_FS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xbb10" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xab03" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
- <array offset="0xab40" name="HLSQ_SHARED_CONSTS_IMM" stride="1" length="64" variants="A7XX-"/>
+ <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX_0" stride="1" length="64" variants="A7XX-"/>
<reg32 offset="0xbb11" name="HLSQ_SHARED_CONSTS" variants="A6XX" usage="cmd">
<doc>
@@ -5738,7 +3643,7 @@ to upconvert to 32b float internally?
const pool and 16 in the geometry const pool although
only 8 are actually used (why?) and they are mapped to
c504-c511 in each stage. Both VS and FS shared consts
- are written using ST6_CONSTANTS/SB6_IBO, so that both
+ are written using ST6_CONSTANTS/SB6_UAV, so that both
the geometry and FS shared consts can be written at once
by using CP_LOAD_STATE6 rather than
CP_LOAD_STATE6_FRAG/CP_LOAD_STATE6_GEOM. In addition
@@ -5747,13 +3652,13 @@ to upconvert to 32b float internally?
There is also a separate shared constant pool for CS,
which is loaded through CP_LOAD_STATE6_FRAG with
- ST6_UBO/ST6_IBO. However the only real difference for CS
+            ST6_UBO/ST6_UAV. However, the only real difference for CS
is the dword units.
</doc>
<bitfield name="ENABLE" pos="0" type="boolean"/>
</reg32>
- <!-- mirror of SP_BINDLESS_BASE -->
+ <!-- mirror of SP_GFX_BINDLESS_BASE -->
<array offset="0xbb20" name="HLSQ_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="cmd">
<reg64 offset="0" name="DESCRIPTOR">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
@@ -5788,10 +3693,10 @@ to upconvert to 32b float internally?
sequence. The sequence used internally for an event looks like:
- write EVENT_CMD pipe register
- write CP_EVENT_START
- - write HLSQ_EVENT_CMD with event or HLSQ_DRAW_CMD
- - write PC_EVENT_CMD with event or PC_DRAW_CMD
- - write HLSQ_EVENT_CMD(CONTEXT_DONE)
- - write PC_EVENT_CMD(CONTEXT_DONE)
+ - write SP_EVENT_INITIATOR with event or SP_DRAW_INITIATOR
+ - write PC_EVENT_INITIATOR with event or PC_DRAW_INITIATOR
+ - write SP_EVENT_INITIATOR(CONTEXT_DONE)
+ - write PC_EVENT_INITIATOR(CONTEXT_DONE)
- write CP_EVENT_END
Writing to CP_EVENT_END seems to actually trigger the context roll
-->
@@ -5809,193 +3714,6 @@ to upconvert to 32b float internally?
</reg32>
</domain>
-<!-- Seems basically the same as a5xx, maybe move to common.xml.. -->
-<domain name="A6XX_TEX_SAMP" width="32">
- <doc>Texture sampler dwords</doc>
- <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_NEAREST" value="0"/>
- <value name="A6XX_TEX_LINEAR" value="1"/>
- <value name="A6XX_TEX_ANISO" value="2"/>
- <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
- </enum>
- <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_REPEAT" value="0"/>
- <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
- <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
- <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
- <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
- </enum>
- <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_ANISO_1" value="0"/>
- <value name="A6XX_TEX_ANISO_2" value="1"/>
- <value name="A6XX_TEX_ANISO_4" value="2"/>
- <value name="A6XX_TEX_ANISO_8" value="3"/>
- <value name="A6XX_TEX_ANISO_16" value="4"/>
- </enum>
- <enum name="a6xx_reduction_mode">
- <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
- <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
- <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
- </enum>
-
- <reg32 offset="0" name="0">
- <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
- <bitfield name="XY_MAG" low="1" high="2" type="a6xx_tex_filter"/>
- <bitfield name="XY_MIN" low="3" high="4" type="a6xx_tex_filter"/>
- <bitfield name="WRAP_S" low="5" high="7" type="a6xx_tex_clamp"/>
- <bitfield name="WRAP_T" low="8" high="10" type="a6xx_tex_clamp"/>
- <bitfield name="WRAP_R" low="11" high="13" type="a6xx_tex_clamp"/>
- <bitfield name="ANISO" low="14" high="16" type="a6xx_tex_aniso"/>
- <bitfield name="LOD_BIAS" low="19" high="31" type="fixed" radix="8"/><!-- no idea how many bits for real -->
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="CLAMPENABLE" pos="0" type="boolean">
- <doc>
- clamp result to [0, 1] if the format is unorm or
- [-1, 1] if the format is snorm, *after*
- filtering. Has no effect for other formats.
- </doc>
- </bitfield>
- <bitfield name="COMPARE_FUNC" low="1" high="3" type="adreno_compare_func"/>
- <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="4" type="boolean"/>
- <bitfield name="UNNORM_COORDS" pos="5" type="boolean"/>
- <bitfield name="MIPFILTER_LINEAR_FAR" pos="6" type="boolean"/>
- <bitfield name="MAX_LOD" low="8" high="19" type="ufixed" radix="8"/>
- <bitfield name="MIN_LOD" low="20" high="31" type="ufixed" radix="8"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="REDUCTION_MODE" low="0" high="1" type="a6xx_reduction_mode"/>
- <bitfield name="CHROMA_LINEAR" pos="5" type="boolean"/>
- <bitfield name="BCOLOR" low="7" high="31"/>
- </reg32>
- <reg32 offset="3" name="3"/>
-</domain>
-
-<domain name="A6XX_TEX_CONST" width="32" varset="chip">
- <doc>Texture constant dwords</doc>
- <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_X" value="0"/>
- <value name="A6XX_TEX_Y" value="1"/>
- <value name="A6XX_TEX_Z" value="2"/>
- <value name="A6XX_TEX_W" value="3"/>
- <value name="A6XX_TEX_ZERO" value="4"/>
- <value name="A6XX_TEX_ONE" value="5"/>
- </enum>
- <enum name="a6xx_tex_type"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_1D" value="0"/>
- <value name="A6XX_TEX_2D" value="1"/>
- <value name="A6XX_TEX_CUBE" value="2"/>
- <value name="A6XX_TEX_3D" value="3"/>
- <value name="A6XX_TEX_BUFFER" value="4"/>
- </enum>
- <reg32 offset="0" name="0">
- <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
- <bitfield name="SRGB" pos="2" type="boolean"/>
- <bitfield name="SWIZ_X" low="4" high="6" type="a6xx_tex_swiz"/>
- <bitfield name="SWIZ_Y" low="7" high="9" type="a6xx_tex_swiz"/>
- <bitfield name="SWIZ_Z" low="10" high="12" type="a6xx_tex_swiz"/>
- <bitfield name="SWIZ_W" low="13" high="15" type="a6xx_tex_swiz"/>
- <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
- <!-- overlaps with MIPLVLS -->
- <bitfield name="CHROMA_MIDPOINT_X" pos="16" type="boolean"/>
- <bitfield name="CHROMA_MIDPOINT_Y" pos="18" type="boolean"/>
- <bitfield name="SAMPLES" low="20" high="21" type="a3xx_msaa_samples"/>
- <bitfield name="FMT" low="22" high="29" type="a6xx_format"/>
- <!--
- Why is the swap needed in addition to SWIZ_*? The swap
- is performed before border color replacement, while the
- swizzle is applied after after it.
- -->
- <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="WIDTH" low="0" high="14" type="uint"/>
- <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
- <bitfield name="MUTABLEEN" pos="31" type="boolean" variants="A7XX-"/>
- </reg32>
- <reg32 offset="2" name="2">
- <!--
- These fields overlap PITCH, and are used instead of
- PITCH/PITCHALIGN when TYPE is A6XX_TEX_BUFFER.
- -->
- <doc> probably for D3D structured UAVs, normally set to 1 </doc>
- <bitfield name="STRUCTSIZETEXELS" low="4" high="15" type="uint"/>
- <bitfield name="STARTOFFSETTEXELS" low="16" high="21" type="uint"/>
-
- <!-- minimum pitch (for mipmap levels): log2(pitchalign / 64) -->
- <bitfield name="PITCHALIGN" low="0" high="3" type="uint"/>
- <doc>Pitch in bytes (so actually stride)</doc>
- <bitfield name="PITCH" low="7" high="28" type="uint"/>
- <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
- </reg32>
- <reg32 offset="3" name="3">
- <!--
- ARRAY_PITCH is basically LAYERSZ for the first mipmap level, and
- for 3d textures (laid out mipmap level first) MIN_LAYERSZ is the
- layer size at the point that it stops being reduced moving to
- higher (smaller) mipmap levels
- -->
- <bitfield name="ARRAY_PITCH" low="0" high="22" shr="12" type="uint"/>
- <bitfield name="MIN_LAYERSZ" low="23" high="26" shr="12"/>
- <!--
- by default levels with w < 16 are linear
- TILE_ALL makes all levels have tiling
- seems required when using UBWC, since all levels have UBWC (can possibly be disabled?)
- -->
- <bitfield name="TILE_ALL" pos="27" type="boolean"/>
- <bitfield name="FLAG" pos="28" type="boolean"/>
- </reg32>
- <!-- for 2-3 plane format, BASE is flag buffer address (if enabled)
- the address of the non-flag base buffer is determined automatically,
- and must follow the flag buffer
- -->
- <reg32 offset="4" name="4">
- <bitfield name="BASE_LO" low="5" high="31" shr="5"/>
- </reg32>
- <reg32 offset="5" name="5">
- <bitfield name="BASE_HI" low="0" high="16"/>
- <bitfield name="DEPTH" low="17" high="29" type="uint"/>
- </reg32>
- <reg32 offset="6" name="6">
- <!-- overlaps with PLANE_PITCH -->
- <bitfield name="MIN_LOD_CLAMP" low="0" high="11" type="ufixed" radix="8"/>
- <!-- pitch for plane 2 / plane 3 -->
- <bitfield name="PLANE_PITCH" low="8" high="31" type="uint"/>
- </reg32>
- <!-- 7/8 is plane 2 address for planar formats -->
- <reg32 offset="7" name="7">
- <bitfield name="FLAG_LO" low="5" high="31" shr="5"/>
- </reg32>
- <reg32 offset="8" name="8">
- <bitfield name="FLAG_HI" low="0" high="16"/>
- </reg32>
- <!-- 9/10 is plane 3 address for planar formats -->
- <reg32 offset="9" name="9">
- <bitfield name="FLAG_BUFFER_ARRAY_PITCH" low="0" high="16" shr="4" type="uint"/>
- </reg32>
- <reg32 offset="10" name="10">
- <bitfield name="FLAG_BUFFER_PITCH" low="0" high="6" shr="6" type="uint"/>
- <!-- log2 size of the first level, required for mipmapping -->
- <bitfield name="FLAG_BUFFER_LOGW" low="8" high="11" type="uint"/>
- <bitfield name="FLAG_BUFFER_LOGH" low="12" high="15" type="uint"/>
- </reg32>
- <reg32 offset="11" name="11"/>
- <reg32 offset="12" name="12"/>
- <reg32 offset="13" name="13"/>
- <reg32 offset="14" name="14"/>
- <reg32 offset="15" name="15"/>
-</domain>
-
-<domain name="A6XX_UBO" width="32">
- <reg32 offset="0" name="0">
- <bitfield name="BASE_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="BASE_HI" low="0" high="16"/>
- <bitfield name="SIZE" low="17" high="31"/> <!-- size in vec4 (4xDWORD) units -->
- </reg32>
-</domain>
-
<domain name="A6XX_PDC" width="32">
<reg32 offset="0x1140" name="GPU_ENABLE_PDC"/>
<reg32 offset="0x1148" name="GPU_SEQ_START_ADDR"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
new file mode 100644
index 000000000000..307d43dda8a2
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
@@ -0,0 +1,198 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+<import file="adreno/a6xx_enums.xml"/>
+
+<domain name="A6XX_TEX_SAMP" width="32">
+ <doc>Texture sampler dwords</doc>
+ <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_NEAREST" value="0"/>
+ <value name="A6XX_TEX_LINEAR" value="1"/>
+ <value name="A6XX_TEX_ANISO" value="2"/>
+ <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
+ </enum>
+ <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_REPEAT" value="0"/>
+ <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
+ </enum>
+ <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_ANISO_1" value="0"/>
+ <value name="A6XX_TEX_ANISO_2" value="1"/>
+ <value name="A6XX_TEX_ANISO_4" value="2"/>
+ <value name="A6XX_TEX_ANISO_8" value="3"/>
+ <value name="A6XX_TEX_ANISO_16" value="4"/>
+ </enum>
+ <enum name="a6xx_reduction_mode">
+ <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
+ <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
+ <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
+ </enum>
+ <enum name="a6xx_fast_border_color">
+ <!-- R B G A -->
+ <value name="A6XX_BORDER_COLOR_0_0_0_0" value="0"/>
+ <value name="A6XX_BORDER_COLOR_0_0_0_1" value="1"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_0" value="2"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_1" value="3"/>
+ </enum>
+
+ <reg32 offset="0" name="0">
+ <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
+ <bitfield name="XY_MAG" low="1" high="2" type="a6xx_tex_filter"/>
+ <bitfield name="XY_MIN" low="3" high="4" type="a6xx_tex_filter"/>
+ <bitfield name="WRAP_S" low="5" high="7" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_T" low="8" high="10" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_R" low="11" high="13" type="a6xx_tex_clamp"/>
+ <bitfield name="ANISO" low="14" high="16" type="a6xx_tex_aniso"/>
+ <bitfield name="LOD_BIAS" low="19" high="31" type="fixed" radix="8"/><!-- no idea how many bits for real -->
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="CLAMPENABLE" pos="0" type="boolean">
+ <doc>
+ clamp result to [0, 1] if the format is unorm or
+ [-1, 1] if the format is snorm, *after*
+ filtering. Has no effect for other formats.
+ </doc>
+ </bitfield>
+ <bitfield name="COMPARE_FUNC" low="1" high="3" type="adreno_compare_func"/>
+ <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="4" type="boolean"/>
+ <bitfield name="UNNORM_COORDS" pos="5" type="boolean"/>
+ <bitfield name="MIPFILTER_LINEAR_FAR" pos="6" type="boolean"/>
+ <bitfield name="MAX_LOD" low="8" high="19" type="ufixed" radix="8"/>
+ <bitfield name="MIN_LOD" low="20" high="31" type="ufixed" radix="8"/>
+ </reg32>
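+	<!--
+	    Sketch of the "ufixed, radix 8" encoding used by MAX_LOD/MIN_LOD
+	    (helper name assumed): the value is an unsigned 4.8 fixed-point
+	    number stored in a 12-bit field:
+
+	        static uint32_t a6xx_ufixed_4_8(float v)
+	        {
+	            uint32_t u = (uint32_t)(v * 256.0f + 0.5f);
+	            return u > 0xfff ? 0xfff : u;
+	        }
+
+	    LOD_BIAS in dword 0 is the signed ("fixed") equivalent.
+	-->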
+ <reg32 offset="2" name="2">
+ <bitfield name="REDUCTION_MODE" low="0" high="1" type="a6xx_reduction_mode"/>
+ <bitfield name="FASTBORDERCOLOR" low="2" high="3" type="a6xx_fast_border_color"/>
+ <bitfield name="FASTBORDERCOLOREN" pos="4" type="boolean"/>
+ <bitfield name="CHROMA_LINEAR" pos="5" type="boolean"/>
+ <bitfield name="BCOLOR" low="7" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3"/>
+</domain>
+
+<domain name="A6XX_TEX_CONST" width="32" varset="chip">
+ <doc>Texture constant dwords</doc>
+ <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_X" value="0"/>
+ <value name="A6XX_TEX_Y" value="1"/>
+ <value name="A6XX_TEX_Z" value="2"/>
+ <value name="A6XX_TEX_W" value="3"/>
+ <value name="A6XX_TEX_ZERO" value="4"/>
+ <value name="A6XX_TEX_ONE" value="5"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
+ <bitfield name="SRGB" pos="2" type="boolean"/>
+ <bitfield name="SWIZ_X" low="4" high="6" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_Y" low="7" high="9" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_Z" low="10" high="12" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_W" low="13" high="15" type="a6xx_tex_swiz"/>
+ <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
+ <!-- overlaps with MIPLVLS -->
+ <bitfield name="CHROMA_MIDPOINT_X" pos="16" type="boolean"/>
+ <bitfield name="CHROMA_MIDPOINT_Y" pos="18" type="boolean"/>
+ <bitfield name="SAMPLES" low="20" high="21" type="a3xx_msaa_samples"/>
+ <bitfield name="FMT" low="22" high="29" type="a6xx_format"/>
+ <!--
+ Why is the swap needed in addition to SWIZ_*? The swap
+ is performed before border color replacement, while the
+	    swizzle is applied after it.
+ -->
+ <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="WIDTH" low="0" high="14" type="uint"/>
+ <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ <bitfield name="MUTABLEEN" pos="31" type="boolean" variants="A7XX-"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <!--
+ These fields overlap PITCH, and are used instead of
+ PITCH/PITCHALIGN when TYPE is A6XX_TEX_BUFFER.
+ -->
+ <doc> probably for D3D structured UAVs, normally set to 1 </doc>
+ <bitfield name="STRUCTSIZETEXELS" low="4" high="15" type="uint"/>
+ <bitfield name="STARTOFFSETTEXELS" low="16" high="21" type="uint"/>
+
+ <!-- minimum pitch (for mipmap levels): log2(pitchalign / 64) -->
+ <bitfield name="PITCHALIGN" low="0" high="3" type="uint"/>
+ <doc>Pitch in bytes (so actually stride)</doc>
+ <bitfield name="PITCH" low="7" high="28" type="uint"/>
+ <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
+ </reg32>
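+	<!--
+	    Worked example for PITCHALIGN above: the field stores
+	    log2(pitchalign / 64), so a 256-byte minimum pitch alignment is
+	    encoded as log2(256 / 64) = 2.
+	-->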
+ <reg32 offset="3" name="3">
+ <!--
+	    ARRAY_PITCH is basically LAYERSZ for the first mipmap level. For
+	    3d textures (laid out mipmap level first), MIN_LAYERSZ is the
+	    layer size at the point where it stops shrinking when moving to
+	    higher (smaller) mipmap levels.
+ -->
+ <bitfield name="ARRAY_PITCH" low="0" high="22" shr="12" type="uint"/>
+ <bitfield name="MIN_LAYERSZ" low="23" high="26" shr="12"/>
+ <!--
+ by default levels with w < 16 are linear
+ TILE_ALL makes all levels have tiling
+ seems required when using UBWC, since all levels have UBWC (can possibly be disabled?)
+ -->
+ <bitfield name="TILE_ALL" pos="27" type="boolean"/>
+ <bitfield name="FLAG" pos="28" type="boolean"/>
+ </reg32>
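+	<!--
+	    Sketch (variable names assumed): shr="12" means the field holds
+	    the value right-shifted by 12, i.e. in 4 KiB units:
+
+	        uint32_t array_pitch_field = layer_size_bytes >> 12;
+	-->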
+	<!-- For 2- and 3-plane formats, BASE is the flag buffer address (if
+	     enabled); the address of the non-flag base buffer is determined
+	     automatically, and must follow the flag buffer.
+ -->
+ <reg32 offset="4" name="4">
+ <bitfield name="BASE_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="DEPTH" low="17" high="29" type="uint"/>
+ </reg32>
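+	<!--
+	    Sketch (variable names assumed): the base address spans dwords
+	    4-5, and shr="5" on BASE_LO implies a 32-byte-aligned address:
+
+	        uint32_t dw4 = (uint32_t)iova & 0xffffffe0;        /* BASE_LO */
+	        uint32_t dw5 = ((uint32_t)(iova >> 32) & 0x1ffff) |
+	                       (depth << 17);             /* BASE_HI | DEPTH */
+	-->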
+ <reg32 offset="6" name="6">
+ <!-- overlaps with PLANE_PITCH -->
+ <bitfield name="MIN_LOD_CLAMP" low="0" high="11" type="ufixed" radix="8"/>
+ <!-- pitch for plane 2 / plane 3 -->
+ <bitfield name="PLANE_PITCH" low="8" high="31" type="uint"/>
+ </reg32>
+ <!-- 7/8 is plane 2 address for planar formats -->
+ <reg32 offset="7" name="7">
+ <bitfield name="FLAG_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="8" name="8">
+ <bitfield name="FLAG_HI" low="0" high="16"/>
+ </reg32>
+ <!-- 9/10 is plane 3 address for planar formats -->
+ <reg32 offset="9" name="9">
+ <bitfield name="FLAG_BUFFER_ARRAY_PITCH" low="0" high="16" shr="4" type="uint"/>
+ </reg32>
+ <reg32 offset="10" name="10">
+ <bitfield name="FLAG_BUFFER_PITCH" low="0" high="6" shr="6" type="uint"/>
+ <!-- log2 size of the first level, required for mipmapping -->
+ <bitfield name="FLAG_BUFFER_LOGW" low="8" high="11" type="uint"/>
+ <bitfield name="FLAG_BUFFER_LOGH" low="12" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="11" name="11"/>
+ <reg32 offset="12" name="12"/>
+ <reg32 offset="13" name="13"/>
+ <reg32 offset="14" name="14"/>
+ <reg32 offset="15" name="15"/>
+</domain>
+
+<domain name="A6XX_UBO" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="BASE_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="SIZE" low="17" high="31"/> <!-- size in vec4 (4xDWORD) units -->
+ </reg32>
+</domain>
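+<!--
+    Sketch (variable names assumed): SIZE is in vec4 (16-byte) units, so a
+    byte size is rounded up as:
+
+        uint32_t size_vec4 = (size_bytes + 15) / 16;
+-->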
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
new file mode 100644
index 000000000000..665539b098c6
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
@@ -0,0 +1,383 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a6xx_tile_mode">
+ <value name="TILE6_LINEAR" value="0"/>
+ <value name="TILE6_2" value="2"/>
+ <value name="TILE6_3" value="3"/>
+</enum>
+
+<enum name="a6xx_format">
+ <value value="0x02" name="FMT6_A8_UNORM"/>
+ <value value="0x03" name="FMT6_8_UNORM"/>
+ <value value="0x04" name="FMT6_8_SNORM"/>
+ <value value="0x05" name="FMT6_8_UINT"/>
+ <value value="0x06" name="FMT6_8_SINT"/>
+
+ <value value="0x08" name="FMT6_4_4_4_4_UNORM"/>
+ <value value="0x0a" name="FMT6_5_5_5_1_UNORM"/>
+ <value value="0x0c" name="FMT6_1_5_5_5_UNORM"/> <!-- read only -->
+ <value value="0x0e" name="FMT6_5_6_5_UNORM"/>
+
+ <value value="0x0f" name="FMT6_8_8_UNORM"/>
+ <value value="0x10" name="FMT6_8_8_SNORM"/>
+ <value value="0x11" name="FMT6_8_8_UINT"/>
+ <value value="0x12" name="FMT6_8_8_SINT"/>
+ <value value="0x13" name="FMT6_L8_A8_UNORM"/>
+
+ <value value="0x15" name="FMT6_16_UNORM"/>
+ <value value="0x16" name="FMT6_16_SNORM"/>
+ <value value="0x17" name="FMT6_16_FLOAT"/>
+ <value value="0x18" name="FMT6_16_UINT"/>
+ <value value="0x19" name="FMT6_16_SINT"/>
+
+ <value value="0x21" name="FMT6_8_8_8_UNORM"/>
+ <value value="0x22" name="FMT6_8_8_8_SNORM"/>
+ <value value="0x23" name="FMT6_8_8_8_UINT"/>
+ <value value="0x24" name="FMT6_8_8_8_SINT"/>
+
+ <value value="0x30" name="FMT6_8_8_8_8_UNORM"/>
+ <value value="0x31" name="FMT6_8_8_8_X8_UNORM"/> <!-- samples 1 for alpha -->
+ <value value="0x32" name="FMT6_8_8_8_8_SNORM"/>
+ <value value="0x33" name="FMT6_8_8_8_8_UINT"/>
+ <value value="0x34" name="FMT6_8_8_8_8_SINT"/>
+
+ <value value="0x35" name="FMT6_9_9_9_E5_FLOAT"/>
+
+ <value value="0x36" name="FMT6_10_10_10_2_UNORM"/>
+ <value value="0x37" name="FMT6_10_10_10_2_UNORM_DEST"/>
+ <value value="0x39" name="FMT6_10_10_10_2_SNORM"/>
+ <value value="0x3a" name="FMT6_10_10_10_2_UINT"/>
+ <value value="0x3b" name="FMT6_10_10_10_2_SINT"/>
+
+ <value value="0x42" name="FMT6_11_11_10_FLOAT"/>
+
+ <value value="0x43" name="FMT6_16_16_UNORM"/>
+ <value value="0x44" name="FMT6_16_16_SNORM"/>
+ <value value="0x45" name="FMT6_16_16_FLOAT"/>
+ <value value="0x46" name="FMT6_16_16_UINT"/>
+ <value value="0x47" name="FMT6_16_16_SINT"/>
+
+ <value value="0x48" name="FMT6_32_UNORM"/>
+ <value value="0x49" name="FMT6_32_SNORM"/>
+ <value value="0x4a" name="FMT6_32_FLOAT"/>
+ <value value="0x4b" name="FMT6_32_UINT"/>
+ <value value="0x4c" name="FMT6_32_SINT"/>
+ <value value="0x4d" name="FMT6_32_FIXED"/>
+
+ <value value="0x58" name="FMT6_16_16_16_UNORM"/>
+ <value value="0x59" name="FMT6_16_16_16_SNORM"/>
+ <value value="0x5a" name="FMT6_16_16_16_FLOAT"/>
+ <value value="0x5b" name="FMT6_16_16_16_UINT"/>
+ <value value="0x5c" name="FMT6_16_16_16_SINT"/>
+
+ <value value="0x60" name="FMT6_16_16_16_16_UNORM"/>
+ <value value="0x61" name="FMT6_16_16_16_16_SNORM"/>
+ <value value="0x62" name="FMT6_16_16_16_16_FLOAT"/>
+ <value value="0x63" name="FMT6_16_16_16_16_UINT"/>
+ <value value="0x64" name="FMT6_16_16_16_16_SINT"/>
+
+ <value value="0x65" name="FMT6_32_32_UNORM"/>
+ <value value="0x66" name="FMT6_32_32_SNORM"/>
+ <value value="0x67" name="FMT6_32_32_FLOAT"/>
+ <value value="0x68" name="FMT6_32_32_UINT"/>
+ <value value="0x69" name="FMT6_32_32_SINT"/>
+ <value value="0x6a" name="FMT6_32_32_FIXED"/>
+
+ <value value="0x70" name="FMT6_32_32_32_UNORM"/>
+ <value value="0x71" name="FMT6_32_32_32_SNORM"/>
+ <value value="0x72" name="FMT6_32_32_32_UINT"/>
+ <value value="0x73" name="FMT6_32_32_32_SINT"/>
+ <value value="0x74" name="FMT6_32_32_32_FLOAT"/>
+ <value value="0x75" name="FMT6_32_32_32_FIXED"/>
+
+ <value value="0x80" name="FMT6_32_32_32_32_UNORM"/>
+ <value value="0x81" name="FMT6_32_32_32_32_SNORM"/>
+ <value value="0x82" name="FMT6_32_32_32_32_FLOAT"/>
+ <value value="0x83" name="FMT6_32_32_32_32_UINT"/>
+ <value value="0x84" name="FMT6_32_32_32_32_SINT"/>
+ <value value="0x85" name="FMT6_32_32_32_32_FIXED"/>
+
+ <value value="0x8c" name="FMT6_G8R8B8R8_422_UNORM"/> <!-- UYVY -->
+ <value value="0x8d" name="FMT6_R8G8R8B8_422_UNORM"/> <!-- YUYV -->
+ <value value="0x8e" name="FMT6_R8_G8B8_2PLANE_420_UNORM"/> <!-- NV12 -->
+ <value value="0x8f" name="FMT6_NV21"/>
+ <value value="0x90" name="FMT6_R8_G8_B8_3PLANE_420_UNORM"/> <!-- YV12 -->
+
+ <value value="0x91" name="FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8"/>
+
+	<!-- Note: tiling/UBWC for these may be different from equivalent formats.
+	     For example, FMT6_NV12_Y is not compatible with FMT6_8_UNORM.
+ -->
+ <value value="0x94" name="FMT6_NV12_Y"/>
+ <value value="0x95" name="FMT6_NV12_UV"/>
+ <value value="0x96" name="FMT6_NV12_VU"/>
+ <value value="0x97" name="FMT6_NV12_4R"/>
+ <value value="0x98" name="FMT6_NV12_4R_Y"/>
+ <value value="0x99" name="FMT6_NV12_4R_UV"/>
+ <value value="0x9a" name="FMT6_P010"/>
+ <value value="0x9b" name="FMT6_P010_Y"/>
+ <value value="0x9c" name="FMT6_P010_UV"/>
+ <value value="0x9d" name="FMT6_TP10"/>
+ <value value="0x9e" name="FMT6_TP10_Y"/>
+ <value value="0x9f" name="FMT6_TP10_UV"/>
+
+ <value value="0xa0" name="FMT6_Z24_UNORM_S8_UINT"/>
+
+ <value value="0xab" name="FMT6_ETC2_RG11_UNORM"/>
+ <value value="0xac" name="FMT6_ETC2_RG11_SNORM"/>
+ <value value="0xad" name="FMT6_ETC2_R11_UNORM"/>
+ <value value="0xae" name="FMT6_ETC2_R11_SNORM"/>
+ <value value="0xaf" name="FMT6_ETC1"/>
+ <value value="0xb0" name="FMT6_ETC2_RGB8"/>
+ <value value="0xb1" name="FMT6_ETC2_RGBA8"/>
+ <value value="0xb2" name="FMT6_ETC2_RGB8A1"/>
+ <value value="0xb3" name="FMT6_DXT1"/>
+ <value value="0xb4" name="FMT6_DXT3"/>
+ <value value="0xb5" name="FMT6_DXT5"/>
+ <value value="0xb6" name="FMT6_RGTC1_UNORM"/>
+ <value value="0xb7" name="FMT6_RGTC1_UNORM_FAST"/>
+ <value value="0xb8" name="FMT6_RGTC1_SNORM"/>
+ <value value="0xb9" name="FMT6_RGTC1_SNORM_FAST"/>
+ <value value="0xba" name="FMT6_RGTC2_UNORM"/>
+ <value value="0xbb" name="FMT6_RGTC2_UNORM_FAST"/>
+ <value value="0xbc" name="FMT6_RGTC2_SNORM"/>
+ <value value="0xbd" name="FMT6_RGTC2_SNORM_FAST"/>
+ <value value="0xbe" name="FMT6_BPTC_UFLOAT"/>
+ <value value="0xbf" name="FMT6_BPTC_FLOAT"/>
+ <value value="0xc0" name="FMT6_BPTC"/>
+ <value value="0xc1" name="FMT6_ASTC_4x4"/>
+ <value value="0xc2" name="FMT6_ASTC_5x4"/>
+ <value value="0xc3" name="FMT6_ASTC_5x5"/>
+ <value value="0xc4" name="FMT6_ASTC_6x5"/>
+ <value value="0xc5" name="FMT6_ASTC_6x6"/>
+ <value value="0xc6" name="FMT6_ASTC_8x5"/>
+ <value value="0xc7" name="FMT6_ASTC_8x6"/>
+ <value value="0xc8" name="FMT6_ASTC_8x8"/>
+ <value value="0xc9" name="FMT6_ASTC_10x5"/>
+ <value value="0xca" name="FMT6_ASTC_10x6"/>
+ <value value="0xcb" name="FMT6_ASTC_10x8"/>
+ <value value="0xcc" name="FMT6_ASTC_10x10"/>
+ <value value="0xcd" name="FMT6_ASTC_12x10"/>
+ <value value="0xce" name="FMT6_ASTC_12x12"/>
+
+ <!-- for sampling stencil (integer, 2nd channel), not available on a630 -->
+ <value value="0xea" name="FMT6_Z24_UINT_S8_UINT"/>
+
+ <!-- Not a hw enum, used internally in driver -->
+ <value value="0xff" name="FMT6_NONE"/>
+
+</enum>
+
+<!-- probably same as a5xx -->
+<enum name="a6xx_polygon_mode">
+ <value name="POLYMODE6_POINTS" value="1"/>
+ <value name="POLYMODE6_LINES" value="2"/>
+ <value name="POLYMODE6_TRIANGLES" value="3"/>
+</enum>
+
+<enum name="a6xx_depth_format">
+ <value name="DEPTH6_NONE" value="0"/>
+ <value name="DEPTH6_16" value="1"/>
+ <value name="DEPTH6_24_8" value="2"/>
+ <value name="DEPTH6_32" value="4"/>
+</enum>
+
+<bitset name="a6x_cp_protect" inline="yes">
+ <bitfield name="BASE_ADDR" low="0" high="17"/>
+ <bitfield name="MASK_LEN" low="18" high="30"/>
+ <bitfield name="READ" pos="31" type="boolean"/>
+</bitset>
+
+<enum name="a6xx_shader_id">
+ <value value="0x9" name="A6XX_TP0_TMO_DATA"/>
+ <value value="0xa" name="A6XX_TP0_SMO_DATA"/>
+ <value value="0xb" name="A6XX_TP0_MIPMAP_BASE_DATA"/>
+ <value value="0x19" name="A6XX_TP1_TMO_DATA"/>
+ <value value="0x1a" name="A6XX_TP1_SMO_DATA"/>
+ <value value="0x1b" name="A6XX_TP1_MIPMAP_BASE_DATA"/>
+ <value value="0x29" name="A6XX_SP_INST_DATA"/>
+ <value value="0x2a" name="A6XX_SP_LB_0_DATA"/>
+ <value value="0x2b" name="A6XX_SP_LB_1_DATA"/>
+ <value value="0x2c" name="A6XX_SP_LB_2_DATA"/>
+ <value value="0x2d" name="A6XX_SP_LB_3_DATA"/>
+ <value value="0x2e" name="A6XX_SP_LB_4_DATA"/>
+ <value value="0x2f" name="A6XX_SP_LB_5_DATA"/>
+ <value value="0x30" name="A6XX_SP_CB_BINDLESS_DATA"/>
+ <value value="0x31" name="A6XX_SP_CB_LEGACY_DATA"/>
+ <value value="0x32" name="A6XX_SP_GFX_UAV_BASE_DATA"/>
+ <value value="0x33" name="A6XX_SP_INST_TAG"/>
+ <value value="0x34" name="A6XX_SP_CB_BINDLESS_TAG"/>
+ <value value="0x35" name="A6XX_SP_TMO_UMO_TAG"/>
+ <value value="0x36" name="A6XX_SP_SMO_TAG"/>
+ <value value="0x37" name="A6XX_SP_STATE_DATA"/>
+ <value value="0x49" name="A6XX_HLSQ_CHUNK_CVS_RAM"/>
+ <value value="0x4a" name="A6XX_HLSQ_CHUNK_CPS_RAM"/>
+ <value value="0x4b" name="A6XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
+ <value value="0x4c" name="A6XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
+ <value value="0x4d" name="A6XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
+ <value value="0x4e" name="A6XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
+ <value value="0x50" name="A6XX_HLSQ_CVS_MISC_RAM"/>
+ <value value="0x51" name="A6XX_HLSQ_CPS_MISC_RAM"/>
+ <value value="0x52" name="A6XX_HLSQ_INST_RAM"/>
+ <value value="0x53" name="A6XX_HLSQ_GFX_CVS_CONST_RAM"/>
+ <value value="0x54" name="A6XX_HLSQ_GFX_CPS_CONST_RAM"/>
+ <value value="0x55" name="A6XX_HLSQ_CVS_MISC_RAM_TAG"/>
+ <value value="0x56" name="A6XX_HLSQ_CPS_MISC_RAM_TAG"/>
+ <value value="0x57" name="A6XX_HLSQ_INST_RAM_TAG"/>
+ <value value="0x58" name="A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
+ <value value="0x59" name="A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
+ <value value="0x5a" name="A6XX_HLSQ_PWR_REST_RAM"/>
+ <value value="0x5b" name="A6XX_HLSQ_PWR_REST_TAG"/>
+ <value value="0x60" name="A6XX_HLSQ_DATAPATH_META"/>
+ <value value="0x61" name="A6XX_HLSQ_FRONTEND_META"/>
+ <value value="0x62" name="A6XX_HLSQ_INDIRECT_META"/>
+ <value value="0x63" name="A6XX_HLSQ_BACKEND_META"/>
+ <value value="0x70" name="A6XX_SP_LB_6_DATA"/>
+ <value value="0x71" name="A6XX_SP_LB_7_DATA"/>
+ <value value="0x73" name="A6XX_HLSQ_INST_RAM_1"/>
+</enum>
+
+<enum name="a6xx_debugbus_id">
+ <value value="0x1" name="A6XX_DBGBUS_CP"/>
+ <value value="0x2" name="A6XX_DBGBUS_RBBM"/>
+ <value value="0x3" name="A6XX_DBGBUS_VBIF"/>
+ <value value="0x4" name="A6XX_DBGBUS_HLSQ"/>
+ <value value="0x5" name="A6XX_DBGBUS_UCHE"/>
+ <value value="0x6" name="A6XX_DBGBUS_DPM"/>
+ <value value="0x7" name="A6XX_DBGBUS_TESS"/>
+ <value value="0x8" name="A6XX_DBGBUS_PC"/>
+ <value value="0x9" name="A6XX_DBGBUS_VFDP"/>
+ <value value="0xa" name="A6XX_DBGBUS_VPC"/>
+ <value value="0xb" name="A6XX_DBGBUS_TSE"/>
+ <value value="0xc" name="A6XX_DBGBUS_RAS"/>
+ <value value="0xd" name="A6XX_DBGBUS_VSC"/>
+ <value value="0xe" name="A6XX_DBGBUS_COM"/>
+ <value value="0x10" name="A6XX_DBGBUS_LRZ"/>
+ <value value="0x11" name="A6XX_DBGBUS_A2D"/>
+ <value value="0x12" name="A6XX_DBGBUS_CCUFCHE"/>
+ <value value="0x13" name="A6XX_DBGBUS_GMU_CX"/>
+ <value value="0x14" name="A6XX_DBGBUS_RBP"/>
+ <value value="0x15" name="A6XX_DBGBUS_DCS"/>
+ <value value="0x16" name="A6XX_DBGBUS_DBGC"/>
+ <value value="0x17" name="A6XX_DBGBUS_CX"/>
+ <value value="0x18" name="A6XX_DBGBUS_GMU_GX"/>
+ <value value="0x19" name="A6XX_DBGBUS_TPFCHE"/>
+ <value value="0x1a" name="A6XX_DBGBUS_GBIF_GX"/>
+ <value value="0x1d" name="A6XX_DBGBUS_GPC"/>
+ <value value="0x1e" name="A6XX_DBGBUS_LARC"/>
+ <value value="0x1f" name="A6XX_DBGBUS_HLSQ_SPTP"/>
+ <value value="0x20" name="A6XX_DBGBUS_RB_0"/>
+ <value value="0x21" name="A6XX_DBGBUS_RB_1"/>
+ <value value="0x22" name="A6XX_DBGBUS_RB_2"/>
+ <value value="0x24" name="A6XX_DBGBUS_UCHE_WRAPPER"/>
+ <value value="0x28" name="A6XX_DBGBUS_CCU_0"/>
+ <value value="0x29" name="A6XX_DBGBUS_CCU_1"/>
+ <value value="0x2a" name="A6XX_DBGBUS_CCU_2"/>
+ <value value="0x38" name="A6XX_DBGBUS_VFD_0"/>
+ <value value="0x39" name="A6XX_DBGBUS_VFD_1"/>
+ <value value="0x3a" name="A6XX_DBGBUS_VFD_2"/>
+ <value value="0x3b" name="A6XX_DBGBUS_VFD_3"/>
+ <value value="0x3c" name="A6XX_DBGBUS_VFD_4"/>
+ <value value="0x3d" name="A6XX_DBGBUS_VFD_5"/>
+ <value value="0x40" name="A6XX_DBGBUS_SP_0"/>
+ <value value="0x41" name="A6XX_DBGBUS_SP_1"/>
+ <value value="0x42" name="A6XX_DBGBUS_SP_2"/>
+ <value value="0x48" name="A6XX_DBGBUS_TPL1_0"/>
+ <value value="0x49" name="A6XX_DBGBUS_TPL1_1"/>
+ <value value="0x4a" name="A6XX_DBGBUS_TPL1_2"/>
+ <value value="0x4b" name="A6XX_DBGBUS_TPL1_3"/>
+ <value value="0x4c" name="A6XX_DBGBUS_TPL1_4"/>
+ <value value="0x4d" name="A6XX_DBGBUS_TPL1_5"/>
+ <value value="0x58" name="A6XX_DBGBUS_SPTP_0"/>
+ <value value="0x59" name="A6XX_DBGBUS_SPTP_1"/>
+ <value value="0x5a" name="A6XX_DBGBUS_SPTP_2"/>
+ <value value="0x5b" name="A6XX_DBGBUS_SPTP_3"/>
+ <value value="0x5c" name="A6XX_DBGBUS_SPTP_4"/>
+ <value value="0x5d" name="A6XX_DBGBUS_SPTP_5"/>
+</enum>
+
+<!--
+Used in a6xx_a2d_bit_cntl. The value mostly seems to correlate to the
+component type/size, so I think it relates to the internal format used for
+blending. The one exception is that 16b unorm and 32b float use the
+same value... maybe 16b unorm is uncommon enough that it was just easier
+to upconvert to 32b float internally?
+
+ 8b unorm: 10 (sometimes 0, is the high bit part of something else?)
+16b unorm: 4
+
+32b int: 7
+16b int: 6
+ 8b int: 5
+
+32b float: 4
+16b float: 3
+ -->
+<enum name="a6xx_2d_ifmt">
+ <value value="0x10" name="R2D_UNORM8"/>
+ <value value="0x7" name="R2D_INT32"/>
+ <value value="0x6" name="R2D_INT16"/>
+ <value value="0x5" name="R2D_INT8"/>
+ <value value="0x4" name="R2D_FLOAT32"/>
+ <value value="0x3" name="R2D_FLOAT16"/>
+ <value value="0x1" name="R2D_UNORM8_SRGB"/>
+ <value value="0x0" name="R2D_RAW"/>
+</enum>
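+<!--
+    A sketch of how a driver might derive the internal format from the
+    table above (hypothetical helper; mesa has similar logic):
+
+        static enum a6xx_2d_ifmt ifmt_for(bool is_int, bool is_float, int bits)
+        {
+            if (is_int)
+                return bits == 32 ? R2D_INT32 :
+                       bits == 16 ? R2D_INT16 : R2D_INT8;
+            if (is_float)
+                return bits == 16 ? R2D_FLOAT16 : R2D_FLOAT32;
+            /* 16b unorm also uses R2D_FLOAT32; 8b unorm uses R2D_UNORM8 */
+            return bits == 16 ? R2D_FLOAT32 : R2D_UNORM8;
+        }
+-->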
+
+<enum name="a6xx_tex_type">
+ <value name="A6XX_TEX_1D" value="0"/>
+ <value name="A6XX_TEX_2D" value="1"/>
+ <value name="A6XX_TEX_CUBE" value="2"/>
+ <value name="A6XX_TEX_3D" value="3"/>
+ <value name="A6XX_TEX_BUFFER" value="4"/>
+ <doc>
+        A special buffer type for use as the source of buffer-to-image
+        copies, with lower alignment requirements than A6XX_TEX_2D.
+        Available since A7XX.
+ </doc>
+ <value name="A6XX_TEX_IMG_BUFFER" value="5"/>
+</enum>
+
+<enum name="a6xx_ztest_mode">
+ <doc>Allow early z-test and early-lrz (if applicable)</doc>
+ <value value="0x0" name="A6XX_EARLY_Z"/>
+ <doc>Disable early z-test and early-lrz test (if applicable)</doc>
+ <value value="0x1" name="A6XX_LATE_Z"/>
+ <doc>
+ A special mode that allows early-lrz (if applicable) or early-z
+ tests, but also does late-z tests at which point it writes depth.
+
+        This mode is used when a fragment can be killed (via discard or
+        sample mask) after the early-z test and it also writes depth. In
+        that case depth can only be written at the late-z stage, but it
+        is still safe to use early-z to discard fragments.
+
+        However, this mode is not compatible with:
+ - Lack of D/S attachment
+ - Stencil writes on stencil or depth test failures
+ - Per-sample shading
+ </doc>
+ <value value="0x2" name="A6XX_EARLY_Z_LATE_Z"/>
+ <doc>Not a real hw value, used internally by mesa</doc>
+ <value value="0x3" name="A6XX_INVALID_ZTEST"/>
+</enum>
+
+<enum name="a6xx_tess_spacing">
+ <value value="0x0" name="TESS_EQUAL"/>
+ <value value="0x2" name="TESS_FRACTIONAL_ODD"/>
+ <value value="0x3" name="TESS_FRACTIONAL_EVEN"/>
+</enum>
+<enum name="a6xx_tess_output">
+ <value value="0x0" name="TESS_POINTS"/>
+ <value value="0x1" name="TESS_LINES"/>
+ <value value="0x2" name="TESS_CW_TRIS"/>
+ <value value="0x3" name="TESS_CCW_TRIS"/>
+</enum>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml
new file mode 100644
index 000000000000..c446a2eb1120
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml
@@ -0,0 +1,600 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a6xx_cp_perfcounter_select">
+ <value value="0" name="PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="PERF_CP_SQE_IDLE"/>
+ <value value="16" name="PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="PERF_CP_PM4_DATA"/>
+ <value value="46" name="PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="PERF_CP_SQE_INSTR_COUNTER"/>
+</enum>
+
+<enum name="a6xx_rbbm_perfcounter_select">
+ <value value="0" name="PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a6xx_pc_perfcounter_select">
+ <value value="0" name="PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="PERF_PC_STALL_CYCLES_TSE"/>
+ <value value="4" name="PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="PERF_PC_STALL_CYCLES_TSE_ONLY"/>
+ <value value="8" name="PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="PERF_PC_STARVE_CYCLES_FOR_POSITION"/>
+ <value value="14" name="PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="15" name="PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="16" name="PERF_PC_INSTANCES"/>
+ <value value="17" name="PERF_PC_VPC_PRIMITIVES"/>
+ <value value="18" name="PERF_PC_DEAD_PRIM"/>
+ <value value="19" name="PERF_PC_LIVE_PRIM"/>
+ <value value="20" name="PERF_PC_VERTEX_HITS"/>
+ <value value="21" name="PERF_PC_IA_VERTICES"/>
+ <value value="22" name="PERF_PC_IA_PRIMITIVES"/>
+ <value value="23" name="PERF_PC_GS_PRIMITIVES"/>
+ <value value="24" name="PERF_PC_HS_INVOCATIONS"/>
+ <value value="25" name="PERF_PC_DS_INVOCATIONS"/>
+ <value value="26" name="PERF_PC_VS_INVOCATIONS"/>
+ <value value="27" name="PERF_PC_GS_INVOCATIONS"/>
+ <value value="28" name="PERF_PC_DS_PRIMITIVES"/>
+ <value value="29" name="PERF_PC_VPC_POS_DATA_TRANSACTION"/>
+ <value value="30" name="PERF_PC_3D_DRAWCALLS"/>
+ <value value="31" name="PERF_PC_2D_DRAWCALLS"/>
+ <value value="32" name="PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="33" name="PERF_TESS_BUSY_CYCLES"/>
+ <value value="34" name="PERF_TESS_WORKING_CYCLES"/>
+ <value value="35" name="PERF_TESS_STALL_CYCLES_PC"/>
+ <value value="36" name="PERF_TESS_STARVE_CYCLES_PC"/>
+ <value value="37" name="PERF_PC_TSE_TRANSACTION"/>
+ <value value="38" name="PERF_PC_TSE_VERTEX"/>
+ <value value="39" name="PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="40" name="PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="41" name="PERF_PC_TESS_FACTOR_TRANS"/>
+</enum>
+
+<enum name="a6xx_vfd_perfcounter_select">
+ <value value="0" name="PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="PERF_VFDP_VS_STAGE_WAVES"/>
+</enum>
+
+<enum name="a6xx_hlsq_perfcounter_select">
+ <value value="0" name="PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="PERF_HLSQ_FS_STAGE_1X_WAVES"/>
+ <value value="7" name="PERF_HLSQ_FS_STAGE_2X_WAVES"/>
+ <value value="8" name="PERF_HLSQ_QUADS"/>
+ <value value="9" name="PERF_HLSQ_CS_INVOCATIONS"/>
+ <value value="10" name="PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="PERF_HLSQ_PIXELS"/>
+ <value value="20" name="PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+</enum>
+
+<enum name="a6xx_vpc_perfcounter_select">
+ <value value="0" name="PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="PERF_VPC_STALL_CYCLES_PC"/>
+ <value value="6" name="PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="PERF_VPC_NUM_ATTR_REQ_LM"/>
+</enum>
+
+<enum name="a6xx_tse_perfcounter_select">
+ <value value="0" name="PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="PERF_TSE_CINVOCATION"/>
+ <value value="16" name="PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a6xx_ras_perfcounter_select">
+ <value value="0" name="PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="PERF_RAS_8X4_TILES"/>
+ <value value="6" name="PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="PERF_RAS_BLOCKS"/>
+</enum>
+
+<enum name="a6xx_uche_perfcounter_select">
+ <value value="0" name="PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="PERF_UCHE_EVICTS"/>
+ <value value="19" name="PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="PERF_UCHE_RAM_WRITE_REQ"/>
+</enum>
+
+<enum name="a6xx_tp_perfcounter_select">
+ <value value="0" name="PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="PERF_TP_FLAG_CACHE_REQUEST_SAMPLES"/>
+ <value value="5" name="PERF_TP_FLAG_CACHE_REQUEST_LATENCY"/>
+ <value value="6" name="PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="PERF_TP_QUADS_1D"/>
+ <value value="19" name="PERF_TP_QUADS_2D"/>
+ <value value="20" name="PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="PERF_TP_QUADS_3D"/>
+ <value value="22" name="PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="PERF_TP_L1_5_L2_COMPRESS_REQS"/>
+ <value value="43" name="PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="PERF_TP_FLAG_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="55" name="PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="56" name="PERF_TP_STARVE_CYCLES_UCHE"/>
+</enum>
+
+<enum name="a6xx_sp_perfcounter_select">
+ <value value="0" name="PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="PERF_SP_FS_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="PERF_SP_FS_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="PERF_SP_GPR_READ"/>
+ <value value="58" name="PERF_SP_GPR_WRITE"/>
+ <value value="59" name="PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="PERF_SP_WORKING_EU"/>
+ <value value="72" name="PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="PERF_SP_EXECUTABLE_WAVES"/>
+</enum>
+
+<enum name="a6xx_rb_perfcounter_select">
+ <value value="0" name="PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="PERF_RB_Z_READ"/>
+ <value value="13" name="PERF_RB_Z_WRITE"/>
+ <value value="14" name="PERF_RB_C_READ"/>
+ <value value="15" name="PERF_RB_C_WRITE"/>
+ <value value="16" name="PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="PERF_RB_Z_PASS"/>
+ <value value="18" name="PERF_RB_Z_FAIL"/>
+ <value value="19" name="PERF_RB_S_FAIL"/>
+ <value value="20" name="PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="PERF_RB_3D_PIXELS"/>
+ <value value="30" name="PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="PERF_RB_EARLY_Z_SKIP_GRANT"/>
+</enum>
+
+<enum name="a6xx_vsc_perfcounter_select">
+ <value value="0" name="PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VSC_EOT_NUM"/>
+ <value value="4" name="PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a6xx_ccu_perfcounter_select">
+ <value value="0" name="PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="PERF_CCU_STARVE_CYCLES_FLAG_RETURN"/>
+ <value value="4" name="PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="5" name="PERF_CCU_COLOR_BLOCKS"/>
+ <value value="6" name="PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="7" name="PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="8" name="PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="9" name="PERF_CCU_GMEM_READ"/>
+ <value value="10" name="PERF_CCU_GMEM_WRITE"/>
+ <value value="11" name="PERF_CCU_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="12" name="PERF_CCU_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="13" name="PERF_CCU_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="14" name="PERF_CCU_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="15" name="PERF_CCU_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="16" name="PERF_CCU_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="17" name="PERF_CCU_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="18" name="PERF_CCU_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="19" name="PERF_CCU_COLOR_READ_FLAG0_COUNT"/>
+ <value value="20" name="PERF_CCU_COLOR_READ_FLAG1_COUNT"/>
+ <value value="21" name="PERF_CCU_COLOR_READ_FLAG2_COUNT"/>
+ <value value="22" name="PERF_CCU_COLOR_READ_FLAG3_COUNT"/>
+ <value value="23" name="PERF_CCU_COLOR_READ_FLAG4_COUNT"/>
+ <value value="24" name="PERF_CCU_COLOR_READ_FLAG5_COUNT"/>
+ <value value="25" name="PERF_CCU_COLOR_READ_FLAG6_COUNT"/>
+ <value value="26" name="PERF_CCU_COLOR_READ_FLAG8_COUNT"/>
+ <value value="27" name="PERF_CCU_2D_RD_REQ"/>
+ <value value="28" name="PERF_CCU_2D_WR_REQ"/>
+</enum>
+
+<enum name="a6xx_lrz_perfcounter_select">
+ <value value="0" name="PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="PERF_LRZ_FULLY_COVERED_TILES"/>
+ <value value="20" name="PERF_LRZ_PARTIAL_COVERED_TILES"/>
+ <value value="21" name="PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="22" name="PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="23" name="PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="24" name="PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="25" name="PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="26" name="PERF_LRZ_STALL_CYCLES_VC"/>
+ <value value="27" name="PERF_LRZ_RAS_MASK_TRANS"/>
+</enum>
+
+<enum name="a6xx_cmp_perfcounter_select">
+ <value value="0" name="PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="PERF_CMPDECMP_FLAG_FETCH_CYCLES"/>
+ <value value="10" name="PERF_CMPDECMP_FLAG_FETCH_SAMPLES"/>
+ <value value="11" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="12" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="13" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="14" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="15" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="16" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="17" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="18" name="PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="19" name="PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="20" name="PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="21" name="PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="22" name="PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="23" name="PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="24" name="PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="25" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ"/>
+ <value value="26" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR"/>
+ <value value="27" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN"/>
+ <value value="28" name="PERF_CMPDECMP_2D_RD_DATA"/>
+ <value value="29" name="PERF_CMPDECMP_2D_WR_DATA"/>
+ <value value="30" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="31" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="32" name="PERF_CMPDECMP_2D_OUTPUT_TRANS"/>
+ <value value="33" name="PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="34" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="35" name="PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="36" name="PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="37" name="PERF_CMPDECMP_2D_BUSY_CYCLES"/>
+ <value value="38" name="PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES"/>
+ <value value="39" name="PERF_CMPDECMP_2D_PIXELS"/>
+</enum>
+
+</database>
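
Note on usage: the *_perfcounter_select enums above are countables, not registers.
Software picks one countable per hardware counter slot by writing its value into
that unit's select register, then reads the counter itself back as a free-running
64-bit LO/HI pair. A minimal user-space-style sketch of that flow, assuming
placeholder register offsets and MMIO helpers rather than the msm driver's real
REG_A6XX_* symbols and accessors:

#include <stdint.h>

/* Stand-in MMIO helpers; the real driver goes through gpu_write()/gpu_read()
 * style accessors.  'reg' is a dword offset into the register space. */
static void mmio_write(volatile uint32_t *base, uint32_t reg, uint32_t val)
{
	base[reg] = val;
}

static uint32_t mmio_read(volatile uint32_t *base, uint32_t reg)
{
	return base[reg];
}

/* Placeholder offsets for counter slot 0 of the SP group; the generated
 * headers carry the real REG_A6XX_* offsets. */
#define SP_PERFCTR_SP_SEL_0   0x0100
#define RBBM_PERFCTR_SP_0_LO  0x0200
#define RBBM_PERFCTR_SP_0_HI  0x0201

#define PERF_SP_BUSY_CYCLES   0	/* from a6xx_sp_perfcounter_select above */

static uint64_t sample_sp_busy_cycles(volatile uint32_t *mmio)
{
	uint32_t lo, hi;

	/* Route the countable to counter slot 0 of the SP group. */
	mmio_write(mmio, SP_PERFCTR_SP_SEL_0, PERF_SP_BUSY_CYCLES);

	/* The counter is a 64-bit value split across a LO/HI register
	 * pair; re-read HI so a wrap between the two reads is caught. */
	do {
		hi = mmio_read(mmio, RBBM_PERFCTR_SP_0_HI);
		lo = mmio_read(mmio, RBBM_PERFCTR_SP_0_LO);
	} while (hi != mmio_read(mmio, RBBM_PERFCTR_SP_0_HI));

	return ((uint64_t)hi << 32) | lo;
}
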
diff --git a/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml
new file mode 100644
index 000000000000..661b0dd0f675
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a7xx_statetype_id">
+ <value value="0" name="A7XX_TP0_NCTX_REG"/>
+ <value value="1" name="A7XX_TP0_CTX0_3D_CVS_REG"/>
+ <value value="2" name="A7XX_TP0_CTX0_3D_CPS_REG"/>
+ <value value="3" name="A7XX_TP0_CTX1_3D_CVS_REG"/>
+ <value value="4" name="A7XX_TP0_CTX1_3D_CPS_REG"/>
+ <value value="5" name="A7XX_TP0_CTX2_3D_CPS_REG"/>
+ <value value="6" name="A7XX_TP0_CTX3_3D_CPS_REG"/>
+ <value value="9" name="A7XX_TP0_TMO_DATA"/>
+ <value value="10" name="A7XX_TP0_SMO_DATA"/>
+ <value value="11" name="A7XX_TP0_MIPMAP_BASE_DATA"/>
+ <value value="32" name="A7XX_SP_NCTX_REG"/>
+ <value value="33" name="A7XX_SP_CTX0_3D_CVS_REG"/>
+ <value value="34" name="A7XX_SP_CTX0_3D_CPS_REG"/>
+ <value value="35" name="A7XX_SP_CTX1_3D_CVS_REG"/>
+ <value value="36" name="A7XX_SP_CTX1_3D_CPS_REG"/>
+ <value value="37" name="A7XX_SP_CTX2_3D_CPS_REG"/>
+ <value value="38" name="A7XX_SP_CTX3_3D_CPS_REG"/>
+ <value value="39" name="A7XX_SP_INST_DATA"/>
+ <value value="40" name="A7XX_SP_INST_DATA_1"/>
+ <value value="41" name="A7XX_SP_LB_0_DATA"/>
+ <value value="42" name="A7XX_SP_LB_1_DATA"/>
+ <value value="43" name="A7XX_SP_LB_2_DATA"/>
+ <value value="44" name="A7XX_SP_LB_3_DATA"/>
+ <value value="45" name="A7XX_SP_LB_4_DATA"/>
+ <value value="46" name="A7XX_SP_LB_5_DATA"/>
+ <value value="47" name="A7XX_SP_LB_6_DATA"/>
+ <value value="48" name="A7XX_SP_LB_7_DATA"/>
+ <value value="49" name="A7XX_SP_CB_RAM"/>
+ <value value="50" name="A7XX_SP_LB_13_DATA"/>
+ <value value="51" name="A7XX_SP_LB_14_DATA"/>
+ <value value="52" name="A7XX_SP_INST_TAG"/>
+ <value value="53" name="A7XX_SP_INST_DATA_2"/>
+ <value value="54" name="A7XX_SP_TMO_TAG"/>
+ <value value="55" name="A7XX_SP_SMO_TAG"/>
+ <value value="56" name="A7XX_SP_STATE_DATA"/>
+ <value value="57" name="A7XX_SP_HWAVE_RAM"/>
+ <value value="58" name="A7XX_SP_L0_INST_BUF"/>
+ <value value="59" name="A7XX_SP_LB_8_DATA"/>
+ <value value="60" name="A7XX_SP_LB_9_DATA"/>
+ <value value="61" name="A7XX_SP_LB_10_DATA"/>
+ <value value="62" name="A7XX_SP_LB_11_DATA"/>
+ <value value="63" name="A7XX_SP_LB_12_DATA"/>
+ <value value="64" name="A7XX_HLSQ_DATAPATH_DSTR_META"/>
+ <value value="67" name="A7XX_HLSQ_L2STC_TAG_RAM"/>
+ <value value="68" name="A7XX_HLSQ_L2STC_INFO_CMD"/>
+ <value value="69" name="A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="70" name="A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="71" name="A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM"/>
+ <value value="72" name="A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM"/>
+ <value value="73" name="A7XX_HLSQ_CHUNK_CVS_RAM"/>
+ <value value="74" name="A7XX_HLSQ_CHUNK_CPS_RAM"/>
+ <value value="75" name="A7XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
+ <value value="76" name="A7XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
+ <value value="77" name="A7XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
+ <value value="78" name="A7XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
+ <value value="79" name="A7XX_HLSQ_CVS_MISC_RAM"/>
+ <value value="80" name="A7XX_HLSQ_CPS_MISC_RAM"/>
+ <value value="81" name="A7XX_HLSQ_CPS_MISC_RAM_1"/>
+ <value value="82" name="A7XX_HLSQ_INST_RAM"/>
+ <value value="83" name="A7XX_HLSQ_GFX_CVS_CONST_RAM"/>
+ <value value="84" name="A7XX_HLSQ_GFX_CPS_CONST_RAM"/>
+ <value value="85" name="A7XX_HLSQ_CVS_MISC_RAM_TAG"/>
+ <value value="86" name="A7XX_HLSQ_CPS_MISC_RAM_TAG"/>
+ <value value="87" name="A7XX_HLSQ_INST_RAM_TAG"/>
+ <value value="88" name="A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
+ <value value="89" name="A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
+ <value value="90" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM"/>
+ <value value="91" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG"/>
+ <value value="92" name="A7XX_HLSQ_INST_RAM_1"/>
+ <value value="93" name="A7XX_HLSQ_STPROC_META"/>
+ <value value="94" name="A7XX_HLSQ_BV_BE_META"/>
+ <value value="95" name="A7XX_HLSQ_INST_RAM_2"/>
+ <value value="96" name="A7XX_HLSQ_DATAPATH_META"/>
+ <value value="97" name="A7XX_HLSQ_FRONTEND_META"/>
+ <value value="98" name="A7XX_HLSQ_INDIRECT_META"/>
+ <value value="99" name="A7XX_HLSQ_BACKEND_META"/>
+</enum>
+
+<enum name="a7xx_state_location">
+ <value value="0" name="A7XX_HLSQ_STATE"/>
+ <value value="1" name="A7XX_HLSQ_DP"/>
+ <value value="2" name="A7XX_SP_TOP"/>
+ <value value="3" name="A7XX_USPTP"/>
+ <value value="4" name="A7XX_HLSQ_DP_STR"/>
+</enum>
+
+<enum name="a7xx_pipe">
+ <value value="0" name="A7XX_PIPE_NONE"/>
+ <value value="1" name="A7XX_PIPE_BR"/>
+ <value value="2" name="A7XX_PIPE_BV"/>
+ <value value="3" name="A7XX_PIPE_LPAC"/>
+</enum>
+
+<enum name="a7xx_cluster">
+ <value value="0" name="A7XX_CLUSTER_NONE"/>
+ <value value="1" name="A7XX_CLUSTER_FE"/>
+ <value value="2" name="A7XX_CLUSTER_SP_VS"/>
+ <value value="3" name="A7XX_CLUSTER_PC_VS"/>
+ <value value="4" name="A7XX_CLUSTER_GRAS"/>
+ <value value="5" name="A7XX_CLUSTER_SP_PS"/>
+ <value value="6" name="A7XX_CLUSTER_VPC_PS"/>
+ <value value="7" name="A7XX_CLUSTER_PS"/>
+</enum>
+
+<enum name="a7xx_debugbus_id">
+ <value value="1" name="A7XX_DBGBUS_CP_0_0"/>
+ <value value="2" name="A7XX_DBGBUS_CP_0_1"/>
+ <value value="3" name="A7XX_DBGBUS_RBBM"/>
+ <value value="5" name="A7XX_DBGBUS_GBIF_GX"/>
+ <value value="6" name="A7XX_DBGBUS_GBIF_CX"/>
+ <value value="7" name="A7XX_DBGBUS_HLSQ"/>
+ <value value="9" name="A7XX_DBGBUS_UCHE_0"/>
+ <value value="10" name="A7XX_DBGBUS_UCHE_1"/>
+ <value value="13" name="A7XX_DBGBUS_TESS_BR"/>
+ <value value="14" name="A7XX_DBGBUS_TESS_BV"/>
+ <value value="17" name="A7XX_DBGBUS_PC_BR"/>
+ <value value="18" name="A7XX_DBGBUS_PC_BV"/>
+ <value value="21" name="A7XX_DBGBUS_VFDP_BR"/>
+ <value value="22" name="A7XX_DBGBUS_VFDP_BV"/>
+ <value value="25" name="A7XX_DBGBUS_VPC_BR"/>
+ <value value="26" name="A7XX_DBGBUS_VPC_BV"/>
+ <value value="29" name="A7XX_DBGBUS_TSE_BR"/>
+ <value value="30" name="A7XX_DBGBUS_TSE_BV"/>
+ <value value="33" name="A7XX_DBGBUS_RAS_BR"/>
+ <value value="34" name="A7XX_DBGBUS_RAS_BV"/>
+ <value value="37" name="A7XX_DBGBUS_VSC"/>
+ <value value="39" name="A7XX_DBGBUS_COM_0"/>
+ <value value="43" name="A7XX_DBGBUS_LRZ_BR"/>
+ <value value="44" name="A7XX_DBGBUS_LRZ_BV"/>
+ <value value="47" name="A7XX_DBGBUS_UFC_0"/>
+ <value value="48" name="A7XX_DBGBUS_UFC_1"/>
+ <value value="55" name="A7XX_DBGBUS_GMU_GX"/>
+ <value value="59" name="A7XX_DBGBUS_DBGC"/>
+ <value value="60" name="A7XX_DBGBUS_CX"/>
+ <value value="61" name="A7XX_DBGBUS_GMU_CX"/>
+ <value value="62" name="A7XX_DBGBUS_GPC_BR"/>
+ <value value="63" name="A7XX_DBGBUS_GPC_BV"/>
+ <value value="66" name="A7XX_DBGBUS_LARC"/>
+ <value value="68" name="A7XX_DBGBUS_HLSQ_SPTP"/>
+ <value value="70" name="A7XX_DBGBUS_RB_0"/>
+ <value value="71" name="A7XX_DBGBUS_RB_1"/>
+ <value value="72" name="A7XX_DBGBUS_RB_2"/>
+ <value value="73" name="A7XX_DBGBUS_RB_3"/>
+ <value value="74" name="A7XX_DBGBUS_RB_4"/>
+ <value value="75" name="A7XX_DBGBUS_RB_5"/>
+ <value value="102" name="A7XX_DBGBUS_UCHE_WRAPPER"/>
+ <value value="106" name="A7XX_DBGBUS_CCU_0"/>
+ <value value="107" name="A7XX_DBGBUS_CCU_1"/>
+ <value value="108" name="A7XX_DBGBUS_CCU_2"/>
+ <value value="109" name="A7XX_DBGBUS_CCU_3"/>
+ <value value="110" name="A7XX_DBGBUS_CCU_4"/>
+ <value value="111" name="A7XX_DBGBUS_CCU_5"/>
+ <value value="138" name="A7XX_DBGBUS_VFD_BR_0"/>
+ <value value="139" name="A7XX_DBGBUS_VFD_BR_1"/>
+ <value value="140" name="A7XX_DBGBUS_VFD_BR_2"/>
+ <value value="141" name="A7XX_DBGBUS_VFD_BR_3"/>
+ <value value="142" name="A7XX_DBGBUS_VFD_BR_4"/>
+ <value value="143" name="A7XX_DBGBUS_VFD_BR_5"/>
+ <value value="144" name="A7XX_DBGBUS_VFD_BR_6"/>
+ <value value="145" name="A7XX_DBGBUS_VFD_BR_7"/>
+ <value value="202" name="A7XX_DBGBUS_VFD_BV_0"/>
+ <value value="203" name="A7XX_DBGBUS_VFD_BV_1"/>
+ <value value="204" name="A7XX_DBGBUS_VFD_BV_2"/>
+ <value value="205" name="A7XX_DBGBUS_VFD_BV_3"/>
+ <value value="234" name="A7XX_DBGBUS_USP_0"/>
+ <value value="235" name="A7XX_DBGBUS_USP_1"/>
+ <value value="236" name="A7XX_DBGBUS_USP_2"/>
+ <value value="237" name="A7XX_DBGBUS_USP_3"/>
+ <value value="238" name="A7XX_DBGBUS_USP_4"/>
+ <value value="239" name="A7XX_DBGBUS_USP_5"/>
+ <value value="266" name="A7XX_DBGBUS_TP_0"/>
+ <value value="267" name="A7XX_DBGBUS_TP_1"/>
+ <value value="268" name="A7XX_DBGBUS_TP_2"/>
+ <value value="269" name="A7XX_DBGBUS_TP_3"/>
+ <value value="270" name="A7XX_DBGBUS_TP_4"/>
+ <value value="271" name="A7XX_DBGBUS_TP_5"/>
+ <value value="272" name="A7XX_DBGBUS_TP_6"/>
+ <value value="273" name="A7XX_DBGBUS_TP_7"/>
+ <value value="274" name="A7XX_DBGBUS_TP_8"/>
+ <value value="275" name="A7XX_DBGBUS_TP_9"/>
+ <value value="276" name="A7XX_DBGBUS_TP_10"/>
+ <value value="277" name="A7XX_DBGBUS_TP_11"/>
+ <value value="330" name="A7XX_DBGBUS_USPTP_0"/>
+ <value value="331" name="A7XX_DBGBUS_USPTP_1"/>
+ <value value="332" name="A7XX_DBGBUS_USPTP_2"/>
+ <value value="333" name="A7XX_DBGBUS_USPTP_3"/>
+ <value value="334" name="A7XX_DBGBUS_USPTP_4"/>
+ <value value="335" name="A7XX_DBGBUS_USPTP_5"/>
+ <value value="336" name="A7XX_DBGBUS_USPTP_6"/>
+ <value value="337" name="A7XX_DBGBUS_USPTP_7"/>
+ <value value="338" name="A7XX_DBGBUS_USPTP_8"/>
+ <value value="339" name="A7XX_DBGBUS_USPTP_9"/>
+ <value value="340" name="A7XX_DBGBUS_USPTP_10"/>
+ <value value="341" name="A7XX_DBGBUS_USPTP_11"/>
+ <value value="396" name="A7XX_DBGBUS_CCHE_0"/>
+ <value value="397" name="A7XX_DBGBUS_CCHE_1"/>
+ <value value="398" name="A7XX_DBGBUS_CCHE_2"/>
+ <value value="408" name="A7XX_DBGBUS_VPC_DSTR_0"/>
+ <value value="409" name="A7XX_DBGBUS_VPC_DSTR_1"/>
+ <value value="410" name="A7XX_DBGBUS_VPC_DSTR_2"/>
+ <value value="411" name="A7XX_DBGBUS_HLSQ_DP_STR_0"/>
+ <value value="412" name="A7XX_DBGBUS_HLSQ_DP_STR_1"/>
+ <value value="413" name="A7XX_DBGBUS_HLSQ_DP_STR_2"/>
+ <value value="414" name="A7XX_DBGBUS_HLSQ_DP_STR_3"/>
+ <value value="415" name="A7XX_DBGBUS_HLSQ_DP_STR_4"/>
+ <value value="416" name="A7XX_DBGBUS_HLSQ_DP_STR_5"/>
+ <value value="443" name="A7XX_DBGBUS_UFC_DSTR_0"/>
+ <value value="444" name="A7XX_DBGBUS_UFC_DSTR_1"/>
+ <value value="445" name="A7XX_DBGBUS_UFC_DSTR_2"/>
+ <value value="446" name="A7XX_DBGBUS_CGC_SUBCORE"/>
+ <value value="447" name="A7XX_DBGBUS_CGC_CORE"/>
+</enum>
+
+</database>
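
Note on usage: unlike the perfcounter countables, a7xx_statetype_id,
a7xx_state_location, a7xx_pipe and a7xx_cluster are coordinates for crash-dump
snapshotting: each captured block is identified by which internal RAM it is
(statetype), where that RAM sits (location), and which pipe (BR/BV/LPAC) it
belongs to. A sketch of how such a dump list could be tabulated, using values
copied from the enums above; the struct layout and the capture sizes are
assumptions for illustration, not the driver's actual tables:

#include <stdint.h>
#include <stdio.h>

/* Values copied from the enums above. */
enum { A7XX_SP_INST_DATA = 39, A7XX_HLSQ_INST_RAM = 82 };  /* a7xx_statetype_id */
enum { A7XX_HLSQ_STATE = 0, A7XX_USPTP = 3 };              /* a7xx_state_location */
enum { A7XX_PIPE_BR = 1 };                                 /* a7xx_pipe */

/* Hypothetical dump-list entry; num_dwords is a placeholder size. */
struct a7xx_block_dump {
	const char *name;
	uint32_t statetype;   /* a7xx_statetype_id */
	uint32_t location;    /* a7xx_state_location */
	uint32_t pipe;        /* a7xx_pipe */
	uint32_t num_dwords;  /* placeholder capture size */
};

static const struct a7xx_block_dump dump_list[] = {
	{ "SP_INST_DATA",  A7XX_SP_INST_DATA,  A7XX_USPTP,      A7XX_PIPE_BR, 0x800 },
	{ "HLSQ_INST_RAM", A7XX_HLSQ_INST_RAM, A7XX_HLSQ_STATE, A7XX_PIPE_BR, 0x800 },
};

int main(void)
{
	size_t i;

	/* Walk the table the way a snapshot routine would: select the
	 * (pipe, location, statetype) triple, then read out the block. */
	for (i = 0; i < sizeof(dump_list) / sizeof(dump_list[0]); i++) {
		const struct a7xx_block_dump *b = &dump_list[i];

		printf("capture %s: statetype=%u location=%u pipe=%u dwords=%u\n",
		       b->name, b->statetype, b->location, b->pipe,
		       b->num_dwords);
	}
	return 0;
}
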
diff --git a/drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml b/drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml
new file mode 100644
index 000000000000..9bf78b0a854b
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml
@@ -0,0 +1,1030 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a7xx_cp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="A7XX_PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="A7XX_PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="A7XX_PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="A7XX_PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="A7XX_PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="A7XX_PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="A7XX_PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="A7XX_PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="A7XX_PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="A7XX_PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="A7XX_PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="A7XX_PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="A7XX_PERF_CP_SQE_IDLE"/>
+ <value value="16" name="A7XX_PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="A7XX_PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="A7XX_PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="A7XX_PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="A7XX_PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="A7XX_PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="A7XX_PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="A7XX_PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="A7XX_PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="A7XX_PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="A7XX_PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="A7XX_PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="A7XX_PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="A7XX_PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="A7XX_PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="A7XX_PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="A7XX_PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="A7XX_PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="A7XX_PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="A7XX_PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="A7XX_PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="A7XX_PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="A7XX_PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="A7XX_PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="A7XX_PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="A7XX_PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="A7XX_PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="A7XX_PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="A7XX_PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="A7XX_PERF_CP_PM4_DATA"/>
+ <value value="46" name="A7XX_PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="A7XX_PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="A7XX_PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="A7XX_PERF_CP_SQE_INSTR_COUNTER"/>
+ <value value="50" name="A7XX_PERF_CP_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_CP_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_CP_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_CP_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_CP_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_CP_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_CP_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_CP_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_CP_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_CP_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_CP_CLUSTER0_FULL"/>
+ <value value="61" name="A7XX_PERF_CP_CLUSTER1_FULL"/>
+ <value value="62" name="A7XX_PERF_CP_CLUSTER2_FULL"/>
+ <value value="63" name="A7XX_PERF_CP_CLUSTER3_FULL"/>
+ <value value="64" name="A7XX_PERF_CP_CLUSTER4_FULL"/>
+ <value value="65" name="A7XX_PERF_CP_CLUSTER5_FULL"/>
+ <value value="66" name="A7XX_PERF_CP_CLUSTER6_FULL"/>
+ <value value="67" name="A7XX_PERF_CP_CLUSTER6_EMPTY"/>
+ <value value="68" name="A7XX_PERF_CP_ICACHE_MISSES"/>
+ <value value="69" name="A7XX_PERF_CP_ICACHE_HITS"/>
+ <value value="70" name="A7XX_PERF_CP_ICACHE_STALL"/>
+ <value value="71" name="A7XX_PERF_CP_DCACHE_MISSES"/>
+ <value value="72" name="A7XX_PERF_CP_DCACHE_HITS"/>
+ <value value="73" name="A7XX_PERF_CP_DCACHE_STALLS"/>
+ <value value="74" name="A7XX_PERF_CP_AQE_SQE_STALL"/>
+ <value value="75" name="A7XX_PERF_CP_SQE_AQE_STARVE"/>
+ <value value="76" name="A7XX_PERF_CP_PREEMPT_LATENCY"/>
+ <value value="77" name="A7XX_PERF_CP_SQE_MD8_STALL_CYCLES"/>
+ <value value="78" name="A7XX_PERF_CP_SQE_MESH_EXEC_CYCLES"/>
+ <value value="79" name="A7XX_PERF_CP_AQE_NUM_AS_CHUNKS"/>
+ <value value="80" name="A7XX_PERF_CP_AQE_NUM_MS_CHUNKS"/>
+</enum>
+
+<enum name="a7xx_rbbm_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="A7XX_PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="A7XX_PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="A7XX_PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="A7XX_PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="A7XX_PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="A7XX_PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="A7XX_PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="A7XX_PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="A7XX_PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="A7XX_PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="A7XX_PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="A7XX_PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a7xx_pc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="A7XX_PERF_PC_RESERVED"/>
+ <value value="4" name="A7XX_PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="A7XX_PERF_PC_STALL_CYCLES_VFD_ONLY"/>
+ <value value="8" name="A7XX_PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="A7XX_PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="A7XX_PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="14" name="A7XX_PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="15" name="A7XX_PERF_PC_INSTANCES"/>
+ <value value="16" name="A7XX_PERF_PC_VPC_PRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_PC_DEAD_PRIM"/>
+ <value value="18" name="A7XX_PERF_PC_LIVE_PRIM"/>
+ <value value="19" name="A7XX_PERF_PC_VERTEX_HITS"/>
+ <value value="20" name="A7XX_PERF_PC_IA_VERTICES"/>
+ <value value="21" name="A7XX_PERF_PC_IA_PRIMITIVES"/>
+ <value value="22" name="A7XX_PERF_PC_RESERVED_22"/>
+ <value value="23" name="A7XX_PERF_PC_HS_INVOCATIONS"/>
+ <value value="24" name="A7XX_PERF_PC_DS_INVOCATIONS"/>
+ <value value="25" name="A7XX_PERF_PC_VS_INVOCATIONS"/>
+ <value value="26" name="A7XX_PERF_PC_GS_INVOCATIONS"/>
+ <value value="27" name="A7XX_PERF_PC_DS_PRIMITIVES"/>
+ <value value="28" name="A7XX_PERF_PC_3D_DRAWCALLS"/>
+ <value value="29" name="A7XX_PERF_PC_2D_DRAWCALLS"/>
+ <value value="30" name="A7XX_PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="31" name="A7XX_PERF_PC_TESS_BUSY_CYCLES"/>
+ <value value="32" name="A7XX_PERF_PC_TESS_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_PC_TESS_STALL_CYCLES_PC"/>
+ <value value="34" name="A7XX_PERF_PC_TESS_STARVE_CYCLES_PC"/>
+ <value value="35" name="A7XX_PERF_PC_TESS_SINGLE_PRIM_CYCLES"/>
+ <value value="36" name="A7XX_PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="37" name="A7XX_PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="38" name="A7XX_PERF_PC_TESS_FACTOR_TRANS"/>
+ <value value="39" name="A7XX_PERF_PC_TAG_CHECKED_VERTICES"/>
+ <value value="40" name="A7XX_PERF_PC_MESH_VS_WAVES"/>
+ <value value="41" name="A7XX_PERF_PC_MESH_DRAWS"/>
+ <value value="42" name="A7XX_PERF_PC_MESH_DEAD_DRAWS"/>
+ <value value="43" name="A7XX_PERF_PC_MESH_MVIS_EN_DRAWS"/>
+ <value value="44" name="A7XX_PERF_PC_MESH_DEAD_PRIM"/>
+ <value value="45" name="A7XX_PERF_PC_MESH_LIVE_PRIM"/>
+ <value value="46" name="A7XX_PERF_PC_MESH_PA_EN_PRIM"/>
+ <value value="47" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_MVIS_STREAM"/>
+ <value value="48" name="A7XX_PERF_PC_STARVE_CYCLES_PREDRAW"/>
+ <value value="49" name="A7XX_PERF_PC_STALL_CYCLES_COMPUTE_GFX"/>
+ <value value="50" name="A7XX_PERF_PC_STALL_CYCLES_GFX_COMPUTE"/>
+ <value value="51" name="A7XX_PERF_PC_TESS_PC_MULTI_PATCH_TRANS"/>
+</enum>
+
+<enum name="a7xx_vfd_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="A7XX_PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="A7XX_PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="A7XX_PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="A7XX_PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="A7XX_PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="A7XX_PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="A7XX_PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="A7XX_PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="A7XX_PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="A7XX_PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="A7XX_PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="A7XX_PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="A7XX_PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="A7XX_PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="A7XX_PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="A7XX_PERF_VFDP_VS_STAGE_WAVES"/>
+ <value value="23" name="A7XX_PERF_VFD_STALL_CYCLES_PRG_END_FE"/>
+ <value value="24" name="A7XX_PERF_VFD_STALL_CYCLES_CBSYNC"/>
+</enum>
+
+<enum name="a7xx_hlsq_perfcounter_select">
+ <value value="0" name="A7XX_PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="A7XX_PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="A7XX_PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="A7XX_PERF_HLSQ_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_HLSQ_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_HLSQ_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_HLSQ_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="A7XX_PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="A7XX_PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="A7XX_PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="A7XX_PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="A7XX_PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="A7XX_PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="A7XX_PERF_HLSQ_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+ <value value="21" name="A7XX_PERF_HLSQ_VSBR_STALL_CYCLES"/>
+ <value value="22" name="A7XX_PERF_HLSQ_FS_STALL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_HLSQ_LPAC_STALL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_HLSQ_BV_STALL_CYCLES"/>
+ <value value="25" name="A7XX_PERF_HLSQ_VSBR_DEREF_CYCLES"/>
+ <value value="26" name="A7XX_PERF_HLSQ_FS_DEREF_CYCLES"/>
+ <value value="27" name="A7XX_PERF_HLSQ_LPAC_DEREF_CYCLES"/>
+ <value value="28" name="A7XX_PERF_HLSQ_BV_DEREF_CYCLES"/>
+ <value value="29" name="A7XX_PERF_HLSQ_VSBR_S2W_CYCLES"/>
+ <value value="30" name="A7XX_PERF_HLSQ_FS_S2W_CYCLES"/>
+ <value value="31" name="A7XX_PERF_HLSQ_LPAC_S2W_CYCLES"/>
+ <value value="32" name="A7XX_PERF_HLSQ_BV_S2W_CYCLES"/>
+ <value value="33" name="A7XX_PERF_HLSQ_VSBR_WAIT_FS_S2W"/>
+ <value value="34" name="A7XX_PERF_HLSQ_FS_WAIT_VS_S2W"/>
+ <value value="35" name="A7XX_PERF_HLSQ_LPAC_WAIT_VS_S2W"/>
+ <value value="36" name="A7XX_PERF_HLSQ_BV_WAIT_FS_S2W"/>
+ <value value="37" name="A7XX_PERF_HLSQ_VS_WAIT_CONST_RESOURCE"/>
+ <value value="38" name="A7XX_PERF_HLSQ_FS_WAIT_SAME_VS_S2W"/>
+ <value value="39" name="A7XX_PERF_HLSQ_FS_STARVING_SP"/>
+ <value value="40" name="A7XX_PERF_HLSQ_VS_DATA_WAIT_PROGRAMMING"/>
+ <value value="41" name="A7XX_PERF_HLSQ_BV_DATA_WAIT_PROGRAMMING"/>
+ <value value="42" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_VS"/>
+ <value value="43" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_VS"/>
+ <value value="44" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_FS"/>
+ <value value="45" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_FS"/>
+ <value value="46" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_BV"/>
+ <value value="47" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_BV"/>
+ <value value="48" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_LPAC"/>
+ <value value="49" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_LPAC"/>
+ <value value="50" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_VS"/>
+ <value value="51" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_FS"/>
+ <value value="52" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_BV"/>
+ <value value="53" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_LPAC"/>
+ <value value="54" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_VS"/>
+ <value value="55" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_FS"/>
+ <value value="56" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_BV"/>
+ <value value="57" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_LPAC"/>
+</enum>
+
+<enum name="a7xx_vpc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="A7XX_PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="A7XX_PERF_VPC_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="A7XX_PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="A7XX_PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="A7XX_PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="A7XX_PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="A7XX_PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="A7XX_PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="A7XX_PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="A7XX_PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="A7XX_PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="A7XX_PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="A7XX_PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="A7XX_PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="A7XX_PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="A7XX_PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="A7XX_PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="A7XX_PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="A7XX_PERF_VPC_NUM_ATTR_REQ_LM"/>
+ <value value="28" name="A7XX_PERF_VPC_STALL_CYCLE_TSE"/>
+ <value value="29" name="A7XX_PERF_VPC_TSE_PRIMITIVES"/>
+ <value value="30" name="A7XX_PERF_VPC_GS_PRIMITIVES"/>
+ <value value="31" name="A7XX_PERF_VPC_TSE_TRANSACTIONS"/>
+ <value value="32" name="A7XX_PERF_VPC_STALL_CYCLES_CCU"/>
+ <value value="33" name="A7XX_PERF_VPC_NUM_WM_HIT"/>
+ <value value="34" name="A7XX_PERF_VPC_STALL_DQ_WACK"/>
+ <value value="35" name="A7XX_PERF_VPC_STALL_CYCLES_CCHE"/>
+ <value value="36" name="A7XX_PERF_VPC_STARVE_CYCLES_CCHE"/>
+ <value value="37" name="A7XX_PERF_VPC_NUM_PA_REQ"/>
+ <value value="38" name="A7XX_PERF_VPC_NUM_LM_REQ_HIT"/>
+ <value value="39" name="A7XX_PERF_VPC_CCHE_REQBUF_FULL"/>
+ <value value="40" name="A7XX_PERF_VPC_STALL_CYCLES_LM_ACK"/>
+ <value value="41" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_FE"/>
+ <value value="42" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_PCVS"/>
+ <value value="43" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_VPCPS"/>
+</enum>
+
+<enum name="a7xx_tse_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="A7XX_PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="A7XX_PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="A7XX_PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="A7XX_PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="A7XX_PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="A7XX_PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="A7XX_PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="A7XX_PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="A7XX_PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="A7XX_PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="A7XX_PERF_TSE_CINVOCATION"/>
+ <value value="16" name="A7XX_PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="A7XX_PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="A7XX_PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a7xx_ras_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="A7XX_PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="A7XX_PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="A7XX_PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="A7XX_PERF_RAS_8X4_TILES"/>
+ <value value="6" name="A7XX_PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="A7XX_PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="A7XX_PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="A7XX_PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="A7XX_PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="A7XX_PERF_RAS_BLOCKS"/>
+ <value value="13" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_0_WORKING_CC_l2"/>
+ <value value="14" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_1_WORKING_CC_l2"/>
+ <value value="15" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_2_WORKING_CC_l2"/>
+ <value value="16" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_3_WORKING_CC_l2"/>
+ <value value="17" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_4_WORKING_CC_l2"/>
+ <value value="18" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_5_WORKING_CC_l2"/>
+ <value value="19" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_6_WORKING_CC_l2"/>
+ <value value="20" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_7_WORKING_CC_l2"/>
+ <value value="21" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_8_WORKING_CC_l2"/>
+ <value value="22" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_9_WORKING_CC_l2"/>
+ <value value="23" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_10_WORKING_CC_l2"/>
+ <value value="24" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_11_WORKING_CC_l2"/>
+ <value value="25" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_12_WORKING_CC_l2"/>
+ <value value="26" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_13_WORKING_CC_l2"/>
+ <value value="27" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_14_WORKING_CC_l2"/>
+ <value value="28" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_15_WORKING_CC_l2"/>
+ <value value="29" name="A7XX_PERF_RAS_FALSE_PARTIAL_STILE"/>
+</enum>
+
+<enum name="a7xx_uche_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="A7XX_PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="A7XX_PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="A7XX_PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="A7XX_PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="A7XX_PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="A7XX_PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="A7XX_PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="A7XX_PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="A7XX_PERF_UCHE_EVICTS"/>
+ <value value="19" name="A7XX_PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="A7XX_PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="A7XX_PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="A7XX_PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="A7XX_PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="A7XX_PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="A7XX_PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="A7XX_PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="A7XX_PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="A7XX_PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="A7XX_PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="A7XX_PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="A7XX_PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="A7XX_PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="A7XX_PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="A7XX_PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="A7XX_PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="A7XX_PERF_UCHE_RAM_WRITE_REQ"/>
+ <value value="40" name="A7XX_PERF_UCHE_STARVED_CYCLES_VBIF_DECMP"/>
+ <value value="41" name="A7XX_PERF_UCHE_STALL_CYCLES_DECMP"/>
+ <value value="42" name="A7XX_PERF_UCHE_ARBITER_STALL_CYCLES_VBIF"/>
+ <value value="43" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_UBWC"/>
+ <value value="44" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_NONUBWC"/>
+ <value value="45" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_GMEM"/>
+ <value value="46" name="A7XX_PERF_UCHE_LONG_LINE_ALL_EVICTS_KAILUA"/>
+ <value value="47" name="A7XX_PERF_UCHE_LONG_LINE_PARTIAL_EVICTS_KAILUA"/>
+ <value value="48" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_CCHE"/>
+ <value value="49" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_OTHER_KAILUA"/>
+ <value value="50" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_CCHE"/>
+ <value value="51" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_OTHER_CLIENTS"/>
+ <value value="52" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH0"/>
+ <value value="53" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH1"/>
+ <value value="54" name="A7XX_PERF_UCHE_CCHE_TPH_QUEUE_FULL"/>
+ <value value="55" name="A7XX_PERF_UCHE_CCHE_DPH_QUEUE_FULL"/>
+ <value value="56" name="A7XX_PERF_UCHE_GMEM_WRITE_BEATS"/>
+ <value value="57" name="A7XX_PERF_UCHE_UBWC_READ_BEATS"/>
+ <value value="58" name="A7XX_PERF_UCHE_UBWC_WRITE_BEATS"/>
+</enum>
+
+<enum name="a7xx_tp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_SAMPLES"/>
+ <value value="5" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_CYCLES"/>
+ <value value="6" name="A7XX_PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="A7XX_PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="A7XX_PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="A7XX_PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="A7XX_PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="A7XX_PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="A7XX_PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="A7XX_PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="A7XX_PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="A7XX_PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="A7XX_PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="A7XX_PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="A7XX_PERF_TP_QUADS_1D"/>
+ <value value="19" name="A7XX_PERF_TP_QUADS_2D"/>
+ <value value="20" name="A7XX_PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="A7XX_PERF_TP_QUADS_3D"/>
+ <value value="22" name="A7XX_PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="A7XX_PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="A7XX_PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="A7XX_PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="A7XX_PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="A7XX_PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="A7XX_PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="A7XX_PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="A7XX_PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="A7XX_PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="A7XX_PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="A7XX_PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="A7XX_PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="A7XX_PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="A7XX_PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="A7XX_PERF_TP_L1_5_COMPRESS_REQS"/>
+ <value value="43" name="A7XX_PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="A7XX_PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="A7XX_PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="A7XX_PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="A7XX_PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="A7XX_PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="A7XX_PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="A7XX_PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="A7XX_PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="A7XX_PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="55" name="A7XX_PERF_TP_STARVE_CYCLES_UCHE"/>
+ <value value="56" name="A7XX_PERF_TP_STALL_CYCLES_UFC"/>
+ <value value="57" name="A7XX_PERF_TP_FORMAT_DECOMP"/>
+ <value value="58" name="A7XX_PERF_TP_FILTER_POINT_FP16"/>
+ <value value="59" name="A7XX_PERF_TP_FILTER_POINT_FP32"/>
+ <value value="60" name="A7XX_PERF_TP_LATENCY_FIFO_FULL"/>
+ <value value="61" name="A7XX_PERF_TP_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_TP_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_TP_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_TP_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_TP_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_TP_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_TP_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_TP_RESERVED_68"/>
+ <value value="69" name="A7XX_PERF_TP_RESERVED_69"/>
+ <value value="70" name="A7XX_PERF_TP_RESERVED_70"/>
+ <value value="71" name="A7XX_PERF_TP_RESERVED_71"/>
+ <value value="72" name="A7XX_PERF_TP_RESERVED_72"/>
+ <value value="73" name="A7XX_PERF_TP_RESERVED_73"/>
+ <value value="74" name="A7XX_PERF_TP_RESERVED_74"/>
+ <value value="75" name="A7XX_PERF_TP_RESERVED_75"/>
+ <value value="76" name="A7XX_PERF_TP_RESERVED_76"/>
+ <value value="77" name="A7XX_PERF_TP_RESERVED_77"/>
+ <value value="78" name="A7XX_PERF_TP_RESERVED_78"/>
+ <value value="79" name="A7XX_PERF_TP_RESERVED_79"/>
+ <value value="80" name="A7XX_PERF_TP_RESERVED_80"/>
+ <value value="81" name="A7XX_PERF_TP_RESERVED_81"/>
+ <value value="82" name="A7XX_PERF_TP_RESERVED_82"/>
+ <value value="83" name="A7XX_PERF_TP_RESERVED_83"/>
+ <value value="84" name="A7XX_PERF_TP_RESERVED_84"/>
+ <value value="85" name="A7XX_PERF_TP_RESERVED_85"/>
+ <value value="86" name="A7XX_PERF_TP_RESERVED_86"/>
+ <value value="87" name="A7XX_PERF_TP_RESERVED_87"/>
+ <value value="88" name="A7XX_PERF_TP_RESERVED_88"/>
+ <value value="89" name="A7XX_PERF_TP_RESERVED_89"/>
+ <value value="90" name="A7XX_PERF_TP_RESERVED_90"/>
+ <value value="91" name="A7XX_PERF_TP_RESERVED_91"/>
+ <value value="92" name="A7XX_PERF_TP_RESERVED_92"/>
+ <value value="93" name="A7XX_PERF_TP_RESERVED_93"/>
+ <value value="94" name="A7XX_PERF_TP_RESERVED_94"/>
+ <value value="95" name="A7XX_PERF_TP_RESERVED_95"/>
+ <value value="96" name="A7XX_PERF_TP_RESERVED_96"/>
+ <value value="97" name="A7XX_PERF_TP_RESERVED_97"/>
+ <value value="98" name="A7XX_PERF_TP_RESERVED_98"/>
+ <value value="99" name="A7XX_PERF_TP_RESERVED_99"/>
+ <value value="100" name="A7XX_PERF_TP_RESERVED_100"/>
+ <value value="101" name="A7XX_PERF_TP_RESERVED_101"/>
+ <value value="102" name="A7XX_PERF_TP_RESERVED_102"/>
+ <value value="103" name="A7XX_PERF_TP_RESERVED_103"/>
+ <value value="104" name="A7XX_PERF_TP_RESERVED_104"/>
+ <value value="105" name="A7XX_PERF_TP_RESERVED_105"/>
+ <value value="106" name="A7XX_PERF_TP_RESERVED_106"/>
+ <value value="107" name="A7XX_PERF_TP_RESERVED_107"/>
+ <value value="108" name="A7XX_PERF_TP_RESERVED_108"/>
+ <value value="109" name="A7XX_PERF_TP_RESERVED_109"/>
+ <value value="110" name="A7XX_PERF_TP_RESERVED_110"/>
+ <value value="111" name="A7XX_PERF_TP_RESERVED_111"/>
+ <value value="112" name="A7XX_PERF_TP_RESERVED_112"/>
+ <value value="113" name="A7XX_PERF_TP_RESERVED_113"/>
+ <value value="114" name="A7XX_PERF_TP_RESERVED_114"/>
+ <value value="115" name="A7XX_PERF_TP_RESERVED_115"/>
+ <value value="116" name="A7XX_PERF_TP_RESERVED_116"/>
+ <value value="117" name="A7XX_PERF_TP_RESERVED_117"/>
+ <value value="118" name="A7XX_PERF_TP_RESERVED_118"/>
+ <value value="119" name="A7XX_PERF_TP_RESERVED_119"/>
+ <value value="120" name="A7XX_PERF_TP_RESERVED_120"/>
+ <value value="121" name="A7XX_PERF_TP_RESERVED_121"/>
+ <value value="122" name="A7XX_PERF_TP_RESERVED_122"/>
+ <value value="123" name="A7XX_PERF_TP_RESERVED_123"/>
+ <value value="124" name="A7XX_PERF_TP_RESERVED_124"/>
+ <value value="125" name="A7XX_PERF_TP_RESERVED_125"/>
+ <value value="126" name="A7XX_PERF_TP_RESERVED_126"/>
+ <value value="127" name="A7XX_PERF_TP_RESERVED_127"/>
+ <value value="128" name="A7XX_PERF_TP_FORMAT_DECOMP_BILINEAR"/>
+ <value value="129" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP16"/>
+ <value value="130" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP16"/>
+ <value value="131" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP32"/>
+ <value value="132" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP32"/>
+</enum>
+
+<enum name="a7xx_sp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="A7XX_PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="A7XX_PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="A7XX_PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="A7XX_PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="A7XX_PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="A7XX_PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="A7XX_PERF_SP_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_SP_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="A7XX_PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="A7XX_PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="A7XX_PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="A7XX_PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="A7XX_PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="A7XX_PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="A7XX_PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="A7XX_PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="A7XX_PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="A7XX_PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="A7XX_PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="A7XX_PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="A7XX_PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="A7XX_PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="A7XX_PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="A7XX_PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="A7XX_PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="A7XX_PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="A7XX_PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="A7XX_PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="A7XX_PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="A7XX_PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="A7XX_PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="A7XX_PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="A7XX_PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="A7XX_PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="A7XX_PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="A7XX_PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="A7XX_PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="A7XX_PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="A7XX_PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="A7XX_PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="A7XX_PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="A7XX_PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="A7XX_PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="A7XX_PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="A7XX_PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="A7XX_PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="A7XX_PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="A7XX_PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="A7XX_PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="A7XX_PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="A7XX_PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="A7XX_PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="A7XX_PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="A7XX_PERF_SP_GPR_READ"/>
+ <value value="58" name="A7XX_PERF_SP_GPR_WRITE"/>
+ <value value="59" name="A7XX_PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="A7XX_PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="A7XX_PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="A7XX_PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="A7XX_PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="A7XX_PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="A7XX_PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="A7XX_PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="A7XX_PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="A7XX_PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="A7XX_PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="A7XX_PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="A7XX_PERF_SP_WORKING_EU"/>
+ <value value="72" name="A7XX_PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="A7XX_PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="A7XX_PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="A7XX_PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="A7XX_PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="A7XX_PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="A7XX_PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="A7XX_PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="A7XX_PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="A7XX_PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="A7XX_PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="A7XX_PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="A7XX_PERF_SP_EXECUTABLE_WAVES"/>
+ <value value="85" name="A7XX_PERF_SP_ICL1_MISS_FETCH_CYCLES"/>
+ <value value="86" name="A7XX_PERF_SP_WORKING_EU_LPAC"/>
+ <value value="87" name="A7XX_PERF_SP_BYPASS_BUSY_CYCLES"/>
+ <value value="88" name="A7XX_PERF_SP_ANY_EU_WORKING_LPAC"/>
+ <value value="89" name="A7XX_PERF_SP_WAVE_ALU_CYCLES"/>
+ <value value="90" name="A7XX_PERF_SP_WAVE_EFU_CYCLES"/>
+ <value value="91" name="A7XX_PERF_SP_WAVE_INT_CYCLES"/>
+ <value value="92" name="A7XX_PERF_SP_WAVE_CSP_CYCLES"/>
+ <value value="93" name="A7XX_PERF_SP_EWAVE_CONTEXTS"/>
+ <value value="94" name="A7XX_PERF_SP_EWAVE_CONTEXT_CYCLES"/>
+ <value value="95" name="A7XX_PERF_SP_LPAC_BUSY_CYCLES"/>
+ <value value="96" name="A7XX_PERF_SP_LPAC_INSTRUCTIONS"/>
+ <value value="97" name="A7XX_PERF_SP_FS_STAGE_1X_WAVES"/>
+ <value value="98" name="A7XX_PERF_SP_FS_STAGE_2X_WAVES"/>
+ <value value="99" name="A7XX_PERF_SP_QUADS"/>
+ <value value="100" name="A7XX_PERF_SP_CS_INVOCATIONS"/>
+ <value value="101" name="A7XX_PERF_SP_PIXELS"/>
+ <value value="102" name="A7XX_PERF_SP_LPAC_DRAWCALLS"/>
+ <value value="103" name="A7XX_PERF_SP_PI_WORKING_CYCLES"/>
+ <value value="104" name="A7XX_PERF_SP_WAVE_INPUT_CYCLES"/>
+ <value value="105" name="A7XX_PERF_SP_WAVE_OUTPUT_CYCLES"/>
+ <value value="106" name="A7XX_PERF_SP_WAVE_HWAVE_WAIT_CYCLES"/>
+ <value value="107" name="A7XX_PERF_SP_WAVE_HWAVE_SYNC"/>
+ <value value="108" name="A7XX_PERF_SP_OUTPUT_3D_PIXELS"/>
+ <value value="109" name="A7XX_PERF_SP_FULL_ALU_MAD_INSTRUCTIONS"/>
+ <value value="110" name="A7XX_PERF_SP_HALF_ALU_MAD_INSTRUCTIONS"/>
+ <value value="111" name="A7XX_PERF_SP_FULL_ALU_MUL_INSTRUCTIONS"/>
+ <value value="112" name="A7XX_PERF_SP_HALF_ALU_MUL_INSTRUCTIONS"/>
+ <value value="113" name="A7XX_PERF_SP_FULL_ALU_ADD_INSTRUCTIONS"/>
+ <value value="114" name="A7XX_PERF_SP_HALF_ALU_ADD_INSTRUCTIONS"/>
+ <value value="115" name="A7XX_PERF_SP_BARY_FP32_INSTRUCTIONS"/>
+ <value value="116" name="A7XX_PERF_SP_ALU_GPR_READ_CYCLES"/>
+ <value value="117" name="A7XX_PERF_SP_ALU_DATA_FORWARDING_CYCLES"/>
+ <value value="118" name="A7XX_PERF_SP_LM_FULL_CYCLES"/>
+ <value value="119" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_CYCLES"/>
+ <value value="120" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_SAMPLES"/>
+ <value value="121" name="A7XX_PERF_SP_FS_STAGE_PI_TEX_INSTRUCTION"/>
+ <value value="122" name="A7XX_PERF_SP_RAY_QUERY_INSTRUCTIONS"/>
+ <value value="123" name="A7XX_PERF_SP_RBRT_KICKOFF_FIBERS"/>
+ <value value="124" name="A7XX_PERF_SP_RBRT_KICKOFF_DQUADS"/>
+ <value value="125" name="A7XX_PERF_SP_RTU_BUSY_CYCLES"/>
+ <value value="126" name="A7XX_PERF_SP_RTU_L0_HITS"/>
+ <value value="127" name="A7XX_PERF_SP_RTU_L0_MISSES"/>
+ <value value="128" name="A7XX_PERF_SP_RTU_L0_HIT_ON_MISS"/>
+ <value value="129" name="A7XX_PERF_SP_RTU_STALL_CYCLES_WAVE_QUEUE"/>
+ <value value="130" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_HIT_QUEUE"/>
+ <value value="131" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_MISS_QUEUE"/>
+ <value value="132" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0D_IDX_QUEUE"/>
+ <value value="133" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0DATA"/>
+ <value value="134" name="A7XX_PERF_SP_RTU_STALL_CYCLES_REPLACE_CNT"/>
+ <value value="135" name="A7XX_PERF_SP_RTU_STALL_CYCLES_MRG_CNT"/>
+ <value value="136" name="A7XX_PERF_SP_RTU_STALL_CYCLES_UCHE"/>
+ <value value="137" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_L0"/>
+ <value value="138" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_INS_FIFO"/>
+ <value value="139" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_CYCLES"/>
+ <value value="140" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_SAMPLES"/>
+ <value value="141" name="A7XX_PERF_SP_STCHE_MISS_INC_VS"/>
+ <value value="142" name="A7XX_PERF_SP_STCHE_MISS_INC_FS"/>
+ <value value="143" name="A7XX_PERF_SP_STCHE_MISS_INC_BV"/>
+ <value value="144" name="A7XX_PERF_SP_STCHE_MISS_INC_LPAC"/>
+ <value value="145" name="A7XX_PERF_SP_VGPR_ACTIVE_CONTEXTS"/>
+ <value value="146" name="A7XX_PERF_SP_PGPR_ALLOC_CONTEXTS"/>
+ <value value="147" name="A7XX_PERF_SP_VGPR_ALLOC_CONTEXTS"/>
+ <value value="148" name="A7XX_PERF_SP_RTU_RAY_BOX_INTERSECTIONS"/>
+ <value value="149" name="A7XX_PERF_SP_RTU_RAY_TRIANGLE_INTERSECTIONS"/>
+ <value value="150" name="A7XX_PERF_SP_SCH_STALL_CYCLES_RTU"/>
+</enum>
+
+<enum name="a7xx_rb_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="A7XX_PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="A7XX_PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="A7XX_PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="A7XX_PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="A7XX_PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="A7XX_PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="A7XX_PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="A7XX_PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="A7XX_PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="A7XX_PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="A7XX_PERF_RB_Z_READ"/>
+ <value value="13" name="A7XX_PERF_RB_Z_WRITE"/>
+ <value value="14" name="A7XX_PERF_RB_C_READ"/>
+ <value value="15" name="A7XX_PERF_RB_C_WRITE"/>
+ <value value="16" name="A7XX_PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="A7XX_PERF_RB_Z_PASS"/>
+ <value value="18" name="A7XX_PERF_RB_Z_FAIL"/>
+ <value value="19" name="A7XX_PERF_RB_S_FAIL"/>
+ <value value="20" name="A7XX_PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="A7XX_PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="A7XX_PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="A7XX_PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="A7XX_PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="A7XX_PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="A7XX_PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="A7XX_PERF_RB_3D_PIXELS"/>
+ <value value="30" name="A7XX_PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="A7XX_PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="A7XX_PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="A7XX_PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="A7XX_PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="A7XX_PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="A7XX_PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="A7XX_PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="A7XX_PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="A7XX_PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="A7XX_PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="A7XX_PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="A7XX_PERF_RB_EARLY_Z_SKIP_GRANT"/>
+ <value value="48" name="A7XX_PERF_RB_VRS_1x1_QUADS"/>
+ <value value="49" name="A7XX_PERF_RB_VRS_2x1_QUADS"/>
+ <value value="50" name="A7XX_PERF_RB_VRS_1x2_QUADS"/>
+ <value value="51" name="A7XX_PERF_RB_VRS_2x2_QUADS"/>
+ <value value="52" name="A7XX_PERF_RB_VRS_4x2_QUADS"/>
+ <value value="53" name="A7XX_PERF_RB_VRS_4x4_QUADS"/>
+</enum>
+
+<enum name="a7xx_vsc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VSC_EOT_NUM"/>
+ <value value="4" name="A7XX_PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a7xx_ccu_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="A7XX_PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="A7XX_PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="4" name="A7XX_PERF_CCU_COLOR_BLOCKS"/>
+ <value value="5" name="A7XX_PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="6" name="A7XX_PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="7" name="A7XX_PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="8" name="A7XX_PERF_CCU_GMEM_READ"/>
+ <value value="9" name="A7XX_PERF_CCU_GMEM_WRITE"/>
+ <value value="10" name="A7XX_PERF_CCU_2D_RD_REQ"/>
+ <value value="11" name="A7XX_PERF_CCU_2D_WR_REQ"/>
+ <value value="12" name="A7XX_PERF_CCU_UBWC_COLOR_BLOCKS_CONCURRENT"/>
+ <value value="13" name="A7XX_PERF_CCU_UBWC_DEPTH_BLOCKS_CONCURRENT"/>
+ <value value="14" name="A7XX_PERF_CCU_COLOR_RESOLVE_DROPPED"/>
+ <value value="15" name="A7XX_PERF_CCU_DEPTH_RESOLVE_DROPPED"/>
+ <value value="16" name="A7XX_PERF_CCU_COLOR_RENDER_CONCURRENT"/>
+ <value value="17" name="A7XX_PERF_CCU_DEPTH_RENDER_CONCURRENT"/>
+ <value value="18" name="A7XX_PERF_CCU_COLOR_RESOLVE_AFTER_RENDER"/>
+ <value value="19" name="A7XX_PERF_CCU_DEPTH_RESOLVE_AFTER_RENDER"/>
+ <value value="20" name="A7XX_PERF_CCU_GMEM_EXTRA_DEPTH_READ"/>
+ <value value="21" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA"/>
+ <value value="22" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA_FULL"/>
+</enum>
+
+<enum name="a7xx_lrz_perfcounter_select">
+ <value value="0" name="A7XX_PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="A7XX_PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="A7XX_PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="A7XX_PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="A7XX_PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="A7XX_PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="A7XX_PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="A7XX_PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="A7XX_PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="A7XX_PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="A7XX_PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="A7XX_PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="A7XX_PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="A7XX_PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="A7XX_PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="A7XX_PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="20" name="A7XX_PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="21" name="A7XX_PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="22" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="23" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="24" name="A7XX_PERF_LRZ_RAS_MASK_TRANS"/>
+ <value value="25" name="A7XX_PERF_LRZ_STALL_CYCLES_MVC"/>
+ <value value="26" name="A7XX_PERF_LRZ_TILE_KILLED_BY_IMAGE_VRS"/>
+ <value value="27" name="A7XX_PERF_LRZ_TILE_KILLED_BY_Z"/>
+</enum>
+
+<enum name="a7xx_cmp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="A7XX_PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="10" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="11" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="12" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="13" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="14" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="15" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="16" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="17" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="18" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="19" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="20" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="21" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="22" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="23" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="24" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="25" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="26" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="27" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="28" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="29" name="A7XX_PERF_CMPDECMP_RESOLVE_EVENTS"/>
+ <value value="30" name="A7XX_PERF_CMPDECMP_CONCURRENT_RESOLVE_EVENTS"/>
+ <value value="31" name="A7XX_PERF_CMPDECMP_DROPPED_CLEAR_EVENTS"/>
+ <value value="32" name="A7XX_PERF_CMPDECMP_ST_BLOCKS_CONCURRENT"/>
+ <value value="33" name="A7XX_PERF_CMPDECMP_LRZ_ST_BLOCKS_CONCURRENT"/>
+ <value value="34" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="35" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="36" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="37" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="38" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="39" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="40" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="41" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="42" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG0_COUNT"/>
+ <value value="43" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG1_COUNT"/>
+ <value value="44" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG2_COUNT"/>
+ <value value="45" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG3_COUNT"/>
+ <value value="46" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG4_COUNT"/>
+ <value value="47" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG5_COUNT"/>
+ <value value="48" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG6_COUNT"/>
+ <value value="49" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG8_COUNT"/>
+</enum>
+
+<enum name="a7xx_gbif_perfcounter_select">
+ <value value="0" name="A7XX_PERF_GBIF_RESERVED_0"/>
+ <value value="1" name="A7XX_PERF_GBIF_RESERVED_1"/>
+ <value value="2" name="A7XX_PERF_GBIF_RESERVED_2"/>
+ <value value="3" name="A7XX_PERF_GBIF_RESERVED_3"/>
+ <value value="4" name="A7XX_PERF_GBIF_RESERVED_4"/>
+ <value value="5" name="A7XX_PERF_GBIF_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_GBIF_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_GBIF_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_GBIF_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_GBIF_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_GBIF_AXI0_READ_REQUESTS_TOTAL"/>
+ <value value="11" name="A7XX_PERF_GBIF_AXI1_READ_REQUESTS_TOTAL"/>
+ <value value="12" name="A7XX_PERF_GBIF_RESERVED_12"/>
+ <value value="13" name="A7XX_PERF_GBIF_RESERVED_13"/>
+ <value value="14" name="A7XX_PERF_GBIF_RESERVED_14"/>
+ <value value="15" name="A7XX_PERF_GBIF_RESERVED_15"/>
+ <value value="16" name="A7XX_PERF_GBIF_RESERVED_16"/>
+ <value value="17" name="A7XX_PERF_GBIF_RESERVED_17"/>
+ <value value="18" name="A7XX_PERF_GBIF_RESERVED_18"/>
+ <value value="19" name="A7XX_PERF_GBIF_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_GBIF_RESERVED_20"/>
+ <value value="21" name="A7XX_PERF_GBIF_RESERVED_21"/>
+ <value value="22" name="A7XX_PERF_GBIF_AXI0_WRITE_REQUESTS_TOTAL"/>
+ <value value="23" name="A7XX_PERF_GBIF_AXI1_WRITE_REQUESTS_TOTAL"/>
+ <value value="24" name="A7XX_PERF_GBIF_RESERVED_24"/>
+ <value value="25" name="A7XX_PERF_GBIF_RESERVED_25"/>
+ <value value="26" name="A7XX_PERF_GBIF_RESERVED_26"/>
+ <value value="27" name="A7XX_PERF_GBIF_RESERVED_27"/>
+ <value value="28" name="A7XX_PERF_GBIF_RESERVED_28"/>
+ <value value="29" name="A7XX_PERF_GBIF_RESERVED_29"/>
+ <value value="30" name="A7XX_PERF_GBIF_RESERVED_30"/>
+ <value value="31" name="A7XX_PERF_GBIF_RESERVED_31"/>
+ <value value="32" name="A7XX_PERF_GBIF_RESERVED_32"/>
+ <value value="33" name="A7XX_PERF_GBIF_RESERVED_33"/>
+ <value value="34" name="A7XX_PERF_GBIF_AXI0_READ_DATA_BEATS_TOTAL"/>
+ <value value="35" name="A7XX_PERF_GBIF_AXI1_READ_DATA_BEATS_TOTAL"/>
+ <value value="36" name="A7XX_PERF_GBIF_RESERVED_36"/>
+ <value value="37" name="A7XX_PERF_GBIF_RESERVED_37"/>
+ <value value="38" name="A7XX_PERF_GBIF_RESERVED_38"/>
+ <value value="39" name="A7XX_PERF_GBIF_RESERVED_39"/>
+ <value value="40" name="A7XX_PERF_GBIF_RESERVED_40"/>
+ <value value="41" name="A7XX_PERF_GBIF_RESERVED_41"/>
+ <value value="42" name="A7XX_PERF_GBIF_RESERVED_42"/>
+ <value value="43" name="A7XX_PERF_GBIF_RESERVED_43"/>
+ <value value="44" name="A7XX_PERF_GBIF_RESERVED_44"/>
+ <value value="45" name="A7XX_PERF_GBIF_RESERVED_45"/>
+ <value value="46" name="A7XX_PERF_GBIF_AXI0_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="47" name="A7XX_PERF_GBIF_AXI1_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="48" name="A7XX_PERF_GBIF_RESERVED_48"/>
+ <value value="49" name="A7XX_PERF_GBIF_RESERVED_49"/>
+ <value value="50" name="A7XX_PERF_GBIF_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_GBIF_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_GBIF_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_GBIF_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_GBIF_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_GBIF_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_GBIF_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_GBIF_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_GBIF_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_GBIF_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_GBIF_RESERVED_60"/>
+ <value value="61" name="A7XX_PERF_GBIF_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_GBIF_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_GBIF_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_GBIF_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_GBIF_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_GBIF_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_GBIF_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_RD_ALL"/>
+ <value value="69" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_RD_ALL"/>
+ <value value="70" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_WR_ALL"/>
+ <value value="71" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_WR_ALL"/>
+ <value value="72" name="A7XX_PERF_GBIF_AXI_CH0_REQUEST_HELD_OFF"/>
+ <value value="73" name="A7XX_PERF_GBIF_AXI_CH1_REQUEST_HELD_OFF"/>
+ <value value="74" name="A7XX_PERF_GBIF_AXI_REQUEST_HELD_OFF"/>
+ <value value="75" name="A7XX_PERF_GBIF_AXI_CH0_WRITE_DATA_HELD_OFF"/>
+ <value value="76" name="A7XX_PERF_GBIF_AXI_CH1_WRITE_DATA_HELD_OFF"/>
+ <value value="77" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_DATA_HELD_OFF"/>
+ <value value="78" name="A7XX_PERF_GBIF_AXI_ALL_READ_BEATS"/>
+ <value value="79" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_BEATS"/>
+ <value value="80" name="A7XX_PERF_GBIF_AXI_ALL_BEATS"/>
+</enum>
+
+<enum name="a7xx_ufc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UFC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UFC_READ_DATA_VBIF"/>
+ <value value="2" name="A7XX_PERF_UFC_WRITE_DATA_VBIF"/>
+ <value value="3" name="A7XX_PERF_UFC_READ_REQUEST_VBIF"/>
+ <value value="4" name="A7XX_PERF_UFC_WRITE_REQUEST_VBIF"/>
+ <value value="5" name="A7XX_PERF_UFC_LRZ_FILTER_HIT"/>
+ <value value="6" name="A7XX_PERF_UFC_LRZ_FILTER_MISS"/>
+ <value value="7" name="A7XX_PERF_UFC_CRE_FILTER_HIT"/>
+ <value value="8" name="A7XX_PERF_UFC_CRE_FILTER_MISS"/>
+ <value value="9" name="A7XX_PERF_UFC_SP_FILTER_HIT"/>
+ <value value="10" name="A7XX_PERF_UFC_SP_FILTER_MISS"/>
+ <value value="11" name="A7XX_PERF_UFC_SP_REQUESTS"/>
+ <value value="12" name="A7XX_PERF_UFC_TP_FILTER_HIT"/>
+ <value value="13" name="A7XX_PERF_UFC_TP_FILTER_MISS"/>
+ <value value="14" name="A7XX_PERF_UFC_TP_REQUESTS"/>
+ <value value="15" name="A7XX_PERF_UFC_MAIN_HIT_LRZ_PREFETCH"/>
+ <value value="16" name="A7XX_PERF_UFC_MAIN_HIT_CRE_PREFETCH"/>
+ <value value="17" name="A7XX_PERF_UFC_MAIN_HIT_SP_PREFETCH"/>
+ <value value="18" name="A7XX_PERF_UFC_MAIN_HIT_TP_PREFETCH"/>
+ <value value="19" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_READ"/>
+ <value value="20" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_WRITE"/>
+ <value value="21" name="A7XX_PERF_UFC_MAIN_MISS_LRZ_PREFETCH"/>
+ <value value="22" name="A7XX_PERF_UFC_MAIN_MISS_CRE_PREFETCH"/>
+ <value value="23" name="A7XX_PERF_UFC_MAIN_MISS_SP_PREFETCH"/>
+ <value value="24" name="A7XX_PERF_UFC_MAIN_MISS_TP_PREFETCH"/>
+ <value value="25" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_READ"/>
+ <value value="26" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_WRITE"/>
+ <value value="27" name="A7XX_PERF_UFC_UBWC_READ_UFC_TRANS"/>
+ <value value="28" name="A7XX_PERF_UFC_UBWC_WRITE_UFC_TRANS"/>
+ <value value="29" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_CMD"/>
+ <value value="30" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_RDATA"/>
+ <value value="31" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_WDATA"/>
+ <value value="32" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_WR_FLAG"/>
+ <value value="33" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_FLAG_RTN"/>
+ <value value="34" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_EVENT"/>
+ <value value="35" name="A7XX_PERF_UFC_LRZ_PREFETCH_STALLED_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UFC_CRE_PREFETCH_STALLED_CYCLES"/>
+ <value value="37" name="A7XX_PERF_UFC_SPTP_PREFETCH_STALLED_CYCLES"/>
+ <value value="38" name="A7XX_PERF_UFC_UBWC_RD_STALLED_CYCLES"/>
+ <value value="39" name="A7XX_PERF_UFC_UBWC_WR_STALLED_CYCLES"/>
+ <value value="40" name="A7XX_PERF_UFC_PREFETCH_STALLED_CYCLES"/>
+ <value value="41" name="A7XX_PERF_UFC_EVICTION_STALLED_CYCLES"/>
+ <value value="42" name="A7XX_PERF_UFC_LOCK_STALLED_CYCLES"/>
+ <value value="43" name="A7XX_PERF_UFC_MISS_LATENCY_CYCLES"/>
+ <value value="44" name="A7XX_PERF_UFC_MISS_LATENCY_SAMPLES"/>
+ <value value="45" name="A7XX_PERF_UFC_UBWC_REQ_STALLED_CYCLES"/>
+ <value value="46" name="A7XX_PERF_UFC_TP_HINT_TAG_MISS"/>
+ <value value="47" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_RDY"/>
+ <value value="48" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_NRDY"/>
+ <value value="49" name="A7XX_PERF_UFC_TP_HINT_IS_FCLEAR"/>
+ <value value="50" name="A7XX_PERF_UFC_TP_HINT_IS_ALPHA0"/>
+ <value value="51" name="A7XX_PERF_UFC_SP_L1_FILTER_HIT"/>
+ <value value="52" name="A7XX_PERF_UFC_SP_L1_FILTER_MISS"/>
+ <value value="53" name="A7XX_PERF_UFC_SP_L1_FILTER_REQUESTS"/>
+ <value value="54" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_RDY"/>
+ <value value="55" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_NRDY"/>
+ <value value="56" name="A7XX_PERF_UFC_TP_L1_TAG_MISS"/>
+ <value value="57" name="A7XX_PERF_UFC_TP_L1_FILTER_REQUESTS"/>
+</enum>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 5a6ae9fc3194..7abc08635495 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -21,9 +21,9 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="HLSQ_FLUSH" value="7" variants="A3XX-A4XX"/>
<value name="VIZQUERY_END" value="8" variants="A2XX"/>
<value name="SC_WAIT_WC" value="9" variants="A2XX"/>
- <value name="WRITE_PRIMITIVE_COUNTS" value="9" variants="A6XX"/>
- <value name="START_PRIMITIVE_CTRS" value="11" variants="A6XX"/>
- <value name="STOP_PRIMITIVE_CTRS" value="12" variants="A6XX"/>
+ <value name="WRITE_PRIMITIVE_COUNTS" value="9" variants="A6XX-"/>
+ <value name="START_PRIMITIVE_CTRS" value="11" variants="A6XX-"/>
+ <value name="STOP_PRIMITIVE_CTRS" value="12" variants="A6XX-"/>
<!-- Not sure that these 4 events don't have the same meaning as on A5XX+ -->
<value name="RST_PIX_CNT" value="13" variants="A2XX-A4XX"/>
<value name="RST_VTX_CNT" value="14" variants="A2XX-A4XX"/>
@@ -31,8 +31,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="STAT_EVENT" value="16" variants="A2XX-A4XX"/>
<value name="CACHE_FLUSH_AND_INV_TS_EVENT" value="20" variants="A2XX-A4XX"/>
<doc>
- If A6XX_RB_SAMPLE_COUNT_CONTROL.copy is true, writes OQ Z passed
- sample counts to RB_SAMPLE_COUNT_ADDR. This writes to main
+ If A6XX_RB_SAMPLE_COUNTER_CNTL.copy is true, writes OQ Z passed
+ sample counts to RB_SAMPLE_COUNTER_BASE. This writes to main
memory, skipping UCHE.
</doc>
<value name="ZPASS_DONE" value="21"/>
@@ -98,6 +98,13 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="BLIT" value="30" variants="A5XX-"/>
<doc>
+ Flip between the primary and secondary LRZ buffers. This is used
+ for concurrent binning, so that BV can write to one buffer while
+ BR reads from the other.
+ </doc>
+ <value name="LRZ_FLIP_BUFFER" value="36" variants="A7XX"/>
+
+ <doc>
Clears based on GRAS_LRZ_CNTL configuration, could clear
fast-clear buffer or LRZ direction.
LRZ direction is stored at lrz_fc_offset + 0x200, has 1 byte which
@@ -114,6 +121,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="BLIT_OP_FILL_2D" value="39" variants="A5XX-"/>
<value name="BLIT_OP_COPY_2D" value="40" variants="A5XX-A6XX"/>
<value name="UNK_40" value="40" variants="A7XX"/>
+ <value name="LRZ_Q_CACHE_INVALIDATE" value="41" variants="A7XX"/>
<value name="BLIT_OP_SCALE_2D" value="42" variants="A5XX-"/>
<value name="CONTEXT_DONE_2D" value="43" variants="A5XX-"/>
<value name="UNK_2C" value="44" variants="A5XX-"/>
@@ -372,7 +380,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_LOAD_STATE" value="0x30" variants="A3XX"/>
<value name="CP_LOAD_STATE4" value="0x30" variants="A4XX-A5XX"/>
 <doc>Conditionally load an IB based on a flag, prefetch enabled</doc>
- <value name="CP_COND_INDIRECT_BUFFER_PFE" value="0x3a"/>
+ <value name="CP_COND_INDIRECT_BUFFER_PFE" value="0x3a" variants="A3XX-A5XX"/>
 <doc>Conditionally load an IB based on a flag, prefetch disabled</doc>
<value name="CP_COND_INDIRECT_BUFFER_PFD" value="0x32" variants="A3XX"/>
<doc>Load a buffer with pre-fetch enabled</doc>
@@ -538,7 +546,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_LOAD_STATE6_GEOM" value="0x32" variants="A6XX-"/>
<value name="CP_LOAD_STATE6_FRAG" value="0x34" variants="A6XX-"/>
<!--
- Note: For IBO state (Image/SSBOs) which have shared state across
+ Note: For UAV state (Image/SSBOs) which have shared state across
shader stages, for 3d pipeline CP_LOAD_STATE6 is used. But for
compute shaders, CP_LOAD_STATE6_FRAG is used. Possibly they are
 interchangeable.
@@ -567,7 +575,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="IN_PREEMPT" value="0x0f" variants="A6XX-"/>
<!-- TODO do these exist on A5xx? -->
- <value name="CP_SCRATCH_WRITE" value="0x4c" variants="A6XX"/>
+ <value name="CP_SCRATCH_WRITE" value="0x4c" variants="A6XX-"/>
<value name="CP_REG_TO_MEM_OFFSET_MEM" value="0x74" variants="A6XX-"/>
<value name="CP_REG_TO_MEM_OFFSET_REG" value="0x72" variants="A6XX-"/>
<value name="CP_WAIT_MEM_GTE" value="0x14" variants="A6XX"/>
@@ -650,6 +658,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<doc>Reset various on-chip state used for synchronization</doc>
<value name="CP_RESET_CONTEXT_STATE" value="0x1f" variants="A7XX-"/>
+
+ <doc>Invalidates the "CCHE" introduced on a740</doc>
+ <value name="CP_CCHE_INVALIDATE" value="0x3a" variants="A7XX-"/>
+
+ <value name="CP_SCOPE_CNTL" value="0x6c" variants="A7XX-"/>
</enum>
@@ -792,14 +805,14 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<value name="SB6_GS_SHADER" value="0xb"/>
<value name="SB6_FS_SHADER" value="0xc"/>
<value name="SB6_CS_SHADER" value="0xd"/>
- <value name="SB6_IBO" value="0xe"/>
- <value name="SB6_CS_IBO" value="0xf"/>
+ <value name="SB6_UAV" value="0xe"/>
+ <value name="SB6_CS_UAV" value="0xf"/>
</enum>
<enum name="a6xx_state_type">
<value name="ST6_SHADER" value="0"/>
<value name="ST6_CONSTANTS" value="1"/>
<value name="ST6_UBO" value="2"/>
- <value name="ST6_IBO" value="3"/>
+ <value name="ST6_UAV" value="3"/>
</enum>
<enum name="a6xx_state_src">
<value name="SS6_DIRECT" value="0"/>
@@ -1121,39 +1134,93 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
+<enum name="a7xx_abs_mask_mode">
+ <value name="ABS_MASK" value="0x1"/>
+ <value name="NO_ABS_MASK" value="0x0"/>
+</enum>
+
<domain name="CP_SET_BIN_DATA5" width="32">
<reg32 offset="0" name="0">
+ <bitfield name="VSC_MASK" low="0" high="15" type="hex">
+ <doc>
+ A mask of bins, starting at VSC_N, whose
+ visibility is OR'd together. A value of 0 is
+ interpreted as 1 (i.e. just use VSC_N for
+ visibility) for backwards compatibility. Only
+ exists on a7xx.
+ </doc>
+ </bitfield>
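+ <!--
+ An illustrative example (values arbitrary): with VSC_N=4 and
+ VSC_MASK=0x7, the visibility of bins 4, 5 and 6 is OR'd together.
+ -->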
<!-- equiv to PC_VSTREAM_CONTROL.SIZE on a3xx/a4xx: -->
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<!-- equiv to PC_VSTREAM_CONTROL.N on a3xx/a4xx: -->
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
+ <bitfield name="ABS_MASK" pos="28" type="a7xx_abs_mask_mode" addvariant="yes">
+ <doc>
+ If this field is 1, VSC_MASK and VSC_N are
+ ignored and instead a new ordinal immediately
+ after specifies the full 32-bit mask of bins
+ to use. The mask is "absolute" instead of
+ relative to VSC_N.
+ </doc>
+ </bitfield>
</reg32>
- <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="1" name="1">
- <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
- <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="3" name="3">
- <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="4" name="4">
- <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
- </reg32>
- <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
- <reg32 offset="5" name="5">
- <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="6" name="6">
- <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
- </reg32>
- <!--
- a7xx adds a few more addresses to the end of the pkt
- -->
- <reg64 offset="7" name="7"/>
- <reg64 offset="9" name="9"/>
+ <stripe varset="a7xx_abs_mask_mode" variants="NO_ABS_MASK">
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="1" name="1">
+ <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
+ </reg32>
+ <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
+ <reg32 offset="5" name="5">
+ <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
+ </reg32>
+ <!--
+ a7xx adds a few more addresses to the end of the pkt
+ -->
+ <reg64 offset="7" name="7"/>
+ <reg64 offset="9" name="9"/>
+ </stripe>
+ <stripe varset="a7xx_abs_mask_mode" variants="ABS_MASK">
+ <reg32 offset="1" name="ABS_MASK"/>
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
+ </reg32>
+ <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
+ <reg32 offset="6" name="6">
+ <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="7" name="7">
+ <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
+ </reg32>
+ <!--
+ a7xx adds a few more addresses to the end of the pkt
+ -->
+ <reg64 offset="8" name="8"/>
+ <reg64 offset="10" name="10"/>
+ </stripe>
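+ <!--
+ Illustrative layout difference, derived from the two stripes above:
+ with ABS_MASK=1, ordinal 1 holds the full 32-bit bin mask (e.g.
+ 0xffffffff to use every bin) and BIN_DATA_ADDR, BIN_SIZE_ADDRESS and
+ BIN_PRIM_STRM each land one ordinal later than in the NO_ABS_MASK
+ layout.
+ -->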
</domain>
<domain name="CP_SET_BIN_DATA5_OFFSET" width="32">
@@ -1164,23 +1231,42 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
stream is recorded.
</doc>
<reg32 offset="0" name="0">
+ <bitfield name="VSC_MASK" low="0" high="15" type="hex"/>
<!-- equiv to PC_VSTREAM_CONTROL.SIZE on a3xx/a4xx: -->
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<!-- equiv to PC_VSTREAM_CONTROL.N on a3xx/a4xx: -->
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
+ <bitfield name="ABS_MASK" pos="28" type="a7xx_abs_mask_mode" addvariant="yes"/>
</reg32>
- <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="1" name="1">
- <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
- </reg32>
- <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="2" name="2">
- <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
- </reg32>
- <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
- <reg32 offset="3" name="3">
- <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
- </reg32>
+ <stripe varset="a7xx_abs_mask_mode" variants="NO_ABS_MASK">
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="1" name="1">
+ <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ </stripe>
+ <stripe varset="a7xx_abs_mask_mode" variants="ABS_MASK">
+ <reg32 offset="1" name="ABS_MASK"/>
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ </stripe>
</domain>
<domain name="CP_REG_RMW" width="32">
@@ -1198,6 +1284,9 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</doc>
<reg32 offset="0" name="0">
<bitfield name="DST_REG" low="0" high="17" type="hex"/>
+ <bitfield name="DST_SCRATCH" pos="19" type="boolean" varset="chip" variants="A7XX-"/>
+ <!-- skip implied CP_WAIT_FOR_IDLE + CP_WAIT_FOR_ME -->
+ <bitfield name="SKIP_WAIT_FOR_ME" pos="23" type="boolean" varset="chip" variants="A7XX-"/>
<bitfield name="ROTATE" low="24" high="28" type="uint"/>
<bitfield name="SRC1_ADD" pos="29" type="boolean"/>
<bitfield name="SRC1_IS_REG" pos="30" type="boolean"/>
@@ -1348,6 +1437,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="SCRATCH" low="20" high="22" type="uint"/>
<!-- number of registers/dwords copied is CNT + 1. -->
<bitfield name="CNT" low="24" high="26" type="uint"/>
+ <!-- skip implied CP_WAIT_FOR_IDLE + CP_WAIT_FOR_ME -->
+ <bitfield name="SKIP_WAIT_FOR_ME" pos="27" type="boolean" varset="chip" variants="A7XX-"/>
</reg32>
</domain>
@@ -1655,8 +1746,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="WRITE_SAMPLE_COUNT" pos="12" type="boolean"/>
<!-- Write sample count at (iova + 16) -->
<bitfield name="SAMPLE_COUNT_END_OFFSET" pos="13" type="boolean"/>
- <!-- *(iova + 8) = *(iova + 16) - *iova -->
- <bitfield name="WRITE_SAMPLE_COUNT_DIFF" pos="14" type="boolean"/>
+ <!-- *(iova + 8) += *(iova + 16) - *iova -->
+ <bitfield name="WRITE_ACCUM_SAMPLE_COUNT_DIFF" pos="14" type="boolean"/>
<!-- Next 4 flags are valid to set only when concurrent binning is enabled -->
<!-- Increment 16b BV counter. Valid only in BV pipe -->
@@ -1670,15 +1761,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="WRITE_DST" pos="24" type="event_write_dst" addvariant="yes"/>
<!-- Writes into WRITE_DST from WRITE_SRC. RB_DONE_TS requires WRITE_ENABLED. -->
<bitfield name="WRITE_ENABLED" pos="27" type="boolean"/>
+ <bitfield name="IRQ" pos="31" type="boolean"/>
</reg32>
<stripe varset="event_write_dst" variants="EV_DST_RAM">
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_0_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="ADDR_0_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="1" type="waddress"/>
<reg32 offset="3" name="3">
<bitfield name="PAYLOAD_0" low="0" high="31"/>
</reg32>
@@ -1773,13 +1860,23 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<domain name="CP_SET_MARKER" width="32" varset="chip" prefix="chip" variants="A6XX-">
 <doc>Tell CP the current operation mode; this indicates the save and restore procedure</doc>
+ <enum name="set_marker_mode">
+ <value value="0" name="SET_RENDER_MODE"/>
+ <!-- IFPC - inter-frame power collapse -->
+ <value value="1" name="SET_IFPC_MODE"/>
+ </enum>
+ <enum name="a6xx_ifpc_mode">
+ <value value="0" name="IFPC_ENABLE"/>
+ <value value="1" name="IFPC_DISABLE"/>
+ </enum>
<enum name="a6xx_marker">
- <value value="1" name="RM6_BYPASS"/>
- <value value="2" name="RM6_BINNING"/>
- <value value="4" name="RM6_GMEM"/>
- <value value="5" name="RM6_ENDVIS"/>
- <value value="6" name="RM6_RESOLVE"/>
- <value value="7" name="RM6_YIELD"/>
+ <value value="1" name="RM6_DIRECT_RENDER"/>
+ <value value="2" name="RM6_BIN_VISIBILITY"/>
+ <value value="3" name="RM6_BIN_DIRECT"/>
+ <value value="4" name="RM6_BIN_RENDER_START"/>
+ <value value="5" name="RM6_BIN_END_OF_DRAWS"/>
+ <value value="6" name="RM6_BIN_RESOLVE"/>
+ <value value="7" name="RM6_BIN_RENDER_END"/>
<value value="8" name="RM6_COMPUTE"/>
<value value="0xc" name="RM6_BLIT2DSCALE"/> <!-- no-op (at least on current sqe fw) -->
@@ -1789,23 +1886,40 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
-->
<value value="0xd" name="RM6_IB1LIST_START"/>
<value value="0xe" name="RM6_IB1LIST_END"/>
- <!-- IFPC - inter-frame power collapse -->
- <value value="0x100" name="RM6_IFPC_ENABLE"/>
- <value value="0x101" name="RM6_IFPC_DISABLE"/>
</enum>
<reg32 offset="0" name="0">
+ <!-- if b8 is set, the low bits are interpreted differently (and b4 ignored) -->
+ <bitfield name="MARKER_MODE" pos="8" type="set_marker_mode" addvariant="yes"/>
+
+ <bitfield name="MODE" low="0" high="3" type="a6xx_marker" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+ <!-- used by preemption to determine if GMEM needs to be saved or not -->
+ <bitfield name="USES_GMEM" pos="4" type="boolean" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+
+ <bitfield name="IFPC_MODE" pos="0" type="a6xx_ifpc_mode" varset="set_marker_mode" variants="SET_IFPC_MODE"/>
+
<!--
- NOTE: blob driver and some versions of freedreno/turnip set
- b4, which is unused (at least by current sqe fw), but interferes
- with parsing if we extend the size of the bitfield to include
- b8 (only sent by kernel mode driver). Really, the way the
- parsing works in the firmware, only b0-b3 are considered, but
- if b8 is set, the low bits are interpreted differently. To
- model this, without getting confused by spurious b4, this is
- described as two overlapping bitfields:
- -->
- <bitfield name="MODE" low="0" high="8" type="a6xx_marker"/>
- <bitfield name="MARKER" low="0" high="3" type="a6xx_marker"/>
+ CP_SET_MARKER is used with these bits to create a
+ critical section around a workaround for ray tracing.
+ The workaround happens after BVH building, and appears
+ to invalidate the RTU's BVH node cache. By hooking
+ subsequent CP_EVENT_WRITE/CP_DRAW_*/CP_EXEC_CS packets,
+ these markers ensure that only one of BR/BV/LPAC executes
+ the workaround at a time, and that no draws using RT run
+ on the other pipes while it executes (no RT draws on
+ BV/LPAC while the workaround runs on BR, and likewise
+ none on BV/BR while it runs on LPAC).
+ The blob usage is:
+
+ CP_SET_MARKER(RT_WA_START)
+ ... workaround here ...
+ CP_SET_MARKER(RT_WA_END)
+ ...
+ CP_SET_MARKER(SHADER_USES_RT)
+ CP_DRAW_INDX(...) or CP_EXEC_CS(...)
+ -->
+ <bitfield name="SHADER_USES_RT" pos="9" type="boolean" variants="A7XX-"/>
+ <bitfield name="RT_WA_START" pos="10" type="boolean" variants="A7XX-"/>
+ <bitfield name="RT_WA_END" pos="11" type="boolean" variants="A7XX-"/>
</reg32>
</domain>
@@ -1832,9 +1946,9 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
If concurrent binning is disabled then BR also does binning so it will also
write the "real" registers in BR.
-->
- <value value="8" name="DRAW_STRM_ADDRESS"/>
- <value value="9" name="DRAW_STRM_SIZE_ADDRESS"/>
- <value value="10" name="PRIM_STRM_ADDRESS"/>
+ <value value="8" name="VSC_PIPE_DATA_DRAW_BASE"/>
+ <value value="9" name="VSC_SIZE_BASE"/>
+ <value value="10" name="VSC_PIPE_DATA_PRIM_BASE"/>
<value value="11" name="UNK_STRM_ADDRESS"/>
<value value="12" name="UNK_STRM_SIZE_ADDRESS"/>
@@ -1935,11 +2049,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
a bitmask of which modes pass the test.
-->
- <!-- RM6_BINNING -->
+ <!-- RM6_BIN_VISIBILITY -->
<bitfield name="BINNING" pos="25" variants="RENDER_MODE" type="boolean"/>
<!-- all others -->
<bitfield name="GMEM" pos="26" variants="RENDER_MODE" type="boolean"/>
- <!-- RM6_BYPASS -->
+ <!-- RM6_DIRECT_RENDER -->
<bitfield name="SYSMEM" pos="27" variants="RENDER_MODE" type="boolean"/>
<bitfield name="BV" pos="25" variants="THREAD_MODE" type="boolean"/>
@@ -2014,10 +2128,10 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<domain name="CP_SET_AMBLE" width="32">
<doc>
- Used by the userspace and kernel drivers to set various IB's
- which are executed during context save/restore for handling
- state that isn't restored by the context switch routine itself.
- </doc>
+ Used by the userspace and kernel drivers to set various IB's
+ which are executed during context save/restore for handling
+ state that isn't restored by the context switch routine itself.
+ </doc>
<enum name="amble_type">
<value name="PREAMBLE_AMBLE_TYPE" value="0">
<doc>Executed unconditionally when switching back to the context.</doc>
@@ -2087,12 +2201,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<value name="UNK_EVENT_WRITE" value="0x4"/>
<doc>
Tracks GRAS_LRZ_CNTL::GREATER, GRAS_LRZ_CNTL::DIR, and
- GRAS_LRZ_DEPTH_VIEW with previous values, and if one of
+ GRAS_LRZ_VIEW_INFO with previous values, and if one of
the following is true:
- GRAS_LRZ_CNTL::GREATER has changed
- GRAS_LRZ_CNTL::DIR has changed, the old value is not
CUR_DIR_GE, and the new value is not CUR_DIR_DISABLED
- - GRAS_LRZ_DEPTH_VIEW has changed
+ - GRAS_LRZ_VIEW_INFO has changed
then it does a LRZ_FLUSH with GRAS_LRZ_CNTL::ENABLE
forced to 1.
Only exists in a650_sqe.fw.
@@ -2207,7 +2321,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<domain name="CP_MEM_TO_SCRATCH_MEM" width="32">
<doc>
- Best guess is that it is a faster way to fetch all the VSC_STATE registers
+ Best guess is that it is a faster way to fetch all the VSC_CHANNEL_VISIBILITY registers
and keep them in a local scratch memory instead of fetching every time
when skipping IBs.
</doc>
@@ -2255,7 +2369,18 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<reg32 offset="0" name="0">
<bitfield name="CLEAR_ON_CHIP_TS" pos="0" type="boolean"/>
<bitfield name="CLEAR_RESOURCE_TABLE" pos="1" type="boolean"/>
- <bitfield name="CLEAR_GLOBAL_LOCAL_TS" pos="2" type="boolean"/>
+ <bitfield name="CLEAR_BV_BR_COUNTER" pos="2" type="boolean"/>
+ <bitfield name="RESET_GLOBAL_LOCAL_TS" pos="3" type="boolean"/>
+ </reg32>
+</domain>
+
+<domain name="CP_SCOPE_CNTL" width="32">
+ <enum name="cp_scope">
+ <value value="0" name="INTERRUPTS"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="DISABLE_PREEMPTION" pos="0" type="boolean"/>
+ <bitfield low="28" high="31" name="SCOPE" type="cp_scope"/>
</reg32>
</domain>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index d2c8c46bb041..4e5ac0f25dea 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -26,6 +26,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00028" name="CTRL_1"/>
<reg32 offset="0x0002c" name="CTRL_2"/>
<reg32 offset="0x00030" name="CTRL_3"/>
+ <reg32 offset="0x001b0" name="CTRL_5"/>
<reg32 offset="0x00034" name="LANE_CFG0"/>
<reg32 offset="0x00038" name="LANE_CFG1"/>
<reg32 offset="0x0003c" name="PLL_CNTRL"/>
@@ -191,11 +192,24 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x01b0" name="COMMON_STATUS_ONE"/>
<reg32 offset="0x01b4" name="COMMON_STATUS_TWO"/>
<reg32 offset="0x01b8" name="BAND_SEL_CAL"/>
+ <!--
+ Starting with SM8750 this offset moved from 0x01bc to 0x01cc, but
+ we keep only one register map. That is not a problem so far because
+ this register is unused. The register map should be split once the
+ register is actually needed. The entries are commented out to
+ prevent any misuse due to the offset change.
<reg32 offset="0x01bc" name="ICODE_ACCUM_STATUS_LOW"/>
+ <reg32 offset="0x01cc" name="ICODE_ACCUM_STATUS_LOW"/>
+ -->
<reg32 offset="0x01c0" name="ICODE_ACCUM_STATUS_HIGH"/>
<reg32 offset="0x01c4" name="FD_OUT_LOW"/>
<reg32 offset="0x01c8" name="FD_OUT_HIGH"/>
+ <!--
+ Starting with SM8750 this offset moved from 0x01cc to 0x01bc, but
+ we keep only one register map. See the comment above.
<reg32 offset="0x01cc" name="ALOG_OBSV_BUS_STATUS_1"/>
+ <reg32 offset="0x01bc" name="ALOG_OBSV_BUS_STATUS_1"/>
+ -->
<reg32 offset="0x01d0" name="PLL_MISC_CONFIG"/>
<reg32 offset="0x01d4" name="FLL_CONFIG"/>
<reg32 offset="0x01d8" name="FLL_FREQ_ACQ_TIME"/>
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
index 3926485bb197..a409404627c7 100644
--- a/drivers/gpu/drm/msm/registers/gen_header.py
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -11,6 +11,7 @@ import collections
import argparse
import time
import datetime
+import re
class Error(Exception):
def __init__(self, message):
@@ -877,13 +878,14 @@ The rules-ng-ng source files this header was generated from are:
""")
maxlen = 0
for filepath in p.xml_files:
- maxlen = max(maxlen, len(filepath))
+ new_filepath = re.sub("^.+drivers", "drivers", filepath)
+ maxlen = max(maxlen, len(new_filepath))
for filepath in p.xml_files:
- pad = " " * (maxlen - len(filepath))
+ pad = " " * (maxlen - len(new_filepath))
filesize = str(os.path.getsize(filepath))
filesize = " " * (7 - len(filesize)) + filesize
filetime = time.ctime(os.path.getmtime(filepath))
- print("- " + filepath + pad + " (" + filesize + " bytes, from " + filetime + ")")
+ print("- " + new_filepath + pad + " (" + filesize + " bytes, from <stripped>)")
if p.copyright_year:
current_year = str(datetime.date.today().year)
print()
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 8ee00f59ca82..fcb2a7517377 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -134,7 +134,6 @@ static int lcdif_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct lcdif_drm_private *lcdif;
- struct resource *res;
int ret;
lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL);
@@ -144,8 +143,7 @@ static int lcdif_load(struct drm_device *drm)
lcdif->drm = drm;
drm->dev_private = lcdif;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lcdif->base = devm_ioremap_resource(drm->dev, res);
+ lcdif->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lcdif->base))
return PTR_ERR(lcdif->base);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 59020862cf65..0b756da2fec2 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -8,6 +8,7 @@
* Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
+#include <linux/aperture.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -90,20 +91,15 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
static struct drm_framebuffer *
mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return ERR_PTR(-EINVAL);
-
if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
@@ -215,7 +211,6 @@ static int mxsfb_load(struct drm_device *drm,
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct mxsfb_drm_private *mxsfb;
- struct resource *res;
int ret;
mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
@@ -226,8 +221,7 @@ static int mxsfb_load(struct drm_device *drm,
drm->dev_private = mxsfb;
mxsfb->devdata = devdata;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxsfb->base = devm_ioremap_resource(drm->dev, res);
+ mxsfb->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxsfb->base))
return PTR_ERR(mxsfb->base);
@@ -361,6 +355,15 @@ static int mxsfb_probe(struct platform_device *pdev)
if (ret)
goto err_free;
+ /*
+ * Remove early framebuffers (i.e. simplefb). The framebuffer can be
+ * located anywhere in RAM.
+ */
+ ret = aperture_remove_all_conflicting_devices(mxsfb_driver.name);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't kick out existing framebuffers\n");
+
ret = drm_dev_register(drm, 0);
if (ret)
goto err_unload;
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 7b863355c5c6..385d24530d1e 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -2,6 +2,7 @@
ccflags-y += -I $(src)/include
ccflags-y += -I $(src)/include/nvkm
ccflags-y += -I $(src)/nvkm
+ccflags-y += -I $(src)/nvkm/subdev/gsp
ccflags-y += -I $(src)
# NVKM - HW resource manager
@@ -68,5 +69,6 @@ nouveau-y += nv17_fence.o
nouveau-y += nv50_fence.o
nouveau-y += nv84_fence.o
nouveau-y += nvc0_fence.o
+nouveau-y += gv100_fence.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 7b3e979c51ec..d1587639ebb0 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select IOMMU_API
select FW_LOADER
select FW_CACHE if PM_SLEEP
@@ -94,7 +94,6 @@ config DRM_NOUVEAU_SVM
bool "(EXPERIMENTAL) Enable SVM (Shared Virtual Memory) support"
depends on DEVICE_PRIVATE
depends on DRM_NOUVEAU
- depends on MMU
depends on STAGING
select HMM_MIRROR
select MMU_NOTIFIER
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 67146f1e8482..c063756eaea3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -768,9 +768,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
disp->image[nv_crtc->index] = NULL;
}
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_fini(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin_del(&nv_crtc->cursor.nvbo);
nvif_event_dtor(&nv_crtc->vblank);
nvif_head_dtor(&nv_crtc->head);
kfree(nv_crtc);
@@ -1303,6 +1301,7 @@ nv04_crtc_vblank_handler(struct nvif_event *event, void *repv, u32 repc)
int
nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
+ struct nouveau_cli *cli = &nouveau_drm(dev)->client;
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_crtc *nv_crtc;
struct drm_plane *primary;
@@ -1336,20 +1335,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
- &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
- NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- }
- if (ret)
- nouveau_bo_fini(nv_crtc->cursor.nvbo);
- }
+ ret = nouveau_bo_new_map(cli, NOUVEAU_GEM_DOMAIN_VRAM, 64 * 64 * 4, &nv_crtc->cursor.nvbo);
+ if (ret)
+ return ret;
nv04_cursor_init(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 28be2912ff74..d5049dee4b8c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -9,11 +9,13 @@ nouveau-y += dispnv50/core907d.o
nouveau-y += dispnv50/core917d.o
nouveau-y += dispnv50/corec37d.o
nouveau-y += dispnv50/corec57d.o
+nouveau-y += dispnv50/coreca7d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc57d.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcca7d.o
nouveau-y += dispnv50/dac507d.o
nouveau-y += dispnv50/dac907d.o
@@ -31,6 +33,7 @@ nouveau-y += dispnv50/head907d.o
nouveau-y += dispnv50/head917d.o
nouveau-y += dispnv50/headc37d.o
nouveau-y += dispnv50/headc57d.o
+nouveau-y += dispnv50/headca7d.o
nouveau-y += dispnv50/wimm.o
nouveau-y += dispnv50/wimmc37b.o
@@ -39,6 +42,7 @@ nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
nouveau-y += dispnv50/wndwc57e.o
nouveau-y += dispnv50/wndwc67e.o
+nouveau-y += dispnv50/wndwca7e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index f045515696cb..c6331bf97582 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { GB202_DISP_CORE_CHANNEL_DMA, 0, coreca7d_new },
{ AD102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index f75088186fba..aa07a3ad5dfd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -7,7 +7,10 @@
struct nv50_core {
const struct nv50_core_func *func;
+ struct nv50_disp *disp;
+
struct nv50_dmac chan;
+
bool assign_windows;
};
@@ -18,6 +21,7 @@ struct nv50_core_func {
int (*init)(struct nv50_core *);
void (*ntfy_init)(struct nouveau_bo *, u32 offset);
int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
+ u32 caps_class;
int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
int (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
@@ -70,4 +74,6 @@ int corec37d_wndw_owner(struct nv50_core *);
extern const struct nv50_outp_func sorc37d;
int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
+
+int coreca7d_new(struct nouveau_drm *, s32, struct nv50_core **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index ce2cb78bbdd3..4b947b67a844 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -165,6 +165,7 @@ core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
return -ENOMEM;
core->func = func;
+ core->disp = disp;
ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 7f637b8830be..83eec2f091f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -105,7 +105,7 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
int ret;
ret = nvif_object_ctor(&disp->disp->object, "dispCaps", 0,
- GV100_DISP_CAPS, NULL, 0, &disp->caps);
+ disp->core->func->caps_class, NULL, 0, &disp->caps);
if (ret) {
NV_ERROR(drm,
"Failed to init notifier caps region: %d\n",
@@ -162,6 +162,7 @@ corec37d = {
.init = corec37d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
+ .caps_class = GV100_DISP_CAPS,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 421d0d57e1d8..39be576eadcb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -22,6 +22,7 @@
#include "core.h"
#include "head.h"
+#include <nvif/class.h>
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57d.h>
@@ -63,6 +64,7 @@ corec57d = {
.init = corec57d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
+ .caps_class = GV100_DISP_CAPS,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c
new file mode 100644
index 000000000000..171727be400e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nvif/class.h>
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+#include <nouveau_bo.h>
+
+static int
+coreca7d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
+{
+ const u64 ntfy_addr = core->disp->sync->offset + NV50_DISP_CORE_NTFY;
+ const u32 ntfy_hi = upper_32_bits(ntfy_addr);
+ const u32 ntfy_lo = lower_32_bits(ntfy_addr);
+ struct nvif_push *push = &core->chan.push;
+ int ret;
+
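+ /* 5 dwords for the interlock and update methods, plus 7 more when a notifier write is requested. */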
+ ret = PUSH_WAIT(push, 5 + (ntfy ? 5 + 2 : 0));
+ if (ret)
+ return ret;
+
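+ /* Arm a notifier write pointing at the shared sync BO, so completion of this update can be observed. */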
+ if (ntfy) {
+ PUSH_MTHD(push, NVCA7D, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi,
+
+ SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVVAL(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) |
+ NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
+ }
+
+ PUSH_MTHD(push, NVCA7D, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS],
+ SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+
+ PUSH_MTHD(push, NVCA7D, UPDATE,
+ NVDEF(NVCA7D, UPDATE, RELEASE_ELV, TRUE) |
+ NVDEF(NVCA7D, UPDATE, SPECIAL_HANDLING, NONE) |
+ NVDEF(NVCA7D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+ }
+
+ return PUSH_KICK(push);
+}
+
+static int
+coreca7d_init(struct nv50_core *core)
+{
+ struct nvif_push *push = &core->chan.push;
+ const u32 windows = 8, heads = 4;
+ int ret, i;
+
+ ret = PUSH_WAIT(push, windows * 6 + heads * 6);
+ if (ret)
+ return ret;
+
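+ /* Advertise the packed-RGB formats each window accepts and bind logical window i to physical window i. */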
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE),
+
+ WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+ PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+ NVVAL(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, ILUT_ALLOWED, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE),
+
+ WINDOW_SET_PHYSICAL(i), BIT(i));
+ }
+
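+ /* Per-head bounds: up to a 256x256 cursor, an OLUT, 2-tap scaling with upscaling allowed, and one tile per head. */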
+ for (i = 0; i < heads; i++) {
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OLUT_ALLOWED, TRUE) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_TILE_MASK(i), BIT(i));
+
+ PUSH_MTHD(push, NVCA7D, TILE_SET_TILE_SIZE(i), 0);
+ }
+
+ core->assign_windows = true;
+ return PUSH_KICK(push);
+}
+
+static const struct nv50_core_func
+coreca7d = {
+ .init = coreca7d_init,
+ .ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
+ .caps_class = GB202_DISP_CAPS,
+ .ntfy_wait_done = corec37d_ntfy_wait_done,
+ .update = coreca7d_update,
+ .wndw.owner = corec37d_wndw_owner,
+ .head = &headca7d,
+ .sor = &sorc37d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ .crc = &crcca7d,
+#endif
+};
+
+int
+coreca7d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&coreca7d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index 5936b6b3b15d..deb6af40ef32 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -509,6 +509,10 @@ nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
if (ret)
return ret;
+ /* No CTXDMAs on Blackwell. */
+ if (core->chan.base.user.oclass >= GB202_DISP_CORE_CHANNEL_DMA)
+ return 0;
+
ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
NV50_DISP_HANDLE_CRC_CTX(head, idx),
NV_DMA_IN_MEMORY,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h
index 4823f1fde2dd..75a2009e8193 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h
@@ -94,6 +94,7 @@ void nv50_crc_atomic_clr(struct nv50_head *);
extern const struct nv50_crc_func crc907d;
extern const struct nv50_crc_func crcc37d;
extern const struct nv50_crc_func crcc57d;
+extern const struct nv50_crc_func crcca7d;
#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
struct nv50_crc {};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c
new file mode 100644
index 000000000000..912f59aebe87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "crcc37d.h"
+#include "core.h"
+#include "head.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+static int
+crcca7d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, ctx ? 3 : 2);
+ if (ret)
+ return ret;
+
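+ /* No CTXDMAs on Blackwell: program the CRC notifier with its physical address directly. */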
+ if (ctx) {
+ const u32 crc_hi = upper_32_bits(ctx->mem.addr);
+ const u32 crc_lo = lower_32_bits(ctx->mem.addr);
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CRC(i), crc_hi,
+
+ HEAD_SET_SURFACE_ADDRESS_LO_CRC(i),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ADDRESS_LO, crc_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, ENABLE));
+ } else {
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC(i),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, DISABLE));
+ }
+
+ return 0;
+}
+
+static int
+crcca7d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int primary_crc, ret;
+
+ if (!source) {
+ ret = PUSH_WAIT(push, 1);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i), 0);
+
+ return crcca7d_set_ctx(head, NULL);
+ }
+
+ switch (source) {
+ case NV50_CRC_SOURCE_TYPE_SOR:
+ primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(or);
+ break;
+ case NV50_CRC_SOURCE_TYPE_SF:
+ primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF;
+ break;
+ default:
+ break;
+ }
+
+ ret = crcca7d_set_ctx(head, ctx);
+ if (ret)
+ return ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i),
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVVAL(NVCA7D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, primary_crc) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE));
+
+ return 0;
+}
+
+const struct nv50_crc_func
+crcca7d = {
+ .set_src = crcca7d_set_src,
+ .set_ctx = crcca7d_set_ctx,
+ .get_entry = crcc37d_get_entry,
+ .ctx_finished = crcc37d_ctx_finished,
+ .flip_threshold = CRCC37D_FLIP_THRESHOLD,
+ .num_entries = CRCC37D_MAX_ENTRIES,
+ .notifier_len = sizeof(struct crcc37d_notifier),
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index 31d8b2e4791d..557bd05240fa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { GB202_DISP_CURSOR, 0, cursc37a_new },
{ GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 504cb3f2054b..e97e39abf3a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -279,6 +279,16 @@ nv50_dmac_create(struct nouveau_drm *drm,
if (syncbuf < 0)
return 0;
+ /* No CTXDMAs on Blackwell. */
+ if (disp->oclass >= GB202_DISP) {
+ /* "handle != NULL_HANDLE" is used to determine enable status
+ * in a number of places, so fill in some fake object handles.
+ */
+ dmac->sync.handle = NV50_DISP_HANDLE_SYNCBUF;
+ dmac->vram.handle = NV50_DISP_HANDLE_VRAM;
+ return 0;
+ }
+
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
@@ -775,10 +785,8 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
union hdmi_infoframe infoframe = { 0 };
const u8 rekey = 56; /* binary driver, and tegra, constant */
u32 max_ac_packet;
- struct {
- struct nvif_outp_infoframe_v0 infoframe;
- u8 data[17];
- } args = { 0 };
+ DEFINE_RAW_FLEX(struct nvif_outp_infoframe_v0, args, data, 17);
+ const u8 data_len = __member_size(args->data);
int ret, size;
max_ac_packet = mode->htotal - mode->hdisplay;
@@ -815,29 +823,29 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
return;
/* AVI InfoFrame. */
- args.infoframe.version = 0;
- args.infoframe.head = nv_crtc->index;
+ args->version = 0;
+ args->head = nv_crtc->index;
if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) {
drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode,
HDMI_QUANTIZATION_RANGE_FULL);
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
} else {
size = 0;
}
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, args, size);
/* Vendor InfoFrame. */
- memset(&args.data, 0, sizeof(args.data));
+ memset(args->data, 0, data_len);
if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
&nv_connector->base, mode))
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
else
size = 0;
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, args, size);
nv_encoder->hdmi.enabled = true;
}
@@ -1831,7 +1839,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
backlight = nv_connector->backlight;
if (backlight && backlight->uses_dpcd)
drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info,
- (u16)backlight->dev->props.brightness);
+ backlight->dev->props.brightness);
#endif
break;
@@ -2810,10 +2818,7 @@ nv50_display_destroy(struct drm_device *dev)
nvif_object_dtor(&disp->caps);
nv50_core_del(&disp->core);
- nouveau_bo_unmap(disp->sync);
- if (disp->sync)
- nouveau_bo_unpin(disp->sync);
- nouveau_bo_fini(disp->sync);
+ nouveau_bo_unpin_del(&disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
@@ -2845,20 +2850,7 @@ nv50_display_create(struct drm_device *dev)
dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &disp->sync);
- if (!ret) {
- ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
- if (!ret) {
- ret = nouveau_bo_map(disp->sync);
- if (ret)
- nouveau_bo_unpin(disp->sync);
- }
- if (ret)
- nouveau_bo_fini(disp->sync);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &disp->sync);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d7c74cc43ba5..3dd742b4f823 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -577,6 +577,7 @@ nv50_head_create(struct drm_device *dev, int index)
return ERR_PTR(-ENOMEM);
head->func = disp->core->func->head;
+ head->disp = disp;
head->base.index = index;
if (disp->disp->object.oclass < GF110_DISP)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index e9d17037ffcf..8bd2fcb1eff5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -13,6 +13,8 @@
struct nv50_head {
const struct nv50_head_func *func;
+ struct nv50_disp *disp;
+
struct nouveau_crtc base;
struct nv50_crc crc;
struct nv50_lut olut;
@@ -98,4 +100,7 @@ int headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *);
extern const struct nv50_head_func headc57d;
+bool headc57d_olut(struct nv50_head *, struct nv50_head_atom *, int size);
+
+extern const struct nv50_head_func headca7d;
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index fde4087e7691..3f8ba495de8f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -182,7 +182,7 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static bool
+bool
headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
if (size != 0 && size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headca7d.c b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c
new file mode 100644
index 000000000000..eeaeb15aa664
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+static int
+headca7d_display_id(struct nv50_head *head, u32 display_id)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_DISPLAY_ID(i, 0), display_id);
+
+ return 0;
+}
+
+static int
+headca7d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ u8 depth;
+ int ret;
+
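+ /* Map the atomic OR depth to the CA7D pixel-depth encoding; an unset depth (0) falls back to 24bpp. */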
+ switch (asyh->or.depth) {
+ case 6:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444;
+ break;
+ case 5:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444;
+ break;
+ case 2:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444;
+ break;
+ case 0:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, EXT_PACKET_WIN, NONE));
+
+ return 0;
+}
+
+static int
+headca7d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PROCAMP(i),
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA));
+
+ return 0;
+}
+
+static int
+headca7d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVDEF(NVCA7D, HEAD_SET_DITHER_CONTROL, OFFSET_ENABLE, DISABLE) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+
+ return 0;
+}
+
+static int
+headca7d_curs_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+headca7d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const u32 curs_hi = upper_32_bits(asyh->curs.offset);
+ const u32 curs_lo = lower_32_bits(asyh->curs.offset);
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 7);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(i, 0), curs_hi);
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ADDRESS_LO, curs_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0),
+
+ HEAD_SET_CONTROL_CURSOR_COMPOSITION(i),
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, K1, 0xff) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, CURSOR_COLOR_FACTOR_SELECT,
+ K1) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, VIEWPORT_COLOR_FACTOR_SELECT,
+ NEG_K1_TIMES_SRC) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, MODE, BLEND));
+
+ return 0;
+}
+
+static int
+headca7d_olut_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+headca7d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const u32 olut_hi = upper_32_bits(asyh->olut.offset);
+ const u32 olut_lo = lower_32_bits(asyh->olut.offset);
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_OLUT(i), olut_hi,
+
+ HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ADDRESS_LO, olut_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_OLUT_CONTROL(i),
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, INTERPOLATE, asyh->olut.output_mode) |
+ NVDEF(NVCA7D, HEAD_SET_OLUT_CONTROL, MIRROR, DISABLE) |
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, MODE, asyh->olut.mode) |
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, SIZE, asyh->olut.size),
+
+ HEAD_SET_OLUT_FP_NORM_SCALE(i), 0xffffffff);
+
+ return 0;
+}
+
+static int
+headca7d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ struct nv50_head_mode *m = &asyh->mode;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 11);
+ if (ret)
+ return ret;
+
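+ /* 11 dwords: one 4-method raster run (5) plus three single-method writes (2 each). */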
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL, STRUCTURE, PROGRESSIVE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+ return 0;
+}
+
+static int
+headca7d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH));
+
+ return 0;
+}
+
+const struct nv50_head_func
+headca7d = {
+ .view = headca7d_view,
+ .mode = headca7d_mode,
+ .olut = headc57d_olut,
+ .ilut_check = head907d_ilut_check,
+ .olut_identity = true,
+ .olut_size = 1024,
+ .olut_set = headca7d_olut_set,
+ .olut_clr = headca7d_olut_clr,
+ .curs_layout = head917d_curs_layout,
+ .curs_format = headc37d_curs_format,
+ .curs_set = headca7d_curs_set,
+ .curs_clr = headca7d_curs_clr,
+ .dither = headca7d_dither,
+ .procamp = headca7d_procamp,
+ .or = headca7d_or,
+ .static_wndw_map = headc37d_static_wndw_map,
+ .display_id = headca7d_display_id,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index 566fbddfc8d7..53c9ab6c138b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { GB202_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index f6be426dd525..11d5b923d6e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -556,14 +556,24 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
return ret;
if (wndw->ctxdma.parent) {
- ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
- if (IS_ERR(ctxdma)) {
- nouveau_bo_unpin(nvbo);
- return PTR_ERR(ctxdma);
+ if (wndw->wndw.base.user.oclass < GB202_DISP_WINDOW_CHANNEL_DMA) {
+ ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(nvbo);
+ return PTR_ERR(ctxdma);
+ }
+
+ if (asyw->visible)
+ asyw->image.handle[0] = ctxdma->object.handle;
+ } else {
+ /* No CTXDMAs on Blackwell. */
+ if (asyw->visible) {
+ /* "handle != NULL_HANDLE" is used to determine enable status
+ * in a number of places, so fill in a fake object handle.
+ */
+ asyw->image.handle[0] = NV50_DISP_HANDLE_WNDW_CTX(0);
+ }
}
-
- if (asyw->visible)
- asyw->image.handle[0] = ctxdma->object.handle;
}
ret = drm_gem_plane_helper_prepare_fb(plane, state);
@@ -901,6 +911,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { GB202_DISP_WINDOW_CHANNEL_DMA, 0, wndwca7e_new },
{ GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 76a6ae5d5652..90d100514bef 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -134,6 +134,9 @@ int wndwc57e_csc_clr(struct nv50_wndw *);
int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+int wndwca7e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
+
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 50a7b97d37a2..554c4f91f8be 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -25,6 +25,7 @@
#include <drm/drm_atomic_helper.h>
#include <nouveau_bo.h>
+#include <nvif/class.h>
#include <nvif/if0014.h>
#include <nvif/pushc37b.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
new file mode 100644
index 000000000000..0d8e9a9d1a57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7e.h>
+
+#include <nouveau_bo.h>
+
+static int
+wndwca7e_image_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL,
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0) |
+ NVDEF(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING));
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0),
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ const u32 iso0_hi = upper_32_bits(asyw->image.offset[0]);
+ const u32 iso0_lo = lower_32_bits(asyw->image.offset[0]);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret, kind;
+
+ if (asyw->image.kind)
+ kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR;
+ else
+ kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH;
+
+ ret = PUSH_WAIT(push, 17);
+ if (ret)
+ return ret;
+
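+ /* The ISO (image) surface is addressed by physical VRAM address; no CTXDMA handle on Blackwell. */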
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ISO(0), iso0_hi);
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0),
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ADDRESS_LO, iso0_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, TARGET, PHYSICAL_NVM) |
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, KIND, kind) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL,
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVCA7E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE,
+ NVVAL(NVCA7E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVCA7E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVCA7E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SET_PARAMS,
+ NVVAL(NVCA7E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVCA7E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVCA7E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVCA7E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVCA7E, SET_POINT_IN(0),
+ NVVAL(NVCA7E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVCA7E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE_IN,
+ NVVAL(NVCA7E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVCA7E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE_OUT,
+ NVVAL(NVCA7E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVCA7E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+
+ return 0;
+}
+
+static int
+wndwca7e_ilut_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT,
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ const u32 ilut_hi = upper_32_bits(asyw->xlut.i.offset);
+ const u32 ilut_lo = lower_32_bits(asyw->xlut.i.offset);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ILUT, ilut_hi,
+
+ SET_SURFACE_ADDRESS_LO_ILUT,
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ADDRESS_LO, ilut_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_ILUT_CONTROL,
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, SIZE, asyw->xlut.i.size) |
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, MODE, asyw->xlut.i.mode) |
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, INTERPOLATE, asyw->xlut.i.output_mode));
+
+ return 0;
+}
+
+static int
+wndwca7e_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ const u64 ntfy_addr = disp->sync->offset + asyw->ntfy.offset;
+ const u32 ntfy_hi = upper_32_bits(ntfy_addr);
+ const u32 ntfy_lo = lower_32_bits(ntfy_addr);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
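+ /* The notifier lives in the shared sync BO; program its physical address, then select write vs write-awaken mode. */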
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi,
+
+ SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_NOTIFIER_CONTROL,
+ NVVAL(NVCA7E, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken));
+
+ return 0;
+}
+
+static const struct nv50_wndw_func
+wndwca7e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .ntfy_set = wndwca7e_ntfy_set,
+ .ntfy_clr = wndwca7e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwca7e_ilut_set,
+ .xlut_clr = wndwca7e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwca7e_image_set,
+ .image_clr = wndwca7e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwca7e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwca7e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/gv100_fence.c b/drivers/gpu/drm/nouveau/gv100_fence.c
new file mode 100644
index 000000000000..cccdeca72002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/gv100_fence.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+#include "nv50_display.h"
+
+#include <nvif/push906f.h>
+
+#include <nvhw/class/clc36f.h>
+
+static int
+gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+ struct nvif_push *push = &chan->chan.push;
+ int ret;
+
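+ /* Semaphore RELEASE of a 32-bit payload at 'virtual' (with WFI), followed by a non-stall interrupt. */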
+ ret = PUSH_WAIT(push, 8);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual),
+ SEM_ADDR_HI, upper_32_bits(virtual),
+ SEM_PAYLOAD_LO, sequence);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, EN) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+
+ PUSH_MTHD(push, NVC36F, NON_STALL_INTERRUPT, 0);
+
+ PUSH_KICK(push);
+ return 0;
+}
+
+static int
+gv100_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+ struct nvif_push *push = &chan->chan.push;
+ int ret;
+
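+ /* Semaphore acquire: wait for the payload at 'virtual' to reach 'sequence' (circular >=), switching TSG while blocked. */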
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual),
+ SEM_ADDR_HI, upper_32_bits(virtual),
+ SEM_PAYLOAD_LO, sequence);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, ACQ_CIRC_GEQ) |
+ NVDEF(NVC36F, SEM_EXECUTE, ACQUIRE_SWITCH_TSG, EN) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT));
+
+ PUSH_KICK(push);
+ return 0;
+}
+
+static int
+gv100_fence_context_new(struct nouveau_channel *chan)
+{
+ struct nv84_fence_chan *fctx;
+ int ret;
+
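+ /* Reuse the nv84 fence context, overriding emit32/sync32 with Volta-style semaphore methods. */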
+ ret = nv84_fence_context_new(chan);
+ if (ret)
+ return ret;
+
+ fctx = chan->fence;
+ fctx->base.emit32 = gv100_fence_emit32;
+ fctx->base.sync32 = gv100_fence_sync32;
+ return 0;
+}
+
+int
+gv100_fence_create(struct nouveau_drm *drm)
+{
+ struct nv84_fence_priv *priv;
+ int ret;
+
+ ret = nv84_fence_create(drm);
+ if (ret)
+ return ret;
+
+ priv = drm->fence;
+ priv->base.context_new = gv100_fence_context_new;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
new file mode 100644
index 000000000000..8735dda4c8a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clc36f_h_
+#define _clc36f_h_
+
+#define NVC36F_NON_STALL_INTERRUPT (0x00000020)
+#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0
+#define NVC36F_SEM_ADDR_LO (0x0000005c)
+#define NVC36F_SEM_ADDR_LO_OFFSET 31:2
+#define NVC36F_SEM_ADDR_HI (0x00000060)
+#define NVC36F_SEM_ADDR_HI_OFFSET 7:0
+#define NVC36F_SEM_PAYLOAD_LO (0x00000064)
+#define NVC36F_SEM_PAYLOAD_LO_PAYLOAD 31:0
+#define NVC36F_SEM_PAYLOAD_HI (0x00000068)
+#define NVC36F_SEM_PAYLOAD_HI_PAYLOAD 31:0
+#define NVC36F_SEM_EXECUTE (0x0000006c)
+#define NVC36F_SEM_EXECUTE_OPERATION 2:0
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000
+#define NVC36F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005
+#define NVC36F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI 20:20
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE 24:24
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_REDUCTION 30:27
+#define NVC36F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000
+#define NVC36F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001
+#define NVC36F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002
+#define NVC36F_SEM_EXECUTE_REDUCTION_IAND 0x00000003
+#define NVC36F_SEM_EXECUTE_REDUCTION_IOR 0x00000004
+#define NVC36F_SEM_EXECUTE_REDUCTION_IADD 0x00000005
+#define NVC36F_SEM_EXECUTE_REDUCTION_INC 0x00000006
+#define NVC36F_SEM_EXECUTE_REDUCTION_DEC 0x00000007
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT 31:31
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h
new file mode 100644
index 000000000000..092aebe9551c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clc97b_h_
+#define _clc97b_h_
+
+// dma opcode instructions
+#define NVC97B_DMA
+#define NVC97B_DMA_OPCODE 31:29
+#define NVC97B_DMA_OPCODE_METHOD 0x00000000
+#define NVC97B_DMA_OPCODE_JUMP 0x00000001
+#define NVC97B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC97B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC97B_DMA_METHOD_COUNT 27:18
+#define NVC97B_DMA_METHOD_OFFSET 15:2
+#define NVC97B_DMA_DATA 31:0
+#define NVC97B_DMA_DATA_NOP 0x00000000
+#define NVC97B_DMA_JUMP_OFFSET 15:2
+#define NVC97B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+#endif // _clc97b_h_
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h
new file mode 100644
index 000000000000..0fec6fc21d44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h
@@ -0,0 +1,868 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clca7d_h_
+#define _clca7d_h_
+
+// class methods
+#define NVCA7D_UPDATE (0x00000200)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING 21:20
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_REASON 19:12
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS 24:24
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NVCA7D_UPDATE_RELEASE_ELV 0:0
+#define NVCA7D_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVCA7D_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN 28:28
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN_FALSE (0x00000000)
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN_TRUE (0x00000001)
+#define NVCA7D_SET_NOTIFIER_CONTROL (0x0000020C)
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY 12:12
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS (0x00000218)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000260)
+#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000264)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+
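+// NOTE: field defines in this header use the NVIDIA "DRF" convention: a
+// bare high:low bit-range token.  Wrapped in a ternary, (1 ? 11:8)
+// evaluates to the high bit (11) and (0 ? 11:8) to the low bit (8), so
+// generic pack/extract helpers can be built from the names alone.  A
+// minimal sketch of such helpers (in-tree code would normally use the
+// NVDEF()/NVVAL() family from <nvhw/drf.h>; the NV_FIELD_* names below
+// are illustrative only):
+//
+//   #define NV_FIELD_LO(f)    (0 ? f)
+//   #define NV_FIELD_HI(f)    (1 ? f)
+//   #define NV_FIELD_MASK(f)  (~0U >> (31 - NV_FIELD_HI(f) + NV_FIELD_LO(f)))
+//   #define NV_FIELD_SET(f,v) (((v) & NV_FIELD_MASK(f)) << NV_FIELD_LO(f))
+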
+#define NVCA7D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK 7:0
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
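+// Example (sketch, using the illustrative NV_FIELD_SET() helper outlined
+// above): attach head 0 to this SOR and drive it as DisplayPort, link A:
+//
+//   u32 ctrl = NV_FIELD_SET(NVCA7D_SOR_SET_CONTROL_OWNER_MASK,
+//                           NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD0) |
+//              NV_FIELD_SET(NVCA7D_SOR_SET_CONTROL_PROTOCOL,
+//                           NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_A);
+//   // ctrl == 0x00000801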
+
+#define NVCA7D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER 3:0
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i))
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F)
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE 8:8
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS 9:9
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME 10:10
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT 26:25
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH_BLOCKLINEAR (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_BLOCKLINEAR (0x00000002)
+#define NVCA7D_WINDOW_SET_PHYSICAL(a) (0x00001014 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW 31:0
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_NONE (0x00000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW0 (0x00000001)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW1 (0x00000002)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW2 (0x00000004)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW3 (0x00000008)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW4 (0x00000010)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW5 (0x00000020)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW6 (0x00000040)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW7 (0x00000080)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW8 (0x00000100)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW9 (0x00000200)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW10 (0x00000400)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW11 (0x00000800)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW12 (0x00001000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW13 (0x00002000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW14 (0x00004000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW15 (0x00008000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW16 (0x00010000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW17 (0x00020000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW18 (0x00040000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW19 (0x00080000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW20 (0x00100000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW21 (0x00200000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW22 (0x00400000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW23 (0x00800000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW24 (0x01000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW25 (0x02000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW26 (0x04000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW27 (0x08000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW28 (0x10000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW29 (0x20000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW30 (0x40000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW31 (0x80000000)
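+// Example (sketch, same illustrative NV_FIELD_SET() helper): bind a window
+// to head 0 but keep it hidden until the first real flip:
+//
+//   u32 wctrl = NV_FIELD_SET(NVCA7D_WINDOW_SET_CONTROL_OWNER,
+//                            NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD(0)) |
+//               NV_FIELD_SET(NVCA7D_WINDOW_SET_CONTROL_HIDE,
+//                            NVCA7D_WINDOW_SET_CONTROL_HIDE_TRUE);
+//   // wctrl == 0x00000100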
+
+#define NVCA7D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F)
+#define NVCA7D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE 1:0
+#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER 3:3
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE 11:10
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN 8:4
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCKOUT_WINDOW 15:12
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE 23:22
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN 20:16
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN 28:24
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE 30:30
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE 31:31
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVCA7D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED 5:5
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_ELV_START 31:17
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVCA7D_HEAD_SET_TILE_MASK(a) (0x00002060 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE 7:0
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE0 (0x00000001)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE1 (0x00000002)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE2 (0x00000004)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE3 (0x00000008)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE4 (0x00000010)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE5 (0x00000020)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE6 (0x00000040)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE7 (0x00000080)
+#define NVCA7D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_SIZE_WIDTH 15:0
+#define NVCA7D_HEAD_SET_RASTER_SIZE_HEIGHT 31:16
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC(a) (0x00002150 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC(a) (0x00002154 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT(a) (0x00002158 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT(a) (0x0000215C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(a,b) (0x00002170 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(a,b) (0x00002178 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND 1:1
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_PITCH (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_BLOCKLINEAR (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE 3:2
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SIZE 18:8
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND 4:4
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_LEVEL 25:20
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS 5:5
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_3BITS (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_4BITS (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0
+
+#define NVCA7D_TILE_SET_TILE_SIZE(a) (0x00006000 + (a)*0x00000200)
+#define NVCA7D_TILE_SET_TILE_SIZE_START 14:0
+#define NVCA7D_TILE_SET_TILE_SIZE_WIDTH 30:16
+
+#endif // _clca7d_h
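
The HEAD methods in this class are arrays indexed by head, with a 0x800-byte stride per head (the TILE methods use a 0x200 stride). A minimal sketch of packing SET_OLUT_CONTROL from the fields above; the helper name is illustrative, and the open-coded shifts simply mirror the h:l bit positions:

	/* DIRECT10 OLUT with interpolation enabled; MODE is bits 3:2,
	 * INTERPOLATE is bit 0, per the field definitions above. */
	static inline u32
	ca7d_olut_control_direct10(void)
	{
		return (NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 << 2) |
		       (NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE << 0);
	}

The resulting value would be written to NVCA7D_HEAD_SET_OLUT_CONTROL(i), i.e. 0x2280 + i*0x800 for head i.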
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h
new file mode 100644
index 000000000000..ebfb2e48a4f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clca7e_h_
+#define _clca7e_h_
+
+// class methods
+#define NVCA7E_SET_NOTIFIER_CONTROL (0x00000220)
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVCA7E_SET_SIZE (0x00000224)
+#define NVCA7E_SET_SIZE_WIDTH 15:0
+#define NVCA7E_SET_SIZE_HEIGHT 31:16
+#define NVCA7E_SET_STORAGE (0x00000228)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NVCA7E_SET_PARAMS (0x0000022C)
+#define NVCA7E_SET_PARAMS_FORMAT 7:0
+#define NVCA7E_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NVCA7E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F)
+#define NVCA7E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NVCA7E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVCA7E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E)
+#define NVCA7E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVCA7E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6)
+#define NVCA7E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NVCA7E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9)
+#define NVCA7E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NVCA7E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NVCA7E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028)
+#define NVCA7E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078)
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000)
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001)
+#define NVCA7E_SET_PARAMS_SWAP_UV 19:19
+#define NVCA7E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000)
+#define NVCA7E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001)
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE 22:22
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000)
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001)
+#define NVCA7E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004)
+#define NVCA7E_SET_PLANAR_STORAGE_PITCH 12:0
+#define NVCA7E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004)
+#define NVCA7E_SET_POINT_IN_X 15:0
+#define NVCA7E_SET_POINT_IN_Y 31:16
+#define NVCA7E_SET_SIZE_IN (0x00000298)
+#define NVCA7E_SET_SIZE_IN_WIDTH 15:0
+#define NVCA7E_SET_SIZE_IN_HEIGHT 31:16
+#define NVCA7E_SET_SIZE_OUT (0x000002A4)
+#define NVCA7E_SET_SIZE_OUT_WIDTH 15:0
+#define NVCA7E_SET_SIZE_OUT_HEIGHT 31:16
+#define NVCA7E_SET_PRESENT_CONTROL (0x00000308)
+#define NVCA7E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE 13:12
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002)
+#define NVCA7E_SET_ILUT_CONTROL (0x00000440)
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE 0:0
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR 1:1
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MODE 3:2
+#define NVCA7E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVCA7E_SET_ILUT_CONTROL_SIZE 18:8
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000650)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000654)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ISO(b) (0x00000658 + (b)*0x00000004)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ISO_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO(b) (0x00000670 + (b)*0x00000004)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND 1:1
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_ENABLE (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT (0x00000688)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT (0x0000068C)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_ENABLE (0x00000001)
+
+#endif // _clca7e_h
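
Throughout these class headers, a bare "h:l" define names a bitfield occupying bits h..l of a 32-bit method value; the driver packs such fields with the NVVAL()/NVDEF() macros from <nvhw/drf.h>. A self-contained sketch of the same convention (FIELD_MASK, FIELD_SET and ca7e_set_size are illustrative names, not part of this patch):

	#include <linux/types.h>

	/* Mask covering bits h..l, and a pack helper for a value v. */
	#define FIELD_MASK(h, l)   ((~0u >> (31 - (h))) & (~0u << (l)))
	#define FIELD_SET(h, l, v) (((u32)(v) << (l)) & FIELD_MASK(h, l))

	/* NVCA7E_SET_SIZE packs WIDTH into 15:0 and HEIGHT into 31:16. */
	static inline u32
	ca7e_set_size(u16 width, u16 height)
	{
		return FIELD_SET(15, 0, width) | FIELD_SET(31, 16, height);
	}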
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h
new file mode 100644
index 000000000000..c9d74bd95e0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb100_dev_hshub_base_h__
+#define __gb100_dev_hshub_base_h__
+
+#define NV_PFB_HSHUB0 0x00870fff:0x00870000
+
+#define NV_PFB_HSHUB 0x00000FFF:0x00000000 /* RW--D */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00000E50 /* RW-4R */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00000E54 /* RW-4R */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO 0x000006C0 /* RW-4R */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI 0x000006C4 /* RW-4R */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+
+#endif // __gb100_dev_hshub_base_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h
new file mode 100644
index 000000000000..4d0bb8e14298
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb10b_dev_fb_h__
+#define __gb10b_dev_fb_h__
+
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO 0x008a1d58 /* RW-4R */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xffffff00 /* RW--V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI 0x008a1d5c /* RW-4R */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000fffff /* RW--V */
+
+#endif // __gb10b_dev_fb_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h
new file mode 100644
index 000000000000..b09f04b31738
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb202_dev_ce_h__
+#define __gb202_dev_ce_h__
+
+#define NV_CE_GRCE_MASK 0x001040d8 /* C--4R */
+#define NV_CE_GRCE_MASK_VALUE 9:0 /* C--VF */
+#define NV_CE_GRCE_MASK_VALUE_INIT 0x00f /* C---V */
+
+#endif // __gb202_dev_ce_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h
new file mode 100644
index 000000000000..ed359cb528fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb202_dev_therm_h__
+#define __gb202_dev_therm_h__
+
+#define NV_THERM_I2CS_SCRATCH 0x00ad00bc /* RW-4R */
+#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */
+#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000
+
+#endif // __gb202_dev_therm_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h
new file mode 100644
index 000000000000..52171b412aa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_falcon_v4_h__
+#define __gh100_dev_falcon_v4_h__
+
+#define NV_PFALCON_FALCON_MAILBOX0 0x00000040 /* RW-4R */
+#define NV_PFALCON_FALCON_MAILBOX0_DATA 31:0 /* RWIVF */
+#define NV_PFALCON_FALCON_MAILBOX0_DATA_INIT 0x00000000 /* RWI-V */
+#define NV_PFALCON_FALCON_MAILBOX1 0x00000044 /* RW-4R */
+#define NV_PFALCON_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */
+#define NV_PFALCON_FALCON_MAILBOX1_DATA_INIT 0x00000000 /* RWI-V */
+
+#define NV_PFALCON_FALCON_HWCFG2 0x000000f4 /* R--4R */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN 13:13 /* R--VF */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_LOCK 0x00000001 /* R---V */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_UNLOCK 0x00000000 /* R---V */
+
+#endif // __gh100_dev_falcon_v4_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h
new file mode 100644
index 000000000000..819f09465952
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_fb_h_
+#define __gh100_dev_fb_h_
+
+#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT 8 /* */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00100A34 /* RW-4R */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00100A38 /* RW-4R */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+
+#endif // __gh100_dev_fb_h_
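
Both the GH100 FBHUB registers here and the GB100 HSHUB variants earlier split a single system-memory flush-page address across a LO/HI register pair. A sketch of programming them, assuming the usual split: the LO write carries the low 32 bits (bits 7:0 are not writable, hence the 256-byte alignment implied by NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT) and HI carries the bits above. flush_page_set() is a hypothetical helper; nvkm_wr32(), lower_32_bits() and upper_32_bits() are existing helpers.

	static void
	flush_page_set(struct nvkm_device *device, u64 addr)
	{
		nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO,
			  lower_32_bits(addr));
		nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI,
			  upper_32_bits(addr));
	}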
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h
new file mode 100644
index 000000000000..e9507242cae5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_fsp_pri_h__
+#define __gh100_dev_fsp_pri_h__
+
+#define NV_PFSP 0x8F3FFF:0x8F0000 /* RW--D */
+
+#define NV_PFSP_MSGQ_HEAD(i) (0x008F2c80+(i)*8) /* RW-4A */
+#define NV_PFSP_MSGQ_HEAD__SIZE_1 8 /* */
+#define NV_PFSP_MSGQ_HEAD_VAL 31:0 /* RWIUF */
+#define NV_PFSP_MSGQ_HEAD_VAL_INIT 0x00000000 /* RWI-V */
+#define NV_PFSP_MSGQ_TAIL(i) (0x008F2c84+(i)*8) /* RW-4A */
+#define NV_PFSP_MSGQ_TAIL__SIZE_1 8 /* */
+#define NV_PFSP_MSGQ_TAIL_VAL 31:0 /* RWIUF */
+#define NV_PFSP_MSGQ_TAIL_VAL_INIT 0x00000000 /* RWI-V */
+
+#define NV_PFSP_QUEUE_HEAD(i) (0x008F2c00+(i)*8) /* RW-4A */
+#define NV_PFSP_QUEUE_HEAD__SIZE_1 8 /* */
+#define NV_PFSP_QUEUE_HEAD_ADDRESS 31:0 /* RWIVF */
+#define NV_PFSP_QUEUE_HEAD_ADDRESS_INIT 0x00000000 /* RWI-V */
+#define NV_PFSP_QUEUE_TAIL(i) (0x008F2c04+(i)*8) /* RW-4A */
+#define NV_PFSP_QUEUE_TAIL__SIZE_1 8 /* */
+#define NV_PFSP_QUEUE_TAIL_ADDRESS 31:0 /* RWIVF */
+#define NV_PFSP_QUEUE_TAIL_ADDRESS_INIT 0x00000000 /* RWI-V */
+
+#endif // __gh100_dev_fsp_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h
new file mode 100644
index 000000000000..6707e0e3b96b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_mmu_h__
+#define __gh100_dev_mmu_h__
+
+#define NV_MMU_PTE /* ----G */
+#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */
+#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
+#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_PTE_KIND (1*32+7):(1*32+4) /* RWXVF */
+#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */
+#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x6 /* R---V */
+#define NV_MMU_PTE_KIND_Z16 0x1 /* R---V */
+#define NV_MMU_PTE_KIND_S8 0x2 /* R---V */
+#define NV_MMU_PTE_KIND_S8Z24 0x3 /* R---V */
+#define NV_MMU_PTE_KIND_ZF32_X24S8 0x4 /* R---V */
+#define NV_MMU_PTE_KIND_Z24S8 0x5 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x8 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x9 /* R---V */
+#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0xA /* R---V */
+#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0xB /* R---V */
+#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0xC /* R---V */
+#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0xD /* R---V */
+#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0xE /* R---V */
+#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0xF /* R---V */
+
+#define NV_MMU_VER3_PDE /* ----G */
+#define NV_MMU_VER3_PDE_IS_PTE 0:0 /* RWXVF */
+#define NV_MMU_VER3_PDE_IS_PTE_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PDE_IS_PTE_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PDE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_PDE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PDE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE 2:1 /* RWXVF */
+#define NV_MMU_VER3_PDE_APERTURE_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF 5:3 /* RWXVF */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_ADDRESS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PDE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_PDE__SIZE 8
+
+#define NV_MMU_VER3_DUAL_PDE /* ----G */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE 0:0 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG 5:3 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG 51:8 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL 69:67 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SMALL 115:76 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */
+#define NV_MMU_VER3_DUAL_PDE__SIZE 16
+
+#define NV_MMU_VER3_PTE /* ----G */
+#define NV_MMU_VER3_PTE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_PTE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PTE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE 2:1 /* RWXVF */
+#define NV_MMU_VER3_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF 7:3 /* RWXVF */
+#define NV_MMU_VER3_PTE_PCF_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_SPARSE 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_MAPPING_NOWHERE 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_NO_VALID_4KB_PAGE 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACE 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACE 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACE 0x00000004 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE 0x00000005 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACE 0x00000006 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE 0x00000007 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACE 0x00000008 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACE 0x00000009 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE 0x0000000A /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE 0x0000000B /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACE 0x0000000C /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000D /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE 0x0000000E /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000F /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD 0x00000010 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD 0x00000011 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD 0x00000012 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD 0x00000013 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD 0x00000014 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD 0x00000015 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD 0x00000016 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD 0x00000017 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACD 0x00000018 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACD 0x00000019 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACD 0x0000001A /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACD 0x0000001B /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACD 0x0000001C /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001D /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACD 0x0000001E /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001F /* RW--V */
+#define NV_MMU_VER3_PTE_KIND 11:8 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_SYS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_PEER 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_VID 39:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_PEER_ID 63:(64-3) /* RWXVF */
+#define NV_MMU_VER3_PTE_PEER_ID_0 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_1 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_2 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_3 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_4 0x00000004 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_5 0x00000005 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_6 0x00000006 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_7 0x00000007 /* RW--V */
+#define NV_MMU_VER3_PTE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_PTE__SIZE 8
+
+#endif // __gh100_dev_mmu_h__
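
A sketch of how the VER3 PTE fields above compose, for a valid, cached, read/write system-memory mapping. The helper is illustrative only (nouveau's VMM code drives these fields through its page-table walkers), and the open-coded shifts mirror the field positions:

	static inline u64
	gh100_pte_example(u64 addr, bool coherent)
	{
		u64 pte = NV_MMU_VER3_PTE_VALID_TRUE;		/* VALID, bit 0 */

		pte |= (u64)(coherent ?
			     NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY :
			     NV_MMU_VER3_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY) << 1;
		pte |= (u64)NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE << 3;
		pte |= (u64)NV_MMU_PTE_KIND_GENERIC_MEMORY << 8;	/* KIND 11:8 */
		pte |= (addr >> NV_MMU_VER3_PTE_ADDRESS_SHIFT) << 12;	/* ADDRESS 51:12 */
		return pte;
	}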
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h
new file mode 100644
index 000000000000..8ff4663168d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_riscv_pri_h__
+#define __gh100_dev_riscv_pri_h__
+
+#define NV_PRISCV_RISCV_CPUCTL 0x00000388 /* RW-4R */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED 4:4 /* R-IVF */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_INIT 0x00000001 /* R-I-V */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_FALSE 0x00000000 /* R---V */
+
+#endif // __gh100_dev_riscv_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h
new file mode 100644
index 000000000000..49b4816cb00b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_therm_h__
+#define __gh100_dev_therm_h__
+
+#define NV_THERM_I2CS_SCRATCH 0x000200bc /* RW-4R */
+#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */
+#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */
+
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000
+
+#endif // __gh100_dev_therm_h__
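
NV_THERM_I2CS_SCRATCH is repurposed here as an FSP boot-complete flag (the GB202 header earlier defines the same overlay at a different offset). A sketch of polling it, assuming boot firmware writes STATUS_SUCCESS on completion; the wrapper name is hypothetical, while nvkm_rd32() and the nvkm_msec() poll helper are existing nvkm idioms:

	static int
	fsp_wait_boot_complete(struct nvkm_device *device)
	{
		/* Poll up to 2s for FSP to post the success status. */
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, NV_THERM_I2CS_SCRATCH) ==
			    NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
				break;
		) < 0)
			return -ETIMEDOUT;

		return 0;
	}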
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h
new file mode 100644
index 000000000000..12b49e9894a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_xtl_ep_pri_h__
+#define __gh100_dev_xtl_ep_pri_h__
+
+#define NV_EP_PCFGM 0x92FFF:0x92000 /* RW--D */
+
+#endif // __gh100_dev_xtl_ep_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h
new file mode 100644
index 000000000000..1a891bd33fa3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_pri_nv_xal_ep_h__
+#define __gh100_pri_nv_xal_ep_h__
+
+#define NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT 0x000010
+#define NV_XAL_EP_BAR0_WINDOW_BASE 21:0
+#define NV_XAL_EP_BAR0_WINDOW 0x0010fd40
+
+#endif // __gh100_pri_nv_xal_ep_h__
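
A sketch of how a BAR0 window define like this is typically used: the window base is programmed in units of 1 << NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT (64KiB), after which a region of VRAM becomes accessible through a fixed aperture in BAR0. The helper below is an assumption for illustration; the aperture offset itself is not defined in this header.

	static void
	bar0_window_set(struct nvkm_device *device, u64 vram_addr)
	{
		/* BASE is bits 21:0 of the register, in 64KiB units. */
		nvkm_wr32(device, NV_XAL_EP_BAR0_WINDOW,
			  vram_addr >> NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT);
	}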
diff --git a/drivers/gpu/drm/nouveau/include/nvif/chan.h b/drivers/gpu/drm/nouveau/include/nvif/chan.h
new file mode 100644
index 000000000000..c329a29068d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/chan.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVIF_CHAN_H__
+#define __NVIF_CHAN_H__
+#include "push.h"
+
+struct nvif_chan {
+ const struct nvif_chan_func {
+ struct {
+ u32 (*read_get)(struct nvif_chan *);
+ } push;
+
+ struct {
+ u32 (*read_get)(struct nvif_chan *);
+ void (*push)(struct nvif_chan *, bool main, u64 addr, u32 size,
+ bool no_prefetch);
+ void (*kick)(struct nvif_chan *);
+ int (*post)(struct nvif_chan *, u32 gpptr, u32 pbptr);
+ u32 post_size;
+ } gpfifo;
+
+ struct {
+ int (*release)(struct nvif_chan *, u64 addr, u32 data);
+ } sem;
+ } *func;
+
+ struct {
+ struct nvif_map map;
+ } userd;
+
+ struct {
+ struct nvif_map map;
+ u32 cur;
+ u32 max;
+ int free;
+ } gpfifo;
+
+ struct {
+ struct nvif_map map;
+ u64 addr;
+ } sema;
+
+ struct nvif_push push;
+
+ struct nvif_user *usermode;
+ u32 doorbell_token;
+};
+
+int nvif_chan_dma_wait(struct nvif_chan *, u32 push_nr);
+
+void nvif_chan_gpfifo_ctor(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, struct nvif_chan *);
+int nvif_chan_gpfifo_wait(struct nvif_chan *, u32 gpfifo_nr, u32 push_nr);
+void nvif_chan_gpfifo_push(struct nvif_chan *, u64 addr, u32 size, bool no_prefetch);
+int nvif_chan_gpfifo_post(struct nvif_chan *);
+
+void nvif_chan506f_gpfifo_push(struct nvif_chan *, bool main, u64 addr, u32 size, bool no_prefetch);
+void nvif_chan506f_gpfifo_kick(struct nvif_chan *);
+
+int nvif_chan906f_ctor_(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_chan *);
+u32 nvif_chan906f_read_get(struct nvif_chan *);
+u32 nvif_chan906f_gpfifo_read_get(struct nvif_chan *);
+int nvif_chan906f_gpfifo_post(struct nvif_chan *, u32 gpptr, u32 pbptr);
+
+int nvif_chan506f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size);
+int nvif_chan906f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr);
+int nvif_chanc36f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_user *usermode, u32 doorbell_token);
+#endif
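
A sketch of the submission flow these declarations suggest: reserve GPFIFO entries and pushbuf space, queue the indirect buffer, then post the tracking semaphore. The entry and dword counts are illustrative; error handling and channel construction (one of the per-class ctors above) are elided.

	static int
	chan_submit_ib(struct nvif_chan *chan, u64 addr, u32 size)
	{
		int ret;

		/* One GPFIFO entry for the IB, one for the semaphore post. */
		ret = nvif_chan_gpfifo_wait(chan, 2, 16);
		if (ret)
			return ret;

		nvif_chan_gpfifo_push(chan, addr, size, false);
		return nvif_chan_gpfifo_post(chan);
	}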
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index ea937fa7bc55..ea8267e0d8da 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -29,6 +29,8 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_TURING 0x0c
#define NV_DEVICE_INFO_V0_AMPERE 0x0d
#define NV_DEVICE_INFO_V0_ADA 0x0e
+#define NV_DEVICE_INFO_V0_HOPPER 0x0f
+#define NV_DEVICE_INFO_V0_BLACKWELL 0x10
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 824e052dcc25..ff6823cb2cd8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -57,12 +57,15 @@
#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
+#define BLACKWELL_INLINE_TO_MEMORY_A 0x0000cd40
#define NV04_DISP /* cl0046.h */ 0x00000046
#define VOLTA_USERMODE_A 0x0000c361
#define TURING_USERMODE_A 0x0000c461
#define AMPERE_USERMODE_A 0x0000c561
+#define HOPPER_USERMODE_A 0x0000c661
+#define BLACKWELL_USERMODE_A 0x0000c761
#define MAXWELL_FAULT_BUFFER_A /* clb069.h */ 0x0000b069
#define VOLTA_FAULT_BUFFER_A /* clb069.h */ 0x0000c369
@@ -85,6 +88,9 @@
#define TURING_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c46f
#define AMPERE_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c56f
#define AMPERE_CHANNEL_GPFIFO_B /* if0020.h */ 0x0000c76f
+#define HOPPER_CHANNEL_GPFIFO_A 0x0000c86f
+#define BLACKWELL_CHANNEL_GPFIFO_A 0x0000c96f
+#define BLACKWELL_CHANNEL_GPFIFO_B 0x0000ca6f
#define NV50_DISP /* if0010.h */ 0x00005070
#define G82_DISP /* if0010.h */ 0x00008270
@@ -102,8 +108,10 @@
#define TU102_DISP /* if0010.h */ 0x0000c570
#define GA102_DISP /* if0010.h */ 0x0000c670
#define AD102_DISP /* if0010.h */ 0x0000c770
+#define GB202_DISP 0x0000ca70
#define GV100_DISP_CAPS 0x0000c373
+#define GB202_DISP_CAPS 0x0000ca73
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -118,6 +126,7 @@
#define GV100_DISP_CURSOR /* if0014.h */ 0x0000c37a
#define TU102_DISP_CURSOR /* if0014.h */ 0x0000c57a
#define GA102_DISP_CURSOR /* if0014.h */ 0x0000c67a
+#define GB202_DISP_CURSOR 0x0000ca7a
#define NV50_DISP_OVERLAY /* if0014.h */ 0x0000507b
#define G82_DISP_OVERLAY /* if0014.h */ 0x0000827b
@@ -128,6 +137,7 @@
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c37b
#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c57b
#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c67b
+#define GB202_DISP_WINDOW_IMM_CHANNEL_DMA 0x0000ca7b
#define NV50_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000827c
@@ -153,6 +163,7 @@
#define TU102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c57d
#define GA102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c67d
#define AD102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c77d
+#define GB202_DISP_CORE_CHANNEL_DMA 0x0000ca7d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000827e
@@ -164,6 +175,7 @@
#define GV100_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c37e
#define TU102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c57e
#define GA102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c67e
+#define GB202_DISP_WINDOW_CHANNEL_DMA 0x0000ca7e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
@@ -189,16 +201,25 @@
#define TURING_A /* cl9097.h */ 0x0000c597
+#define AMPERE_A 0x0000c697
#define AMPERE_B /* cl9097.h */ 0x0000c797
#define ADA_A /* cl9097.h */ 0x0000c997
+#define HOPPER_A 0x0000cb97
+
+#define BLACKWELL_A 0x0000cd97
+#define BLACKWELL_B 0x0000ce97
+
#define NV74_BSP 0x000074b0
+#define NVB8B0_VIDEO_DECODER 0x0000b8b0
#define NVC4B0_VIDEO_DECODER 0x0000c4b0
#define NVC6B0_VIDEO_DECODER 0x0000c6b0
#define NVC7B0_VIDEO_DECODER 0x0000c7b0
#define NVC9B0_VIDEO_DECODER 0x0000c9b0
+#define NVCDB0_VIDEO_DECODER 0x0000cdb0
+#define NVCFB0_VIDEO_DECODER 0x0000cfb0
#define GT212_MSVLD 0x000085b1
#define IGT21A_MSVLD 0x000086b1
@@ -227,10 +248,14 @@
#define TURING_DMA_COPY_A 0x0000c5b5
#define AMPERE_DMA_COPY_A 0x0000c6b5
#define AMPERE_DMA_COPY_B 0x0000c7b5
+#define HOPPER_DMA_COPY_A 0x0000c8b5
+#define BLACKWELL_DMA_COPY_A 0x0000c9b5
+#define BLACKWELL_DMA_COPY_B 0x0000cab5
#define NVC4B7_VIDEO_ENCODER 0x0000c4b7
#define NVC7B7_VIDEO_ENCODER 0x0000c7b7
#define NVC9B7_VIDEO_ENCODER 0x0000c9b7
+#define NVCFB7_VIDEO_ENCODER 0x0000cfb7
#define FERMI_DECOMPRESS 0x000090b8
@@ -246,15 +271,25 @@
#define PASCAL_COMPUTE_B 0x0000c1c0
#define VOLTA_COMPUTE_A 0x0000c3c0
#define TURING_COMPUTE_A 0x0000c5c0
+#define AMPERE_COMPUTE_A 0x0000c6c0
#define AMPERE_COMPUTE_B 0x0000c7c0
#define ADA_COMPUTE_A 0x0000c9c0
+#define HOPPER_COMPUTE_A 0x0000cbc0
+#define BLACKWELL_COMPUTE_A 0x0000cdc0
+#define BLACKWELL_COMPUTE_B 0x0000cec0
#define NV74_CIPHER 0x000074c1
+#define NVB8D1_VIDEO_NVJPG 0x0000b8d1
#define NVC4D1_VIDEO_NVJPG 0x0000c4d1
#define NVC9D1_VIDEO_NVJPG 0x0000c9d1
+#define NVCDD1_VIDEO_NVJPG 0x0000cdd1
+#define NVCFD1_VIDEO_NVJPG 0x0000cfd1
+#define NVB8FA_VIDEO_OFA 0x0000b8fa
#define NVC6FA_VIDEO_OFA 0x0000c6fa
#define NVC7FA_VIDEO_OFA 0x0000c7fa
#define NVC9FA_VIDEO_OFA 0x0000c9fa
+#define NVCDFA_VIDEO_OFA 0x0000cdfa
+#define NVCFFA_VIDEO_OFA 0x0000cffa
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 8d205b6af46a..1b32dc701f61 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -16,7 +16,7 @@ struct nvif_object {
u32 handle;
s32 oclass;
void *priv; /*XXX: hack */
- struct {
+ struct nvif_map {
void __iomem *ptr;
u64 size;
} map;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
index 6d3a8a3d2087..a493fababe3c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push.h
@@ -31,6 +31,12 @@ struct nvif_push {
void (*kick)(struct nvif_push *push);
struct nvif_mem mem;
+ u64 addr;
+
+ struct {
+ u32 get;
+ u32 max;
+ } hw;
u32 *bgn;
u32 *cur;
@@ -41,7 +47,7 @@ struct nvif_push {
static inline __must_check int
PUSH_WAIT(struct nvif_push *push, u32 size)
{
- if (push->cur + size >= push->end) {
+ if (push->cur + size > push->end) {
int ret = push->wait(push, size);
if (ret)
return ret;
@@ -55,7 +61,11 @@ PUSH_WAIT(struct nvif_push *push, u32 size)
static inline int
PUSH_KICK(struct nvif_push *push)
{
- push->kick(push);
+ if (push->cur != push->bgn) {
+ push->kick(push);
+ push->bgn = push->cur;
+ }
+
return 0;
}
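
Two behavioural fixes here: PUSH_WAIT() previously refused a request that exactly filled the remaining space (">=" instead of ">"), and PUSH_KICK() now only submits when something has been written since the last kick. A sketch of the resulting contract (emit_two_dwords is an illustrative name; PUSH_DATA() is the existing helper from this header):

	static int
	emit_two_dwords(struct nvif_push *push, u32 hdr, u32 data)
	{
		int ret = PUSH_WAIT(push, 2);	/* reserve 2 dwords */
		if (ret)
			return ret;

		PUSH_DATA(push, hdr);		/* class-specific method header */
		PUSH_DATA(push, data);
		return PUSH_KICK(push);		/* no-op if nothing was queued */
	}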
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push906f.h b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
index cc2866bc8b0a..79df71de98d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
@@ -7,6 +7,7 @@
#ifndef PUSH906F_SUBC
// Host methods
#define PUSH906F_SUBC_NV906F 0
+#define PUSH906F_SUBC_NVC36F 0
// Twod
#define PUSH906F_SUBC_NV902D 3
diff --git a/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h
new file mode 100644
index 000000000000..c8d6b6319134
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h
@@ -0,0 +1,18 @@
+#ifndef __NVIF_PUSHC97B_H__
+#define __NVIF_PUSHC97B_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/clc97b.h>
+
+#define PUSH_HDR(p,m,c) do { \
+ PUSH_ASSERT(!((m) & ~DRF_SMASK(NVC97B_DMA_METHOD_OFFSET)), "mthd"); \
+ PUSH_ASSERT(!((c) & ~DRF_MASK(NVC97B_DMA_METHOD_COUNT)), "size"); \
+ PUSH_DATA__((p), NVDEF(NVC97B, DMA, OPCODE, METHOD) | \
+ NVVAL(NVC97B, DMA, METHOD_COUNT, (c)) | \
+ NVVAL(NVC97B, DMA, METHOD_OFFSET, (m) >> 2), \
+ " mthd 0x%04x size %d - %s", (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,s,m,c) PUSH_HDR(p,m,c)
+#define PUSH_MTHD_INC 4:4
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 46afb877a296..99579e7b9376 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -46,7 +46,10 @@ struct nvkm_device {
GV100 = 0x140,
TU100 = 0x160,
GA100 = 0x170,
+ GH100 = 0x180,
AD100 = 0x190,
+ GB10x = 0x1a0,
+ GB20x = 0x1b0,
} card_type;
u32 chipset;
u8 chiprev;
@@ -77,6 +80,13 @@ struct nvkm_device {
struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst);
+enum nvkm_bar_id {
+ NVKM_BAR_INVALID = 0,
+ NVKM_BAR0_PRI,
+ NVKM_BAR1_FB,
+ NVKM_BAR2_INST,
+};
+
struct nvkm_device_func {
struct nvkm_device_pci *(*pci)(struct nvkm_device *);
struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);
@@ -85,8 +95,8 @@ struct nvkm_device_func {
int (*init)(struct nvkm_device *);
void (*fini)(struct nvkm_device *, bool suspend);
int (*irq)(struct nvkm_device *);
- resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
- resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+ resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id);
+ resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id);
bool cpu_coherent;
};
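
With the new enum, callers identify a BAR by its role rather than by raw PCI BAR index, e.g. (sketch):

	resource_size_t base = device->func->resource_addr(device, NVKM_BAR1_FB);
	resource_size_t size = device->func->resource_size(device, NVKM_BAR1_FB);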
@@ -124,6 +134,9 @@ struct nvkm_device *nvkm_device_find(u64 name);
_temp; \
})
+#define NVKM_RD32_(p,o,dr) nvkm_rd32((p), (o) + (dr))
+#define NVKM_RD32(p,A...) DRF_RV(NVKM_RD32_, (p), 0, ##A)
+
void nvkm_device_del(struct nvkm_device **);
struct nvkm_device_oclass {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
index 9d2a1abf64f9..d92ffd17b729 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: MIT */
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FSP , struct nvkm_fsp , fsp)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP , struct nvkm_gsp , gsp)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VFN , struct nvkm_vfn , vfn)
@@ -29,7 +30,7 @@ NVKM_LAYOUT_INST(NVKM_SUBDEV_IOCTRL , struct nvkm_subdev , ioctrl, 3)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FLA , struct nvkm_subdev , fla)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_BSP , struct nvkm_engine , bsp)
-NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 10)
+NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 20)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_CIPHER , struct nvkm_engine , cipher)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_DISP , struct nvkm_disp , disp)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_DMAOBJ , struct nvkm_dma , dma)
@@ -43,9 +44,9 @@ NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC , struct nvkm_engine , mspdec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP , struct nvkm_engine , msppp)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8)
-NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 4)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
+NVKM_LAYOUT_INST(NVKM_ENGINE_OFA , struct nvkm_engine , ofa, 2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 3e8db8280e2a..7903d7470d19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -87,5 +87,4 @@ int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
-int ad102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index be508f65b280..96c16cfccf16 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -78,9 +78,6 @@ struct nvkm_fifo {
struct {
struct nvkm_memory *mem;
struct nvkm_vma *bar1;
-
- struct mutex mutex;
- struct list_head list;
} userd;
struct {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 8145796ffc61..a2333cfe6955 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -55,5 +55,4 @@ int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int ga102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
-int ad102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 8d2e170883e1..ca83caa55157 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -13,7 +13,5 @@ struct nvkm_nvdec {
int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
int tu102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
-int ga100_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
int ga102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
-int ad102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 018c58fc32ba..1f6eef13f872 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -13,6 +13,4 @@ struct nvkm_nvenc {
int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
int tu102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
-int ga102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
-int ad102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
deleted file mode 100644
index 80b7933a789e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_NVJPG_H__
-#define __NVKM_NVJPG_H__
-#include <core/engine.h>
-
-int ga100_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ad102_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
deleted file mode 100644
index e72e2115333b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_OFA_H__
-#define __NVKM_OFA_H__
-#include <core/engine.h>
-
-int ga100_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ga102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ad102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 5b798a1a313d..e0d777a933e1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -102,6 +102,9 @@ int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gh100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gb100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gb202_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h
new file mode 100644
index 000000000000..8a3dbb1cbb46
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_FSP_H__
+#define __NVKM_FSP_H__
+#include <core/subdev.h>
+#include <core/falcon.h>
+
+struct nvkm_fsp {
+ const struct nvkm_fsp_func *func;
+ struct nvkm_subdev subdev;
+
+ struct nvkm_falcon falcon;
+};
+
+bool nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *, u32 hash_size, u32 pkey_size, u32 sig_size);
+int nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+
+int gh100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+int gb100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+int gb202_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+#endif
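
A sketch of the call order these declarations imply, assuming a caller first checks that the FSP supports the FMC's signature-block sizes before booting it; all parameters are passed through unchanged and the wrapper name is illustrative:

	static int
	fsp_boot(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume,
		 u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig,
		 u32 hash_size, u32 pkey_size, u32 sig_size)
	{
		if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_size, pkey_size, sig_size))
			return -EINVAL;

		return nvkm_fsp_boot_gsp_fmc(fsp, args_addr, rsvd_size, resume,
					     img_addr, hash, pkey, sig);
	}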
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 746e126c3ecf..226c7ec56b8e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -17,6 +17,9 @@ struct nvkm_gsp_mem {
dma_addr_t addr;
};
+int nvkm_gsp_mem_ctor(struct nvkm_gsp *, size_t size, struct nvkm_gsp_mem *);
+void nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *);
+
struct nvkm_gsp_radix3 {
struct nvkm_gsp_mem lvl0;
struct nvkm_gsp_mem lvl1;
@@ -31,6 +34,29 @@ typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
struct nvkm_gsp_event;
typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
+/**
+ * DOC: GSP message handling policy
+ *
+ * When sending a GSP RPC command, the reply message can be handled in
+ * several ways, depending on the requirements of the caller and the
+ * nature of the command.
+ *
+ * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the
+ * caller after the GSP RPC command is issued.
+ *
+ * NVKM_GSP_RPC_REPLY_RECV - If specified, wait for and receive the entire
+ * GSP RPC reply message after the command is issued.
+ *
+ * NVKM_GSP_RPC_REPLY_POLL - If specified, poll for the specific reply and
+ * discard its contents before returning to the caller.
+ *
+ */
+enum nvkm_gsp_rpc_reply_policy {
+ NVKM_GSP_RPC_REPLY_NOWAIT = 0,
+ NVKM_GSP_RPC_REPLY_RECV,
+ NVKM_GSP_RPC_REPLY_POLL,
+};
+
struct nvkm_gsp {
const struct nvkm_gsp_func *func;
struct nvkm_subdev subdev;
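
A sketch of how the reply policy flows through the wrappers changed later in this diff: a fire-and-forget command passes NVKM_GSP_RPC_REPLY_NOWAIT to nvkm_gsp_rpc_wr(), while nvkm_gsp_rpc_rd() uses NVKM_GSP_RPC_REPLY_RECV internally. The function number "fn" and the payload below are placeholders.

	static int
	rpc_notify(struct nvkm_gsp *gsp, u32 fn, u32 value)
	{
		u32 *argv = nvkm_gsp_rpc_get(gsp, fn, sizeof(*argv));

		if (IS_ERR_OR_NULL(argv))
			return argv ? PTR_ERR(argv) : -ENOMEM;

		*argv = value;
		return nvkm_gsp_rpc_wr(gsp, argv, NVKM_GSP_RPC_REPLY_NOWAIT);
	}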
@@ -42,6 +68,9 @@ struct nvkm_gsp {
const struct firmware *load;
const struct firmware *unload;
} booter;
+
+ const struct firmware *fmc;
+
const struct firmware *bl;
const struct firmware *rm;
} fws;
@@ -89,6 +118,15 @@ struct nvkm_gsp {
struct {
struct nvkm_gsp_mem fw;
+ u8 *hash;
+ u8 *pkey;
+ u8 *sig;
+
+ struct nvkm_gsp_mem args;
+ } fmc;
+
+ struct {
+ struct nvkm_gsp_mem fw;
u32 code_offset;
u32 data_offset;
u32 manifest_offset;
@@ -107,6 +145,7 @@ struct nvkm_gsp {
struct sg_table sgt;
struct nvkm_gsp_radix3 radix3;
struct nvkm_gsp_mem meta;
+ struct sg_table fbsr;
} sr;
struct {
@@ -186,31 +225,7 @@ struct nvkm_gsp {
u8 tpcs;
} gr;
- const struct nvkm_gsp_rm {
- void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
- void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
- void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
-
- void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
- int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc);
- void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
-
- void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
- void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv);
- void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv);
-
- int (*rm_free)(struct nvkm_gsp_object *);
-
- int (*client_ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *);
- void (*client_dtor)(struct nvkm_gsp_client *);
-
- int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
- void (*device_dtor)(struct nvkm_gsp_device *);
-
- int (*event_ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
- nvkm_gsp_event_func, struct nvkm_gsp_event *);
- void (*event_dtor)(struct nvkm_gsp_event *);
- } *rm;
+ struct nvkm_rm *rm;
struct {
struct mutex mutex;
@@ -248,16 +263,19 @@ nvkm_gsp_rm(struct nvkm_gsp *gsp)
return gsp && (gsp->fws.rm || gsp->fw.img);
}
+#include <rm/rm.h>
+
static inline void *
nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
{
- return gsp->rm->rpc_get(gsp, fn, argc);
+ return gsp->rm->api->rpc->get(gsp, fn, argc);
}
static inline void *
-nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc)
{
- return gsp->rm->rpc_push(gsp, argv, wait, repc);
+ return gsp->rm->api->rpc->push(gsp, argv, policy, repc);
}
static inline void *
@@ -268,13 +286,14 @@ nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
if (IS_ERR_OR_NULL(argv))
return argv;
- return nvkm_gsp_rpc_push(gsp, argv, true, argc);
+ return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, argc);
}
static inline int
-nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy)
{
- void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
+ void *repv = nvkm_gsp_rpc_push(gsp, argv, policy, 0);
if (IS_ERR(repv))
return PTR_ERR(repv);
@@ -285,19 +304,19 @@ nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
static inline void
nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
{
- gsp->rm->rpc_done(gsp, repv);
+ gsp->rm->api->rpc->done(gsp, repv);
}
static inline void *
nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
{
- return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
+ return object->client->gsp->rm->api->ctrl->get(object, cmd, argc);
}
static inline int
nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
- return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
+ return object->client->gsp->rm->api->ctrl->push(object, argv, repc);
}
static inline void *
@@ -328,7 +347,7 @@ nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
static inline void
nvkm_gsp_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
- object->client->gsp->rm->rm_ctrl_done(object, repv);
+ object->client->gsp->rm->api->ctrl->done(object, repv);
}
static inline void *
@@ -343,7 +362,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
object->parent = parent;
object->handle = handle;
- argv = gsp->rm->rm_alloc_get(object, oclass, argc);
+ argv = gsp->rm->api->alloc->get(object, oclass, argc);
if (IS_ERR_OR_NULL(argv)) {
object->client = NULL;
return argv;
@@ -355,7 +374,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
static inline void *
nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = object->client->gsp->rm->rm_alloc_push(object, argv);
+ void *repv = object->client->gsp->rm->api->alloc->push(object, argv);
if (IS_ERR(repv))
object->client = NULL;
@@ -377,7 +396,7 @@ nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
static inline void
nvkm_gsp_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
{
- object->client->gsp->rm->rm_alloc_done(object, repv);
+ object->client->gsp->rm->api->alloc->done(object, repv);
}
static inline int
@@ -395,39 +414,29 @@ nvkm_gsp_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 ar
static inline int
nvkm_gsp_rm_free(struct nvkm_gsp_object *object)
{
- if (object->client)
- return object->client->gsp->rm->rm_free(object);
+ if (object->client) {
+ int ret = object->client->gsp->rm->api->alloc->free(object);
+ object->client = NULL;
+ return ret;
+ }
return 0;
}
-static inline int
-nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
-{
- if (WARN_ON(!gsp->rm))
- return -ENOSYS;
-
- return gsp->rm->client_ctor(gsp, client);
-}
-
-static inline void
-nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
-{
- if (client->gsp)
- client->gsp->rm->client_dtor(client);
-}
+int nvkm_gsp_client_ctor(struct nvkm_gsp *, struct nvkm_gsp_client *);
+void nvkm_gsp_client_dtor(struct nvkm_gsp_client *);
static inline int
nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
{
- return client->gsp->rm->device_ctor(client, device);
+ return client->gsp->rm->api->device->ctor(client, device);
}
static inline void
nvkm_gsp_device_dtor(struct nvkm_gsp_device *device)
{
if (device->object.client)
- device->object.client->gsp->rm->device_dtor(device);
+ device->object.client->gsp->rm->api->device->dtor(device);
}
static inline int
@@ -459,7 +468,9 @@ static inline int
nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
- return device->object.client->gsp->rm->event_ctor(device, handle, id, func, event);
+ struct nvkm_rm *rm = device->object.client->gsp->rm;
+
+ return rm->api->device->event.ctor(device, handle, id, func, event);
}
static inline void
@@ -468,7 +479,7 @@ nvkm_gsp_event_dtor(struct nvkm_gsp_event *event)
struct nvkm_gsp_device *device = event->device;
if (device)
- device->object.client->gsp->rm->event_dtor(event);
+ device->object.client->gsp->rm->api->device->event.dtor(event);
}
int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
@@ -479,5 +490,8 @@ int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_
int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gh100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gb100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gb202_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
#endif
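
A caller sketch (an assumption, not part of the patch) contrasting the reply policies defined in the hunks above; fn, argc and repc stand in for a real r535 RPC function number and its argument/reply sizes, and the error handling mirrors the inline wrappers shown in the diff.

static int
example_rpc(struct nvkm_gsp *gsp, u32 fn, u32 argc, u32 repc)
{
	void *argv, *repv;
	int ret;

	/* Fire-and-forget: return as soon as the command is issued. */
	argv = nvkm_gsp_rpc_get(gsp, fn, argc);
	if (IS_ERR_OR_NULL(argv))
		return argv ? PTR_ERR(argv) : -ENOMEM;
	/* ... fill in the command arguments in argv ... */
	ret = nvkm_gsp_rpc_wr(gsp, argv, NVKM_GSP_RPC_REPLY_NOWAIT);
	if (ret)
		return ret;

	/* Wait for the reply; the caller must release it when done. */
	argv = nvkm_gsp_rpc_get(gsp, fn, argc);
	if (IS_ERR_OR_NULL(argv))
		return argv ? PTR_ERR(argv) : -ENOMEM;
	repv = nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, repc);
	if (IS_ERR_OR_NULL(repv))
		return repv ? PTR_ERR(repv) : -ENOMEM;
	/* ... consume the reply payload in repv ... */
	nvkm_gsp_rpc_done(gsp, repv);
	return 0;
}

Per the policy descriptions above, NVKM_GSP_RPC_REPLY_POLL would wait like RECV but discard the reply internally, so the caller never sees the payload.
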
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index e10cbd9203ec..db835cf7b8ac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -24,11 +24,6 @@ struct nvkm_instmem {
struct nvkm_ramht *ramht;
struct nvkm_memory *ramro;
struct nvkm_memory *ramfc;
-
- struct {
- struct sg_table fbsr;
- bool fbsr_valid;
- } rm;
};
u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
@@ -41,4 +36,5 @@ int nv04_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nv
int nv40_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
int nv50_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
int gk20a_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int gh100_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 935b1cacd528..abcb0dbcde70 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -8,7 +8,7 @@ struct nvkm_vma {
struct list_head head;
struct rb_node tree;
u64 addr;
- u64 size:50;
+ u64 size;
bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
@@ -73,6 +73,7 @@ struct nvkm_vmm {
struct nvkm_gsp_object object;
struct nvkm_vma *rsvd;
+ bool external;
} rm;
};
@@ -165,4 +166,5 @@ int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gh100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index 3c103101d5fc..112b674ed9c8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -50,6 +50,7 @@ int gf100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gf106_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
int gk104_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
int gp100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gh100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
deleted file mode 100644
index 7a3fc023072d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0000_h__
-#define __src_common_sdk_nvidia_inc_class_cl0000_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV0000_ALLOC_PARAMETERS {
- NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
- NvU32 processID;
- char processName[NV_PROC_NAME_MAX_LENGTH];
-} NV0000_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
deleted file mode 100644
index e4de36d63666..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0005_h__
-#define __src_common_sdk_nvidia_inc_class_cl0005_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0005_ALLOC_PARAMETERS {
- NvHandle hParentClient;
- NvHandle hSrcResource;
-
- NvV32 hClass;
- NvV32 notifyIndex;
- NV_DECLARE_ALIGNED(NvP64 data, 8);
-} NV0005_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
deleted file mode 100644
index 8868118e47d6..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0080_h__
-#define __src_common_sdk_nvidia_inc_class_cl0080_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV0080_ALLOC_PARAMETERS {
- NvU32 deviceId;
- NvHandle hClientShare;
- NvHandle hTargetClient;
- NvHandle hTargetDevice;
- NvV32 flags;
- NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
- NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
- NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
- NvV32 vaMode;
-} NV0080_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
deleted file mode 100644
index 9040ea5608a0..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl2080_h__
-#define __src_common_sdk_nvidia_inc_class_cl2080_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV2080_ALLOC_PARAMETERS {
- NvU32 subDeviceId;
-} NV2080_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
deleted file mode 100644
index ba659d6477d3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
-#define __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_NOTIFIERS_HOTPLUG (1)
-
-#define NV2080_NOTIFIERS_DP_IRQ (7)
-
-#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
-#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
-
-#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
-
-#define NV2080_ENGINE_TYPE_BSP (0x00000013)
-#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
-
-#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
-#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
-
-#define NV2080_ENGINE_TYPE_SW (0x00000022)
-
-#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
-
-#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
-#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
-
-#define NV2080_ENGINE_TYPE_OFA (0x00000033)
-
-typedef struct {
- NvU32 plugDisplayMask;
- NvU32 unplugDisplayMask;
-} Nv2080HotplugNotification;
-
-typedef struct Nv2080DpIrqNotificationRec {
- NvU32 displayId;
-} Nv2080DpIrqNotification;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
deleted file mode 100644
index 9eb780a1ac72..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl84a0_h__
-#define __src_common_sdk_nvidia_inc_class_cl84a0_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
-
-#define NV01_MEMORY_LIST_FBMEM (0x00000082)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
deleted file mode 100644
index f1d21776e395..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl90f1_h__
-#define __src_common_sdk_nvidia_inc_class_cl90f1_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define FERMI_VASPACE_A (0x000090f1)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
deleted file mode 100644
index b8f32576cfaa..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
-#define __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NVC0B5_ALLOCATION_PARAMETERS {
- NvU32 version;
- NvU32 engineType;
-} NVC0B5_ALLOCATION_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
deleted file mode 100644
index 58b3ba7badf1..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
- NvBool bDscSupported;
- NvU32 encoderColorFormatMask;
- NvU32 lineBufferSizeKB;
- NvU32 rateBufferSizeKB;
- NvU32 bitsPerPixelPrecision;
- NvU32 maxNumHztSlices;
- NvU32 lineBufferBitDepth;
-} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
deleted file mode 100644
index 596f2ea8344e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 flags;
- NvU32 flags2;
-} NV0073_CTRL_DFP_GET_INFO_PARAMS;
-
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
-#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
-#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
-#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
-#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
-
-#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
-
-typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 numELDSize;
- NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
- NvU32 maxFreqSupported;
- NvU32 ctrl;
- NvU32 deviceEntry;
-} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
-
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
-
-#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvBool enable;
-} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
-
-typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
-
-typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
- NvU32 displayMask;
- NvU32 sorType;
-} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
-
-#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
-
-typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU8 sorExcludeMask;
- NvU32 slaveDisplayId;
- NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
- NvBool bIs2Head1Or;
- NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
- NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
- NvU8 reservedSorMask;
- NvU32 flags;
-} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
-
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
deleted file mode 100644
index bae4b1997736..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
+++ /dev/null
@@ -1,335 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
-
-typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvBool bAddrOnly;
- NvU32 cmd;
- NvU32 addr;
- NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
- NvU32 size;
- NvU32 replyType;
- NvU32 retryTimeMs;
-} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
-
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
-
-#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 cmd;
- NvU32 data;
- NvU32 err;
- NvU32 retryTimeMs;
- NvU32 eightLaneDpcdBaseAddr;
-} NV0073_CTRL_DP_CTRL_PARAMS;
-
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_UNUSED 3:3
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
-
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
-
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
-#define NV0073_CTRL_DP_DATA_TARGET 22:19
-#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
-
-#define NV0073_CTRL_MAX_LANES 8U
-
-typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 numLanes;
- NvU32 data[NV0073_CTRL_MAX_LANES];
-} NV0073_CTRL_DP_LANE_DATA_PARAMS;
-
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
-
-#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 mute;
-} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 preferredDisplayId;
-
- NvBool force;
- NvBool useBFM;
-
- NvU32 displayIdAssigned;
- NvU32 allDisplayMask;
-} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
-} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 head;
- NvU32 sorIndex;
- NvU32 dpLink;
-
- NvBool bEnableOverride;
- NvBool bMST;
- NvU32 singleHeadMultistreamMode;
- NvU32 hBlankSym;
- NvU32 vBlankSym;
- NvU32 colorFormat;
- NvBool bEnableTwoHeadOneOr;
-
- struct {
- NvU32 slotStart;
- NvU32 slotEnd;
- NvU32 PBN;
- NvU32 Timeslice;
- NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT
- NvU32 singleHeadMSTPipeline;
- NvBool bEnableAudioOverRightPanel;
- } MST;
-
- struct {
- NvBool bEnhancedFraming;
- NvU32 tuSize;
- NvU32 waterMark;
- NvU32 actualPclkHz; // deprecated -Use MvidWarParams
- NvU32 linkClkFreqHz; // deprecated -Use MvidWarParams
- NvBool bEnableAudioOverRightPanel;
- struct {
- NvU32 activeCnt;
- NvU32 activeFrac;
- NvU32 activePolarity;
- NvBool mvidWarEnabled;
- struct {
- NvU32 actualPclkHz;
- NvU32 linkClkFreqHz;
- } MvidWarParams;
- } Legacy;
- } SST;
-} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
- NvU32 subDeviceInstance;
-} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
-
-typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 sorIndex;
- NvU32 maxLinkRate;
- NvU32 dpVersionsSupported;
- NvU32 UHBRSupported;
- NvBool bIsMultistreamSupported;
- NvBool bIsSCEnabled;
- NvBool bHasIncreasedWatermarkLimits;
- NvBool bIsPC2Disabled;
- NvBool isSingleHeadMSTSupported;
- NvBool bFECSupported;
- NvBool bIsTrainPhyRepeater;
- NvBool bOverrideLinkBw;
- NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
-} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
-
-#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
-
-typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
- // In
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
-
- // Out
- NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
- NvU8 linkBwCount;
-} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
-
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
-
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
deleted file mode 100644
index 954958dcf834..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
+++ /dev/null
@@ -1,216 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 bufferSize;
- NvU32 flags;
- NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
-} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_MAX_CONNECTORS 4U
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 flags;
- NvU32 DDCPartners;
- NvU32 count;
- struct {
- NvU32 index;
- NvU32 type;
- NvU32 location;
- } data[NV0073_CTRL_MAX_CONNECTORS];
- NvU32 platform;
-} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
- NvU8 subDeviceInstance;
- NvU32 displayId;
- NvU8 enable;
-} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
- NvU8 subDeviceInstance;
- NvU32 displayId;
- NvU8 mute;
-} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 headMask;
-} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 transmitControl;
- NvU32 packetSize;
- NvU32 targetHead;
- NvBool bUsePsrHeadforSdp;
- NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
-} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
-
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
-
-#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 index;
- NvU32 type;
- NvU32 protocol;
- NvU32 ditherType;
- NvU32 ditherAlgo;
- NvU32 location;
- NvU32 rootPortId;
- NvU32 dcbIndex;
- NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
- NvBool bIsLitByVbios;
- NvBool bIsDispDynamic;
-} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
-
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
-
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 brightness;
- NvBool bUncalibrated;
-} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 caps;
-} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
-
-#endif
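
The GET_EDID_V2 control deleted above is the GSP-RM path for reading a connector's EDID. A minimal sketch of the params-in/params-out call pattern, assuming a hypothetical rm_ctrl(object, cmd, params, size) wrapper and illustrative disp/outp_index/consume_edid names (the real driver goes through its GSP RPC helpers):

    NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *edid;
    int ret;

    edid = kzalloc(sizeof(*edid), GFP_KERNEL); /* 2KiB payload: keep it off the stack */
    if (!edid)
            return -ENOMEM;

    edid->subDeviceInstance = 0;               /* unicast to subdevice 0 */
    edid->displayId = BIT(outp_index);         /* displayId is a single-bit mask */
    edid->bufferSize = NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES;

    ret = rm_ctrl(disp, NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, edid, sizeof(*edid));
    if (!ret && edid->bufferSize <= sizeof(edid->edidBuffer))
            consume_edid(edid->edidBuffer, edid->bufferSize);
    kfree(edid);
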
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
deleted file mode 100644
index d69cef3c01fd..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 flags;
- NvU32 numHeads;
-} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayMask;
- NvU32 displayMaskDDC;
-} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 flags;
- NvU32 displayMask;
- NvU32 retryTimeMs;
-} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 head;
- NvU32 flags;
- NvU32 displayId;
-} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
-
-#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
-
-#endif
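
GET_CONNECT_STATE has the same shape; a sketch probing a single display, with rm_ctrl()/disp/outp_index again hypothetical. The assumption here (not confirmed by this excerpt) is that the RM narrows displayMask in place to the subset it found connected:

    NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS conn = {};
    bool connected = false;

    conn.subDeviceInstance = 0;
    conn.displayMask = BIT(outp_index); /* displays to probe */
    conn.flags = 0;                     /* default probe behaviour */

    if (!rm_ctrl(disp, NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, &conn, sizeof(conn)))
            connected = conn.displayMask & BIT(outp_index); /* assumed out-param */
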
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
deleted file mode 100644
index 3db099e62364..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
- NvU32 totalVFs;
- NvU32 firstVfOffset;
- NvU32 vfFeatureMask;
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
- NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
- NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
- NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
- NvBool b64bitBar0;
- NvBool b64bitBar1;
- NvBool b64bitBar2;
- NvBool bSriovEnabled;
- NvBool bSriovHeavyEnabled;
- NvBool bEmulateVFBar0TlbInvalidationRegister;
- NvBool bClientRmAllocatedCtxBuffer;
-} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
deleted file mode 100644
index ed01df925573..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
deleted file mode 100644
index b5b7631de99b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
- NvU32 BoardID;
- char chipSKU[4];
- char chipSKUMod[2];
- char project[5];
- char projectSKU[5];
- char CDP[6];
- char projectSKUMod[2];
- NvU32 businessCycle;
-} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
deleted file mode 100644
index fe912d2bd183..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
- NvU32 size;
-} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
-
-#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
deleted file mode 100644
index 87bc4ff92ce1..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
- NvU32 event;
- NvU32 action;
- NvBool bNotifyState;
- NvU32 info32;
- NvU16 info16;
-} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
-
-#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
-
-#endif
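
SET_NOTIFICATION arms delivery of one RM event class; ACTION_REPEAT (the only action value kept in this excerpt) requests persistent rather than one-shot delivery. A sketch, with rm_ctrl()/subdev/event_id as hypothetical stand-ins:

    NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS ev = {};
    int ret;

    ev.event = event_id; /* index of the RM event being armed */
    ev.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
    ev.bNotifyState = NV_FALSE;

    ret = rm_ctrl(subdev, NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, &ev, sizeof(ev));
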
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
deleted file mode 100644
index 68c81f9f803c..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
-
-typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
-
-typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
- NV_DECLARE_ALIGNED(NvU64 base, 8);
- NV_DECLARE_ALIGNED(NvU64 limit, 8);
- NV_DECLARE_ALIGNED(NvU64 reserved, 8);
- NvU32 performance;
- NvBool supportCompressed;
- NvBool supportISO;
- NvBool bProtected;
- NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
-} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
-
-#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
-
-typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
- NvU32 numFBRegions;
- NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
-} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
-
-#endif
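
The region table describes VRAM ranges plus per-region attributes (ISO and compression support, protected status, a per-memtype blacklist). A sketch of walking it to total usable framebuffer; the matching command constant is not part of this excerpt, so FB_GET_FB_REGION_INFO_CMD is a stand-in, and rm_ctrl()/subdev stay hypothetical:

    NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *fb;
    NvU64 usable = 0;
    NvU32 i;

    fb = kzalloc(sizeof(*fb), GFP_KERNEL); /* 16-entry table: too big for the stack */
    if (!fb)
            return -ENOMEM;
    if (!rm_ctrl(subdev, FB_GET_FB_REGION_INFO_CMD, fb, sizeof(*fb))) {
            for (i = 0; i < fb->numFBRegions; i++)
                    usable += fb->fbRegion[i].limit - fb->fbRegion[i].base + 1;
    }
    kfree(fb);
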
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
deleted file mode 100644
index bc0f63699b06..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
-
-typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
- NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
- NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
- NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
- NvU32 numPbdmas;
- char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
-} NV2080_CTRL_FIFO_DEVICE_ENTRY;
-
-typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
- NvU32 baseIndex;
- NvU32 numEntries;
- NvBool bMore;
- // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
- NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
-} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
-
-#endif
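
baseIndex/numEntries/bMore encode a paging protocol: the engine table is fetched in chunks of up to MAX_ENTRIES until bMore reads false, which keeps each call at a fixed, bounded size. A sketch of the loop, with rm_ctrl()/subdev/add_engine() hypothetical:

    NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *tbl;
    NvU32 base = 0, i;

    tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
    if (!tbl)
            return -ENOMEM;
    do {
            memset(tbl, 0, sizeof(*tbl));
            tbl->baseIndex = base;
            if (rm_ctrl(subdev, NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE,
                        tbl, sizeof(*tbl)))
                    break;
            for (i = 0; i < tbl->numEntries; i++)
                    add_engine(&tbl->entries[i]);
            base += NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES;
    } while (tbl->bMore);
    kfree(tbl);
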
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
deleted file mode 100644
index 29d7a1052142..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
-
-#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
-
-#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
-
-typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
- NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
- NvU32 physAttr;
- NvU16 bufferId;
- NvU8 bInitialize;
- NvU8 bNonmapped;
-} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
-
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
-
-#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
-
-#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
- NvU32 engineType;
- NvHandle hClient;
- NvU32 ChID;
- NvHandle hChanClient;
- NvHandle hObject;
- NvHandle hVirtMemory;
- NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
- NvU32 entryCount;
- // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
- NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
-} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
- NvU32 gpcMask;
-} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
- NvU32 gpcId;
- NvU32 tpcMask;
-} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
- NvU32 gpcId;
- NvU32 zcullMask;
-} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
-
-#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
-
-typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
- NvU32 index;
- NvU32 flags;
- NvU32 length;
- NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
-} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
-
-#endif
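
PROMOTE_CTX hands a channel's context buffers to physical RM, one promoteEntry per bufferId. A sketch registering just the main context buffer; handle and address names are illustrative, rm_ctrl()/subdev hypothetical:

    NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *p;
    int ret;

    p = kzalloc(sizeof(*p), GFP_KERNEL);
    if (!p)
            return -ENOMEM;
    p->engineType = engine_type;        /* RM engine type the channel runs on */
    p->hChanClient = client_handle;
    p->hObject = chan_handle;
    p->entryCount = 1;
    p->promoteEntry[0].bufferId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN;
    p->promoteEntry[0].gpuPhysAddr = buf_addr;
    p->promoteEntry[0].size = buf_size;
    p->promoteEntry[0].bInitialize = 1; /* ask RM to initialize the buffer */
    ret = rm_ctrl(subdev, NV2080_CTRL_CMD_GPU_PROMOTE_CTX, p, sizeof(*p));
    kfree(p);
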
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
deleted file mode 100644
index 59f8895bc5d7..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
-} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
deleted file mode 100644
index e11b2dbe5288..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
- NvU32 feHwSysCap;
- NvU32 windowPresentMask;
- NvBool bFbRemapperEnabled;
- NvU32 numHeads;
- NvBool bPrimaryVga;
- NvU32 i2cPort;
- NvU32 internalDispActiveMask;
-} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
-
-#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
-
-#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
-
-typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
- NvU32 size;
- NvU32 alignment;
-} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
-
-typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
- NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
-} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
-
-typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
- NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
-} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
- NvU32 engDesc;
- NvU32 ctxAttr;
- NvU32 ctxBufferSize;
- NvU32 addrSpaceList;
- NvU32 registerBase;
-} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
-#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
-
-#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
- NvU32 numConstructedFalcons;
- NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
-} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
- NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
- NvU32 instMemAddrSpace;
- NvU32 instMemCpuCacheAttr;
-} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
- NvU32 addressSpace;
- NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 limit, 8);
- NvU32 cacheSnoop;
- NvU32 hclass;
- NvU32 channelInstance;
- NvBool valid;
-} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
-
-typedef enum NV2080_INTR_CATEGORY {
- NV2080_INTR_CATEGORY_DEFAULT = 0,
- NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
- NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
- NV2080_INTR_CATEGORY_RUNLIST = 3,
- NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
- NV2080_INTR_CATEGORY_UVM_OWNED = 5,
- NV2080_INTR_CATEGORY_UVM_SHARED = 6,
- NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
-} NV2080_INTR_CATEGORY;
-
-typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
- NvU8 subtreeStart;
- NvU8 subtreeEnd;
-} NV2080_INTR_CATEGORY_SUBTREE_MAP;
-
-typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
- NvU16 engineIdx;
- NvU32 pmcIntrMask;
- NvU32 vectorStall;
- NvU32 vectorNonStall;
-} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
-
-typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
- NvU32 tableLen;
- NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
- NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
-} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
- NvU32 fbsrType;
- NvU32 numRegions;
- NvHandle hClient;
- NvHandle hSysMem;
- NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
- NvBool bEnteringGcoffState;
-} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
- NvU32 fbsrType;
- NvHandle hClient;
- NvHandle hVidMem;
- NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
- NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
-} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
-
-typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
- NvU32 status;
- NvU16 backLightDataSize;
- NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
-} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
deleted file mode 100644
index 977e59818533..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GMMU_FMT_MAX_LEVELS 6U
-
-#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
-
-typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
- /*!
- * [in] GPU sub-device handle - this API only supports unicast.
- * Pass 0 to use subDeviceId instead.
- */
- NvHandle hSubDevice;
-
- /*!
- * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
- */
- NvU32 subDeviceId;
-
- /*!
- * [in] Page size (VA coverage) of the level to reserve.
- * This need not be a leaf (page table) page size - it can be
- * the coverage of an arbitrary level (including root page directory).
- */
- NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
-
- /*!
- * [in] First GPU virtual address of the range to reserve.
- * This must be aligned to pageSize.
- */
- NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
-
- /*!
- * [in] Last GPU virtual address of the range to reserve.
- * This (+1) must be aligned to pageSize.
- */
- NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
-
- /*!
- * [in] Number of PDE levels to copy.
- */
- NvU32 numLevelsToCopy;
-
- /*!
- * [in] Per-level information.
- */
- struct {
- /*!
- * Physical address of this page level instance.
- */
- NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
-
- /*!
- * Size in bytes allocated for this level instance.
- */
- NV_DECLARE_ALIGNED(NvU64 size, 8);
-
- /*!
- * Aperture in which this page level instance resides.
- */
- NvU32 aperture;
-
- /*!
- * Page shift corresponding to the level
- */
- NvU8 pageShift;
- } levels[GMMU_FMT_MAX_LEVELS];
-} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
-
-#endif
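
COPY_SERVER_RESERVED_PDES mirrors client-managed page-directory levels into the server's view of the VA space; the field docs above spell out the alignment rules. A sketch copying a single level, with rm_ctrl()/vaspace and all lowercase values illustrative (the aperture encoding is not part of this excerpt):

    NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS pdes = {};
    int ret;

    pdes.hSubDevice = 0;                /* 0 => select by subDeviceId */
    pdes.subDeviceId = 0;
    pdes.pageSize = 1ULL << page_shift; /* VA coverage of this level */
    pdes.virtAddrLo = resv_start;       /* pageSize-aligned */
    pdes.virtAddrHi = resv_end - 1;     /* +1 must be pageSize-aligned */
    pdes.numLevelsToCopy = 1;
    pdes.levels[0].physAddress = pd_addr;
    pdes.levels[0].size = pd_bytes;
    pdes.levels[0].aperture = aperture;
    pdes.levels[0].pageShift = page_shift;

    ret = rm_ctrl(vaspace, NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
                  &pdes, sizeof(pdes));
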
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
deleted file mode 100644
index 684045796232..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
-
-typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
- NvBool bEnable;
- NvBool bSkipSubmit;
-} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
-
-#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
-
-typedef struct NVA06F_CTRL_BIND_PARAMS {
- NvU32 engineType;
-} NVA06F_CTRL_BIND_PARAMS;
-
-#endif
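
GPFIFO_SCHEDULE is typically the last step of channel setup: it makes the channel eligible to run. A sketch, with rm_ctrl()/chan hypothetical:

    NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS sched = {
            .bEnable = NV_TRUE,     /* put the channel on the runlist */
            .bSkipSubmit = NV_FALSE,
    };
    int ret;

    ret = rm_ctrl(chan, NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, &sched, sizeof(sched));
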
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
deleted file mode 100644
index 5c5a004a8031..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_nvlimits_h__
-#define __src_common_sdk_nvidia_inc_nvlimits_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV_MAX_SUBDEVICES 8
-
-#define NV_PROC_NAME_MAX_LENGTH 100U
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
deleted file mode 100644
index 51b5591c603e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_nvos_h__
-#define __src_common_sdk_nvidia_inc_nvos_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVOS02_FLAGS_PHYSICALITY 7:4
-#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
-#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
-#define NVOS02_FLAGS_LOCATION 11:8
-#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
-#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
-#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
-#define NVOS02_FLAGS_COHERENCY 15:12
-#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
-#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
-#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
-#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
-#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
-#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
-#define NVOS02_FLAGS_ALLOC 17:16
-#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
-#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
-#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
-#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
-
-#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
-#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
-#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
-
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
-#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
-
-#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
-#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
-#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
-
-#define NVOS02_FLAGS_MAPPING 31:30
-#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
-#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
-
-#define NV01_EVENT_CLIENT_RM (0x04000000)
-
-typedef struct
-{
- NvV32 channelInstance; // One of the n channel instances of a given channel type.
- // Note that core channel has only one instance
- // while all others have two (one per head).
- NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
- NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
- NvU32 offset; // Initial offset for put/get, usually zero.
- NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
-
- NvU32 flags;
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
-
-} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvV32 channelInstance; // One of the n channel instances of a given channel type.
- // All PIO channels have two instances (one per head).
- NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
- NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
-} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances;
- NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
-} NV_BSP_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
- NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
-} NV_MSENC_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
- NvU32 engineInstance;
-} NV_NVJPG_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
-} NV_OFA_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 index;
- NvV32 flags;
- NvU64 vaSize NV_ALIGN_BYTES(8);
- NvU64 vaStartInternal NV_ALIGN_BYTES(8);
- NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
- NvU32 bigPageSize;
- NvU64 vaBase NV_ALIGN_BYTES(8);
-} NV_VASPACE_ALLOCATION_PARAMETERS;
-
-#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW                 0x00 //!< Create new VASpace, by default
-
-#endif
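
The h:l pairs in the NVOS02_FLAGS defines above are NVIDIA's DRF-style bit-range notation: NVOS02_FLAGS_LOCATION 11:8 means the location field occupies bits 11..8 of the 32-bit flags word. Below is a minimal stand-alone sketch of packing such a word with plain shifts; the NV_FIELD_* helpers are illustrative, not part of this header, and in-tree consumers use their own DRF macros.

/* Illustrative helpers mirroring the h:l bit-range notation above. */
#define NV_FIELD_MASK(hi, lo)   ((0xffffffffU >> (31 - (hi) + (lo))) << (lo))
#define NV_FIELD_SET(hi, lo, v) (((NvU32)(v) << (lo)) & NV_FIELD_MASK(hi, lo))

/* Request cached VRAM that the kernel never maps:
 * LOCATION is bits 11:8, COHERENCY 15:12, MAPPING 31:30. */
NvU32 flags = NV_FIELD_SET(11, 8, NVOS02_FLAGS_LOCATION_VIDMEM) |
              NV_FIELD_SET(15, 12, NVOS02_FLAGS_COHERENCY_CACHED) |
              NV_FIELD_SET(31, 30, NVOS02_FLAGS_MAPPING_NO_MAP);
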
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
deleted file mode 100644
index 0e32e71e123f..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef __src_common_shared_msgq_inc_msgq_msgq_priv_h__
-#define __src_common_shared_msgq_inc_msgq_msgq_priv_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * msgqTxHeader -- TX queue data structure
- * @version: the version of this structure, must be 0
- * @size: the size of the entire queue, including this header
- * @msgSize: the padded size of queue element, 16 is minimum
- * @msgCount: the number of elements in this queue
- * @writePtr: head index of this queue
- * @flags: 1 = swap the RX pointers
- * @rxHdrOff: offset of readPtr in this structure
- * @entryOff: offset of beginning of queue (msgqRxHeader), relative to
- * beginning of this structure
- *
- * The command queue is a queue of RPCs that are sent from the driver to the
- * GSP. The status queue is a queue of messages/responses from GSP-RM to the
- * driver. Although the driver allocates memory for both queues, the command
- * queue is owned by the driver and the status queue is owned by GSP-RM. In
- * addition, the headers of the two queues must not share the same 4K page.
- *
- * Each queue is prefixed with this data structure. The idea is that a queue
- * and its header are written to only by their owner. That is, only the
- * driver writes to the command queue and command queue header, and only the
- * GSP writes to the status (receive) queue and its header.
- *
- * This is enforced by the concept of "swapping" the RX pointers. This is
- * why the 'flags' field must be set to 1. 'rxHdrOff' is how the GSP knows
- * where the tail pointer of its status queue is.
- *
- * When the driver writes a new RPC to the command queue, it updates writePtr.
- * When it reads a new message from the status queue, it updates readPtr. In
- * this way, the GSP knows when a new command is in the queue (it polls
- * writePtr) and it knows how much free space is in the status queue (it
- * checks readPtr). The driver never cares about how much free space is in
- * the status queue.
- *
- * As usual, producers write to the head pointer, and consumers read from the
- * tail pointer. When head == tail, the queue is empty.
- *
- * So to summarize:
- * command.writePtr = head of command queue
- * command.readPtr = tail of status queue
- * status.writePtr = head of status queue
- * status.readPtr = tail of command queue
- */
-typedef struct
-{
- NvU32 version; // queue version
- NvU32 size; // bytes, page aligned
- NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
- NvU32 msgCount; // number of entries in queue
- NvU32 writePtr; // message id of next slot
- NvU32 flags; // if set it means "i want to swap RX"
- NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
- NvU32 entryOff; // Offset of entries from start of backing store.
-} msgqTxHeader;
-
-/**
- * msgqRxHeader - RX queue data structure
- * @readPtr: tail index of the other queue
- *
- * Although this is a separate struct, it could easily be merged into
- * msgqTxHeader. msgqTxHeader.rxHdrOff is simply the offset of readPtr
- * from the beginning of msgqTxHeader.
- */
-typedef struct
-{
- NvU32 readPtr; // message id of last message read
-} msgqRxHeader;
-
-#endif
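
The comment block above describes a single-producer, single-consumer ring: the producer advances writePtr (the head), the consumer advances readPtr (the tail), and head == tail means empty. A minimal sketch of the two derived quantities the protocol needs, assuming both indices are always kept in the range [0, msgCount):

/* Messages waiting in a queue (consumer side). */
static NvU32 msgq_pending(const msgqTxHeader *tx, const msgqRxHeader *rx)
{
        /* Modular distance from tail to head; head == tail is empty. */
        return (tx->writePtr + tx->msgCount - rx->readPtr) % tx->msgCount;
}

/* Free slots on the producer side. One slot stays unused so that a
 * full queue remains distinguishable from an empty one. */
static NvU32 msgq_free(const msgqTxHeader *tx, const msgqRxHeader *rx)
{
        return tx->msgCount - msgq_pending(tx, rx) - 1;
}
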
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
deleted file mode 100644
index 83cf1b2c15a3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __src_common_uproc_os_common_include_libos_init_args_h__
-#define __src_common_uproc_os_common_include_libos_init_args_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef NvU64 LibosAddress;
-
-typedef enum {
- LIBOS_MEMORY_REGION_NONE,
- LIBOS_MEMORY_REGION_CONTIGUOUS,
- LIBOS_MEMORY_REGION_RADIX3
-} LibosMemoryRegionKind;
-
-typedef enum {
- LIBOS_MEMORY_REGION_LOC_NONE,
- LIBOS_MEMORY_REGION_LOC_SYSMEM,
- LIBOS_MEMORY_REGION_LOC_FB
-} LibosMemoryRegionLoc;
-
-typedef struct
-{
- LibosAddress id8; // Id tag.
- LibosAddress pa; // Physical address.
- LibosAddress size; // Size of memory area.
- NvU8 kind; // See LibosMemoryRegionKind above.
- NvU8 loc; // See LibosMemoryRegionLoc above.
-} LibosMemoryRegionInitArgument;
-
-#endif
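
Each LibosMemoryRegionInitArgument hands one memory region to LIBOS. A sketch of describing a contiguous system-memory region follows; the tag packing and the "LOGINIT" name are assumptions for illustration, not taken from this header.

#include <string.h>

/* Pack up to eight ASCII characters into the id8 tag (assumed layout). */
static LibosAddress libos_id8(const char *name)
{
        LibosAddress id = 0;
        size_t n = strlen(name);

        memcpy(&id, name, n < 8 ? n : 8);
        return id;
}

static void libos_describe_region(LibosMemoryRegionInitArgument *arg,
                                  NvU64 pa, NvU64 size)
{
        arg->id8  = libos_id8("LOGINIT");  /* illustrative tag */
        arg->pa   = pa;                    /* physical address  */
        arg->size = size;                  /* region size, bytes */
        arg->kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
        arg->loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
}
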
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
deleted file mode 100644
index 73213bdfcbda..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
-#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
-#define GSP_FW_SR_META_REVISION 2
-
-typedef struct
-{
- //
- // Magic
- // Use for verification by Booter
- //
- NvU64 magic; // = GSP_FW_SR_META_MAGIC;
-
- //
- // Revision number
- // Bumped up when we change this interface so it is not backward compatible.
- // Bumped up when we revoke GSP-RM ucode
- //
-    NvU64 revision; // = GSP_FW_SR_META_REVISION;
-
- //
- // ---- Members regarding data in SYSMEM ----------------------------
- // Consumed by Booter for DMA
- //
- NvU64 sysmemAddrOfSuspendResumeData;
- NvU64 sizeOfSuspendResumeData;
-
- // ---- Members for crypto ops across S/R ---------------------------
-
- //
- // HMAC over the entire GspFwSRMeta structure (including padding)
- // with the hmac field itself zeroed.
- //
- NvU8 hmac[32];
-
- // Hash over GspFwWprMeta structure
- NvU8 wprMetaHash[32];
-
- // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
- NvU8 heapFreeListHash[32];
-
- // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
- NvU8 dataHash[32];
-
- //
- // Pad structure to exactly 256 bytes (1 DMA chunk).
- // Padding initialized to zero.
- //
- NvU32 padding[24];
-
-} GspFwSRMeta;
-
-#endif
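
Two properties called out in the comments above are easy to state as code: the structure must be exactly 256 bytes (one DMA chunk), and the HMAC covers the whole structure with the hmac field itself zeroed. A sketch, assuming C11 for the static assertion:

#include <string.h>

_Static_assert(sizeof(GspFwSRMeta) == 256,
               "GspFwSRMeta must be exactly one 256-byte DMA chunk");

/* Build the copy over which the HMAC is computed: identical to the
 * original except that the hmac field is zeroed first. */
static void gsp_sr_meta_for_hmac(GspFwSRMeta *copy, const GspFwSRMeta *meta)
{
        *copy = *meta;
        memset(copy->hmac, 0, sizeof(copy->hmac));
}
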
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
deleted file mode 100644
index a2e141e4b459..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
-#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct
-{
- // Magic
- // BL to use for verification (i.e. Booter locked it in WPR2)
- NvU64 magic; // = 0xdc3aae21371a60b3;
-
- // Revision number of Booter-BL-Sequencer handoff interface
- // Bumped up when we change this interface so it is not backward compatible.
- // Bumped up when we revoke GSP-RM ucode
- NvU64 revision; // = 1;
-
- // ---- Members regarding data in SYSMEM ----------------------------
- // Consumed by Booter for DMA
-
- NvU64 sysmemAddrOfRadix3Elf;
- NvU64 sizeOfRadix3Elf;
-
- NvU64 sysmemAddrOfBootloader;
- NvU64 sizeOfBootloader;
-
- // Offsets inside bootloader image needed by Booter
- NvU64 bootloaderCodeOffset;
- NvU64 bootloaderDataOffset;
- NvU64 bootloaderManifestOffset;
-
- union
- {
- // Used only at initial boot
- struct
- {
- NvU64 sysmemAddrOfSignature;
- NvU64 sizeOfSignature;
- };
-
- //
- // Used at suspend/resume to read GspFwHeapFreeList
- // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
- //
- struct
- {
- NvU32 gspFwHeapFreeListWprOffset;
- NvU32 unused0;
- NvU64 unused1;
- };
- };
-
- // ---- Members describing FB layout --------------------------------
- NvU64 gspFwRsvdStart;
-
- NvU64 nonWprHeapOffset;
- NvU64 nonWprHeapSize;
-
- NvU64 gspFwWprStart;
-
- // GSP-RM to use to setup heap.
- NvU64 gspFwHeapOffset;
- NvU64 gspFwHeapSize;
-
- // BL to use to find ELF for jump
- NvU64 gspFwOffset;
- // Size is sizeOfRadix3Elf above.
-
- NvU64 bootBinOffset;
- // Size is sizeOfBootloader above.
-
- NvU64 frtsOffset;
- NvU64 frtsSize;
-
- NvU64 gspFwWprEnd;
-
- // GSP-RM to use for fbRegionInfo?
- NvU64 fbSize;
-
- // ---- Other members -----------------------------------------------
-
- // GSP-RM to use for fbRegionInfo?
- NvU64 vgaWorkspaceOffset;
- NvU64 vgaWorkspaceSize;
-
- // Boot count. Used to determine whether to load the firmware image.
- NvU64 bootCount;
-
- // TODO: the partitionRpc* fields below do not really belong in this
- // structure. The values are patched in by the partition bootstrapper
- // when GSP-RM is booted in a partition, and this structure was a
- // convenient place for the bootstrapper to access them. These should
- // be moved to a different comm. mechanism between the bootstrapper
- // and the GSP-RM tasks.
-
- union
- {
- struct
- {
- // Shared partition RPC memory (physical address)
- NvU64 partitionRpcAddr;
-
- // Offsets relative to partitionRpcAddr
- NvU16 partitionRpcRequestOffset;
- NvU16 partitionRpcReplyOffset;
-
- // Code section and dataSection offset and size.
- NvU32 elfCodeOffset;
- NvU32 elfDataOffset;
- NvU32 elfCodeSize;
- NvU32 elfDataSize;
-
- // Used during GSP-RM resume to check for revocation
- NvU32 lsUcodeVersion;
- };
-
- struct
- {
- // Pad for the partitionRpc* fields, plus 4 bytes
- NvU32 partitionRpcPadding[4];
-
- // CrashCat (contiguous) buffer size/location - occupies same bytes as the
- // elf(Code|Data)(Offset|Size) fields above.
- // TODO: move to GSP_FMC_INIT_PARAMS
- NvU64 sysmemAddrOfCrashReportQueue;
- NvU32 sizeOfCrashReportQueue;
-
- // Pad for the lsUcodeVersion field
- NvU32 lsUcodeVersionPadding[1];
- };
- };
-
- // Number of VF partitions allocating sub-heaps from the WPR heap
- // Used during boot to ensure the heap is adequately sized
- NvU8 gspFwHeapVfPartitionCount;
-
- // Pad structure to exactly 256 bytes. Can replace padding with additional
- // fields without incrementing revision. Padding initialized to 0.
- NvU8 padding[7];
-
- // BL to use for verification (i.e. Booter says OK to boot)
- NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
-} GspFwWprMeta;
-
-#define GSP_FW_WPR_META_REVISION 1
-#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
-
-#endif
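
A consumer can reject an incompatible handoff by checking the magic and revision before trusting any layout fields, and the FB-layout members imply gspFwWprStart < gspFwWprEnd. A minimal sketch; the return codes are illustrative:

/* Illustrative sanity check of a GspFwWprMeta block. */
static int gsp_wpr_meta_valid(const GspFwWprMeta *meta)
{
        if (meta->magic != GSP_FW_WPR_META_MAGIC)
                return -1;      /* not written by this interface */
        if (meta->revision != GSP_FW_WPR_META_REVISION)
                return -2;      /* incompatible handoff revision */
        if (meta->gspFwWprStart >= meta->gspFwWprEnd)
                return -3;      /* degenerate WPR2 region */
        return 0;
}
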
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
deleted file mode 100644
index 4eff473e8990..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
-#define __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct {
- //
- // Version 1
- // Version 2
- // Version 3 = for Partition boot
- // Version 4 = for eb riscv boot
- // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
- //
- NvU32 version; // structure version
- NvU32 bootloaderOffset;
- NvU32 bootloaderSize;
- NvU32 bootloaderParamOffset;
- NvU32 bootloaderParamSize;
- NvU32 riscvElfOffset;
- NvU32 riscvElfSize;
- NvU32 appVersion; // Changelist number associated with the image
- //
- // Manifest contains information about Monitor and it is
- // input to BR
- //
- NvU32 manifestOffset;
- NvU32 manifestSize;
- //
- // Monitor Data offset within RISCV image and size
- //
- NvU32 monitorDataOffset;
- NvU32 monitorDataSize;
- //
-    // Monitor Code offset within RISCV image and size
- //
- NvU32 monitorCodeOffset;
- NvU32 monitorCodeSize;
- NvU32 bIsMonitorEnabled;
- //
- // Swbrom Code offset within RISCV image and size
- //
- NvU32 swbromCodeOffset;
- NvU32 swbromCodeSize;
- //
- // Swbrom Data offset within RISCV image and size
- //
- NvU32 swbromDataOffset;
- NvU32 swbromDataSize;
- //
- // Total size of FB carveout (image and reserved space).
- //
- NvU32 fbReservedSize;
- //
- // Indicates whether the entire RISC-V image is signed as "code" in code section.
- //
- NvU32 bSignedAsCode;
-} RM_RISCV_UCODE_DESC;
-
-#endif
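
The descriptor is a table of byte offsets and sizes into one firmware blob, so resolving a sub-image is offset arithmetic plus a bounds check. A sketch; the function name is illustrative:

#include <stddef.h>

/* Resolve one sub-image of the blob the descriptor refers to, or NULL
 * when the described range does not fit inside the blob. */
static const void *riscv_ucode_section(const void *fw, size_t fw_size,
                                       NvU32 offset, NvU32 size)
{
        if ((NvU64)offset + size > fw_size)
                return NULL;
        return (const char *)fw + offset;
}

/* Usage: the boot loader lives at desc->bootloaderOffset with length
 * desc->bootloaderSize, the manifest at desc->manifestOffset, etc. */
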
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
deleted file mode 100644
index 341ab0dbeaf2..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
-#define __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum GSP_SEQ_BUF_OPCODE
-{
- GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
- GSP_SEQ_BUF_OPCODE_REG_MODIFY,
- GSP_SEQ_BUF_OPCODE_REG_POLL,
- GSP_SEQ_BUF_OPCODE_DELAY_US,
- GSP_SEQ_BUF_OPCODE_REG_STORE,
- GSP_SEQ_BUF_OPCODE_CORE_RESET,
- GSP_SEQ_BUF_OPCODE_CORE_START,
- GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
- GSP_SEQ_BUF_OPCODE_CORE_RESUME,
-} GSP_SEQ_BUF_OPCODE;
-
-#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
- ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
- /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
- 0)
-
-typedef struct
-{
- NvU32 addr;
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 mask;
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 mask;
- NvU32 val;
- NvU32 timeout;
- NvU32 error;
-} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
-
-typedef struct
-{
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 index;
-} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
-
-typedef struct GSP_SEQUENCER_BUFFER_CMD
-{
- GSP_SEQ_BUF_OPCODE opCode;
- union
- {
- GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
- GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
- GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
- GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
- GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
- } payload;
-} GSP_SEQUENCER_BUFFER_CMD;
-
-#endif
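
GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS() exists so a consumer can step through a packed command stream: each command is one opcode dword followed by its payload dwords. A minimal sketch of that walk; ndwords is assumed to be the number of dwords actually used in the buffer:

static void gsp_seq_walk(const NvU32 *buf, NvU32 ndwords)
{
        NvU32 ptr = 0;

        while (ptr < ndwords) {
                const GSP_SEQUENCER_BUFFER_CMD *cmd =
                        (const void *)&buf[ptr];

                /* ... dispatch on cmd->opCode here ... */

                /* One opcode dword plus the opcode's payload dwords. */
                ptr += 1 + GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);
        }
}
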
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
deleted file mode 100644
index 3144e9beac61..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_generated_g_allclasses_h__
-#define __src_nvidia_generated_g_allclasses_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
-
-#define NV04_DISPLAY_COMMON (0x00000073)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
deleted file mode 100644
index 6b8921138c7d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_nvidia_generated_g_chipset_nvoc_h__
-#define __src_nvidia_generated_g_chipset_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct
-{
- NvU16 deviceID; // deviceID
- NvU16 vendorID; // vendorID
- NvU16 subdeviceID; // subsystem deviceID
- NvU16 subvendorID; // subsystem vendorID
- NvU8 revisionID; // revision ID
-} BUSINFO;
-
-#endif
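
These fields map one-to-one onto the PCI identifiers Linux already caches in struct pci_dev, so filling the structure is a straight copy. A sketch:

#include <linux/pci.h>

static void businfo_from_pdev(BUSINFO *info, const struct pci_dev *pdev)
{
        info->deviceID    = pdev->device;
        info->vendorID    = pdev->vendor;
        info->subdeviceID = pdev->subsystem_device;
        info->subvendorID = pdev->subsystem_vendor;
        info->revisionID  = pdev->revision;
}
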
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
deleted file mode 100644
index a5128f00225b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_nvidia_generated_g_fbsr_nvoc_h__
-#define __src_nvidia_generated_g_fbsr_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
deleted file mode 100644
index 5641a21cacca..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_nvidia_generated_g_gpu_nvoc_h__
-#define __src_nvidia_generated_g_gpu_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- COMPUTE_BRANDING_TYPE_NONE,
- COMPUTE_BRANDING_TYPE_TESLA,
-} COMPUTE_BRANDING_TYPE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
deleted file mode 100644
index b5ad55f854dc..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __src_nvidia_generated_g_kernel_channel_nvoc_h__
-#define __src_nvidia_generated_g_kernel_channel_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum {
- /*!
- * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
- * kernel CPU-RM clients.
- */
- ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
- /*! @brief Error notifier is explicitly not set.
- *
- * The corresponding hErrorContext or hEccErrorContext must be
- * NV01_NULL_OBJECT.
- */
- ERROR_NOTIFIER_TYPE_NONE,
- /*! @brief Error notifier is a ContextDma */
- ERROR_NOTIFIER_TYPE_CTXDMA,
- /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
- ERROR_NOTIFIER_TYPE_MEMORY
-} ErrorNotifierType;
-
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
deleted file mode 100644
index 946954ac5b3d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef __src_nvidia_generated_g_kernel_fifo_nvoc_h__
-#define __src_nvidia_generated_g_kernel_fifo_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- /* *************************************************************************
- * Bug 3820969
- * THINK BEFORE CHANGING ENUM ORDER HERE.
- * VGPU-guest uses this same ordering. Because this enum is not versioned,
- * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
- * ************************************************************************/
-
- // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
- ENGINE_INFO_TYPE_ENG_DESC = 0,
-
- // HW engine ID
- ENGINE_INFO_TYPE_FIFO_TAG,
-
- // RM_ENGINE_TYPE_*
- ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
-
- //
- // runlist id (meaning varies by GPU)
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST,
-
- // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
- ENGINE_INFO_TYPE_MMU_FAULT_ID,
-
- // ROBUST_CHANNEL_*
- ENGINE_INFO_TYPE_RC_MASK,
-
- // Reset Bit Position. On Ampere, only valid if not _INVALID
- ENGINE_INFO_TYPE_RESET,
-
- // Interrupt Bit Position
- ENGINE_INFO_TYPE_INTR,
-
- // log2(MC_ENGINE_*)
- ENGINE_INFO_TYPE_MC,
-
- // The DEV_TYPE_ENUM for this engine
- ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
-
- // The particular instance of this engine type
- ENGINE_INFO_TYPE_INSTANCE_ID,
-
- //
- // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
-
- //
- // If this entry is a host-driven engine.
- // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
- //
- ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
-
- //
- // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
-
- //
- // The base address for this engine's NV_CHRAM registers. Valid only on
- // Ampere+
- //
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
-
- // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
- ENGINE_INFO_TYPE_KERNEL_RM_MAX,
- // Used for iterating the engine info table by the index passed.
- ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
-
- // Size of FIFO_ENGINE_LIST.engineData
- ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
-
- // Input-only parameter for kfifoEngineInfoXlate.
- ENGINE_INFO_TYPE_PBDMA_ID
-
- /* *************************************************************************
- * Bug 3820969
- * THINK BEFORE CHANGING ENUM ORDER HERE.
- * VGPU-guest uses this same ordering. Because this enum is not versioned,
- * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
- * ************************************************************************/
-} ENGINE_INFO_TYPE;
-
-#endif
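
ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE sizes a per-engine array of properties indexed by this enum, and the usual operation is a translation: find the engine whose property A equals a given value, then read its property B. A sketch of that kfifoEngineInfoXlate() pattern against a stand-in table; input-only types such as ENGINE_INFO_TYPE_PBDMA_ID would need special-casing, omitted here:

/* Stand-in per-engine property table, not the RM's FIFO_ENGINE_LIST. */
struct engine_entry {
        NvU32 engineData[ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE];
};

static int engine_info_xlate(const struct engine_entry *engines, int count,
                             ENGINE_INFO_TYPE in_type, NvU32 in_val,
                             ENGINE_INFO_TYPE out_type, NvU32 *out_val)
{
        for (int i = 0; i < count; i++) {
                if (engines[i].engineData[in_type] == in_val) {
                        *out_val = engines[i].engineData[out_type];
                        return 0;
                }
        }
        return -1;      /* no engine matched */
}
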
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
deleted file mode 100644
index daabaee41c87..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __src_nvidia_generated_g_mem_desc_nvoc_h__
-#define __src_nvidia_generated_g_mem_desc_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define ADDR_SYSMEM 1 // System memory (PCI)
-#define ADDR_FBMEM 2 // Frame buffer memory space
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
deleted file mode 100644
index 10121218f4d3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef __src_nvidia_generated_g_os_nvoc_h__
-#define __src_nvidia_generated_g_os_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct PACKED_REGISTRY_ENTRY
-{
- NvU32 nameOffset;
- NvU8 type;
- NvU32 data;
- NvU32 length;
-} PACKED_REGISTRY_ENTRY;
-
-typedef struct PACKED_REGISTRY_TABLE
-{
- NvU32 size;
- NvU32 numEntries;
- PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
-} PACKED_REGISTRY_TABLE;
-
-#endif
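
nameOffset is a byte offset into the same blob that holds the table, so resolving an entry's key name is pointer arithmetic plus bounds checks. A sketch:

/* Resolve the key name of entry i; nameOffset is relative to the
 * start of the table blob, whose total size is tbl->size. */
static const char *registry_entry_name(const PACKED_REGISTRY_TABLE *tbl,
                                       NvU32 i)
{
        if (i >= tbl->numEntries || tbl->entries[i].nameOffset >= tbl->size)
                return NULL;
        return (const char *)tbl + tbl->entries[i].nameOffset;
}
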
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
deleted file mode 100644
index 8d925e24faea..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
+++ /dev/null
@@ -1,124 +0,0 @@
-#ifndef __src_nvidia_generated_g_rpc_structures_h__
-#define __src_nvidia_generated_g_rpc_structures_h__
-#include <nvrm/535.113.01/nvidia/generated/g_sdk-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct rpc_alloc_memory_v13_01
-{
- NvHandle hClient;
- NvHandle hDevice;
- NvHandle hMemory;
- NvU32 hClass;
- NvU32 flags;
- NvU32 pteAdjust;
- NvU32 format;
- NvU64 length NV_ALIGN_BYTES(8);
- NvU32 pageCount;
- struct pte_desc pteDesc;
-} rpc_alloc_memory_v13_01;
-
-typedef struct rpc_free_v03_00
-{
- NVOS00_PARAMETERS_v03_00 params;
-} rpc_free_v03_00;
-
-typedef struct rpc_unloading_guest_driver_v1F_07
-{
- NvBool bInPMTransition;
- NvBool bGc6Entering;
- NvU32 newLevel;
-} rpc_unloading_guest_driver_v1F_07;
-
-typedef struct rpc_update_bar_pde_v15_00
-{
- UpdateBarPde_v15_00 info;
-} rpc_update_bar_pde_v15_00;
-
-typedef struct rpc_gsp_rm_alloc_v03_00
-{
- NvHandle hClient;
- NvHandle hParent;
- NvHandle hObject;
- NvU32 hClass;
- NvU32 status;
- NvU32 paramsSize;
- NvU32 flags;
- NvU8 reserved[4];
- NvU8 params[];
-} rpc_gsp_rm_alloc_v03_00;
-
-typedef struct rpc_gsp_rm_control_v03_00
-{
- NvHandle hClient;
- NvHandle hObject;
- NvU32 cmd;
- NvU32 status;
- NvU32 paramsSize;
- NvU32 flags;
- NvU8 params[];
-} rpc_gsp_rm_control_v03_00;
-
-typedef struct rpc_run_cpu_sequencer_v17_00
-{
- NvU32 bufferSizeDWord;
- NvU32 cmdIndex;
- NvU32 regSaveArea[8];
- NvU32 commandBuffer[];
-} rpc_run_cpu_sequencer_v17_00;
-
-typedef struct rpc_post_event_v17_00
-{
- NvHandle hClient;
- NvHandle hEvent;
- NvU32 notifyIndex;
- NvU32 data;
- NvU16 info16;
- NvU32 status;
- NvU32 eventDataSize;
- NvBool bNotifyList;
- NvU8 eventData[];
-} rpc_post_event_v17_00;
-
-typedef struct rpc_rc_triggered_v17_02
-{
- NvU32 nv2080EngineType;
- NvU32 chid;
- NvU32 exceptType;
- NvU32 scope;
- NvU16 partitionAttributionId;
-} rpc_rc_triggered_v17_02;
-
-typedef struct rpc_os_error_log_v17_00
-{
- NvU32 exceptType;
- NvU32 runlistId;
- NvU32 chid;
- char errString[0x100];
-} rpc_os_error_log_v17_00;
-
-#endif
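
Several of the structures above end in a flexible params[] array whose length travels in paramsSize, so the wire size of a message is the fixed header plus that payload. A sketch for the control RPC:

#include <stddef.h>

static size_t gsp_rm_control_rpc_size(const rpc_gsp_rm_control_v03_00 *rpc)
{
        /* Fixed header plus the variable params[] payload. */
        return sizeof(*rpc) + rpc->paramsSize;
}
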
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
deleted file mode 100644
index e9fed4140468..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __src_nvidia_generated_g_sdk_structures_h__
-#define __src_nvidia_generated_g_sdk_structures_h__
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NVOS00_PARAMETERS_v03_00
-{
- NvHandle hRoot;
- NvHandle hObjectParent;
- NvHandle hObjectOld;
- NvV32 status;
-} NVOS00_PARAMETERS_v03_00;
-
-typedef struct UpdateBarPde_v15_00
-{
- NV_RPC_UPDATE_PDE_BAR_TYPE barType;
- NvU64 entryValue NV_ALIGN_BYTES(8);
- NvU64 entryLevelShift NV_ALIGN_BYTES(8);
-} UpdateBarPde_v15_00;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
deleted file mode 100644
index af50b11ec3b4..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
-#define __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct DOD_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 acpiIdListLen;
- NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
-} DOD_METHOD_DATA;
-
-typedef struct JT_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 jtCaps;
- NvU16 jtRevId;
- NvBool bSBIOSCaps;
-} JT_METHOD_DATA;
-
-typedef struct MUX_METHOD_DATA_ELEMENT
-{
- NvU32 acpiId;
- NvU32 mode;
- NV_STATUS status;
-} MUX_METHOD_DATA_ELEMENT;
-
-typedef struct MUX_METHOD_DATA
-{
- NvU32 tableLen;
- MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
- MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
-} MUX_METHOD_DATA;
-
-typedef struct CAPS_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 optimusCaps;
-} CAPS_METHOD_DATA;
-
-typedef struct ACPI_METHOD_DATA
-{
- NvBool bValid;
- DOD_METHOD_DATA dodMethodData;
- JT_METHOD_DATA jtMethodData;
- MUX_METHOD_DATA muxMethodData;
- CAPS_METHOD_DATA capsMethodData;
-} ACPI_METHOD_DATA;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
deleted file mode 100644
index e3160c60036d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
-#define __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- RM_ENGINE_TYPE_NULL = (0x00000000),
- RM_ENGINE_TYPE_GR0 = (0x00000001),
- RM_ENGINE_TYPE_GR1 = (0x00000002),
- RM_ENGINE_TYPE_GR2 = (0x00000003),
- RM_ENGINE_TYPE_GR3 = (0x00000004),
- RM_ENGINE_TYPE_GR4 = (0x00000005),
- RM_ENGINE_TYPE_GR5 = (0x00000006),
- RM_ENGINE_TYPE_GR6 = (0x00000007),
- RM_ENGINE_TYPE_GR7 = (0x00000008),
- RM_ENGINE_TYPE_COPY0 = (0x00000009),
- RM_ENGINE_TYPE_COPY1 = (0x0000000a),
- RM_ENGINE_TYPE_COPY2 = (0x0000000b),
- RM_ENGINE_TYPE_COPY3 = (0x0000000c),
- RM_ENGINE_TYPE_COPY4 = (0x0000000d),
- RM_ENGINE_TYPE_COPY5 = (0x0000000e),
- RM_ENGINE_TYPE_COPY6 = (0x0000000f),
- RM_ENGINE_TYPE_COPY7 = (0x00000010),
- RM_ENGINE_TYPE_COPY8 = (0x00000011),
- RM_ENGINE_TYPE_COPY9 = (0x00000012),
- RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
- RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
- RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
- RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
- RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
- RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
- RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
- RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
- RM_ENGINE_TYPE_NVENC0 = (0x00000025),
- RM_ENGINE_TYPE_NVENC1 = (0x00000026),
- RM_ENGINE_TYPE_NVENC2 = (0x00000027),
- RM_ENGINE_TYPE_VP = (0x00000028),
- RM_ENGINE_TYPE_ME = (0x00000029),
- RM_ENGINE_TYPE_PPP = (0x0000002a),
- RM_ENGINE_TYPE_MPEG = (0x0000002b),
- RM_ENGINE_TYPE_SW = (0x0000002c),
- RM_ENGINE_TYPE_TSEC = (0x0000002d),
- RM_ENGINE_TYPE_VIC = (0x0000002e),
- RM_ENGINE_TYPE_MP = (0x0000002f),
- RM_ENGINE_TYPE_SEC2 = (0x00000030),
- RM_ENGINE_TYPE_HOST = (0x00000031),
- RM_ENGINE_TYPE_DPU = (0x00000032),
- RM_ENGINE_TYPE_PMU = (0x00000033),
- RM_ENGINE_TYPE_FBFLCN = (0x00000034),
- RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
- RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
- RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
- RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
- RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
- RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
- RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
- RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
- RM_ENGINE_TYPE_OFA = (0x0000003d),
- RM_ENGINE_TYPE_LAST = (0x0000003e),
-} RM_ENGINE_TYPE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
deleted file mode 100644
index 3abec59f0cc4..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
-
-#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
deleted file mode 100644
index 4033a6f85a76..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct {
- RmPhysAddr sharedMemPhysAddr;
- NvU32 pageTableEntryCount;
- NvLength cmdQueueOffset;
- NvLength statQueueOffset;
- NvLength locklessCmdQueueOffset;
- NvLength locklessStatQueueOffset;
-} MESSAGE_QUEUE_INIT_ARGUMENTS;
-
-typedef struct {
- NvU32 oldLevel;
- NvU32 flags;
- NvBool bInPMTransition;
-} GSP_SR_INIT_ARGUMENTS;
-
-typedef struct
-{
- MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
- GSP_SR_INIT_ARGUMENTS srInitArguments;
- NvU32 gpuInstance;
-
- struct
- {
- NvU64 pa;
- NvU64 size;
- } profilerArgs;
-} GSP_ARGUMENTS_CACHED;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
deleted file mode 100644
index eeab25a5e290..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
+++ /dev/null
@@ -1,174 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h>
-#include <nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct GSP_VF_INFO
-{
- NvU32 totalVFs;
- NvU32 firstVFOffset;
- NvU64 FirstVFBar0Address;
- NvU64 FirstVFBar1Address;
- NvU64 FirstVFBar2Address;
- NvBool b64bitBar0;
- NvBool b64bitBar1;
- NvBool b64bitBar2;
-} GSP_VF_INFO;
-
-typedef struct GspSMInfo_t
-{
- NvU32 version;
- NvU32 regBankCount;
- NvU32 regBankRegCount;
- NvU32 maxWarpsPerSM;
- NvU32 maxThreadsPerWarp;
- NvU32 geomGsObufEntries;
- NvU32 geomXbufEntries;
- NvU32 maxSPPerSM;
- NvU32 rtCoreCount;
-} GspSMInfo;
-
-typedef struct GspStaticConfigInfo_t
-{
- NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
- NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
- NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
- NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
- NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
- NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
- NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
- COMPUTE_BRANDING_TYPE computeBranding;
-
- NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
- NvU32 sriovMaxGfid;
-
- NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
-
- GspSMInfo SM_info;
-
- NvBool poisonFuseEnabled;
-
- NvU64 fb_length;
- NvU32 fbio_mask;
- NvU32 fb_bus_width;
- NvU32 fb_ram_type;
- NvU32 fbp_mask;
- NvU32 l2_cache_size;
-
- NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
- NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
-
- NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvBool bGpuInternalSku;
- NvBool bIsQuadroGeneric;
- NvBool bIsQuadroAd;
- NvBool bIsNvidiaNvs;
- NvBool bIsVgx;
- NvBool bGeforceSmb;
- NvBool bIsTitan;
- NvBool bIsTesla;
- NvBool bIsMobile;
- NvBool bIsGc6Rtd3Allowed;
- NvBool bIsGcOffRtd3Allowed;
- NvBool bIsGcoffLegacyAllowed;
-
- NvU64 bar1PdeBase;
- NvU64 bar2PdeBase;
-
- NvBool bVbiosValid;
- NvU32 vbiosSubVendor;
- NvU32 vbiosSubDevice;
-
- NvBool bPageRetirementSupported;
-
- NvBool bSplitVasBetweenServerClientRm;
-
- NvBool bClRootportNeedsNosnoopWAR;
-
- VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
- VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
- NvU64 displaylessMaxPixels;
-
- // Client handle for internal RMAPI control.
- NvHandle hInternalClient;
-
- // Device handle for internal RMAPI control.
- NvHandle hInternalDevice;
-
- // Subdevice handle for internal RMAPI control.
- NvHandle hInternalSubdevice;
-
- NvBool bSelfHostedMode;
- NvBool bAtsSupported;
-
- NvBool bIsGpuUefi;
-} GspStaticConfigInfo;
-
-typedef struct GspSystemInfo
-{
- NvU64 gpuPhysAddr;
- NvU64 gpuPhysFbAddr;
- NvU64 gpuPhysInstAddr;
- NvU64 nvDomainBusDeviceFunc;
- NvU64 simAccessBufPhysAddr;
- NvU64 pcieAtomicsOpMask;
- NvU64 consoleMemSize;
- NvU64 maxUserVa;
- NvU32 pciConfigMirrorBase;
- NvU32 pciConfigMirrorSize;
- NvU8 oorArch;
- NvU64 clPdbProperties;
- NvU32 Chipset;
- NvBool bGpuBehindBridge;
- NvBool bMnocAvailable;
- NvBool bUpstreamL0sUnsupported;
- NvBool bUpstreamL1Unsupported;
- NvBool bUpstreamL1PorSupported;
- NvBool bUpstreamL1PorMobileOnly;
- NvU8 upstreamAddressValid;
- BUSINFO FHBBusInfo;
- BUSINFO chipsetIDInfo;
- ACPI_METHOD_DATA acpiMethodData;
- NvU32 hypervisorType;
- NvBool bIsPassthru;
- NvU64 sysTimerOffsetNs;
- GSP_VF_INFO gspVFInfo;
-} GspSystemInfo;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
deleted file mode 100644
index bd5e01f9814b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
-#define __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define MC_ENGINE_IDX_DISP 2
-
-#define MC_ENGINE_IDX_CE0 15
-
-#define MC_ENGINE_IDX_CE9 24
-
-#define MC_ENGINE_IDX_MSENC 38
-
-#define MC_ENGINE_IDX_MSENC2 40
-
-#define MC_ENGINE_IDX_GSP 49
-#define MC_ENGINE_IDX_NVJPG 50
-#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
-#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
-
-#define MC_ENGINE_IDX_NVJPEG7 57
-
-#define MC_ENGINE_IDX_BSP 64
-#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
-#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
-
-#define MC_ENGINE_IDX_NVDEC7 71
-
-#define MC_ENGINE_IDX_OFA0 80
-
-#define MC_ENGINE_IDX_GR 82
-#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
deleted file mode 100644
index 366447a368bf..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_nvbitmask_h__
-#define __src_nvidia_inc_kernel_gpu_nvbitmask_h__
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVGPU_ENGINE_CAPS_MASK_BITS 32
-#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
deleted file mode 100644
index 4a850dad4776..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_os_nv_memory_type_h__
-#define __src_nvidia_inc_kernel_os_nv_memory_type_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV_MEMORY_WRITECOMBINED 2
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
deleted file mode 100644
index f14b23852456..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
-#define __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define MAX_GPC_COUNT 32
-
-typedef enum
-{
- NV_RPC_UPDATE_PDE_BAR_1,
- NV_RPC_UPDATE_PDE_BAR_2,
- NV_RPC_UPDATE_PDE_BAR_INVALID,
-} NV_RPC_UPDATE_PDE_BAR_TYPE;
-
-typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
-{
- NvU32 headIndex;
- NvU32 maxHResolution;
- NvU32 maxVResolution;
-} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
-
-typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
-{
- NvU32 numHeads;
- NvU32 maxNumHeads;
-} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
deleted file mode 100644
index 7801af232dff..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
-#define __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-struct pte_desc
-{
- NvU32 idr:2;
- NvU32 reserved1:14;
- NvU32 length:16;
- union {
- NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
- NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
- } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
-};
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
index e6833df1ccc7..af11648ad9c8 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
@@ -21,4 +21,6 @@ typedef NvU64 NvLength;
typedef NvU64 RmPhysAddr;
typedef NvU32 NV_STATUS;
+
+typedef union {} rpc_generic_union;
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 2a0617e5fe2a..a3ba07fc48a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -315,7 +315,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
break;
}
case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
- getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+ getparam->value = nvkm_device->func->resource_size(nvkm_device, NVKM_BAR1_FB);
break;
case NOUVEAU_GETPARAM_VRAM_USED: {
struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
@@ -416,7 +416,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
*/
if (nouveau_cli_uvmm(cli)) {
ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
- chan->chan->dma.ib_max);
+ chan->chan->chan.gpfifo.max);
if (ret)
goto done;
}
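
This series consistently replaces the bare BAR index 1 with a named identifier so call sites read as intent rather than a magic number. A minimal sketch of the pattern; only NVKM_BAR1_FB appears in this diff, the other enum members are assumed:

    enum nvkm_bar_id {
            NVKM_BAR_INVALID = 0,
            NVKM_BAR0_PRI,  /* register (PRI) aperture, assumed name */
            NVKM_BAR1_FB,   /* framebuffer aperture, used above */
            NVKM_BAR2_INST, /* instance memory aperture, assumed name */
    };

    /* the getparam above then becomes self-describing: */
    getparam->value = nvkm_device->func->resource_size(nvkm_device, NVKM_BAR1_FB);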
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index d47442125fa1..4a75d146a171 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -42,7 +42,7 @@
#include "nouveau_acpi.h"
static struct ida bl_ida;
-#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0'
+#define BL_NAME_SIZE 24 // 12 for name + 11 for digits + 1 for '\0'
static bool
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
@@ -245,7 +245,7 @@ nv50_backlight_init(struct nouveau_backlight *bl,
if (nv_conn->type == DCB_CONNECTOR_eDP) {
int ret;
- u16 current_level;
+ u32 current_level;
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 current_mode;
@@ -261,8 +261,9 @@ nv50_backlight_init(struct nouveau_backlight *bl,
NV_DEBUG(drm, "DPCD backlight controls supported on %s\n",
nv_conn->base.name);
- ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info, 0, edp_dpcd,
- &current_level, &current_mode);
+ ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info,
+ 0, 0, edp_dpcd,
+ &current_level, &current_mode, false);
if (ret < 0)
return ret;
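
The old size of 15 left room for only two digits of the ida-allocated id; since the id is a plain int, the worst case needs 11 characters. A quick check of the new constant, assuming the driver's "nv_backlight%d" naming (the id variable is illustrative):

    /* 12 ("nv_backlight") + 11 ("-2147483648" worst case) + 1 ('\0') == 24 */
    char name[BL_NAME_SIZE];

    BUILD_BUG_ON(BL_NAME_SIZE < sizeof("nv_backlight") - 1 + 11 + 1);
    snprintf(name, sizeof(name), "nv_backlight%d", id);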
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2016c1e7242f..b96f0555ca14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -401,6 +401,83 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
return 0;
}
+void
+nouveau_bo_unpin_del(struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo = *pnvbo;
+
+ if (!nvbo)
+ return;
+
+ nouveau_bo_unmap(nvbo);
+ nouveau_bo_unpin(nvbo);
+ nouveau_bo_fini(nvbo);
+
+ *pnvbo = NULL;
+}
+
+int
+nouveau_bo_new_pin(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_pin(nvbo, domain, false);
+ if (ret) {
+ nouveau_bo_fini(nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_pin(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map_gpu(struct nouveau_cli *cli, u32 domain, u32 size,
+ struct nouveau_bo **pnvbo, struct nouveau_vma **pvma)
+{
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_map(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_vma_new(nvbo, vmm, pvma);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
@@ -923,6 +1000,9 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_resource *, struct ttm_resource *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xcab5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc9b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1204,7 +1284,7 @@ retry:
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
reg->bus.offset = (reg->start << PAGE_SHIFT) +
- device->func->resource_addr(device, 1);
+ device->func->resource_addr(device, NVKM_BAR1_FB);
reg->bus.is_iomem = true;
/* Some BARs do not support being ioremapped WC */
@@ -1295,7 +1375,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_device *device = nvxx_device(drm);
- u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
+ u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT;
int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
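
The four helpers added above fold the repetitive new/pin/map/VMA ladder into single calls; each stage unwinds its predecessors on failure, and nouveau_bo_unpin_del() is the one teardown for all of them. A usage sketch (domain and size are illustrative):

    struct nouveau_bo *nvbo = NULL;
    int ret;

    /* allocate, pin and CPU-map in one step */
    ret = nouveau_bo_new_map(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE, &nvbo);
    if (ret)
            return ret;

    /* ... use nvbo->kmap.virtual ... */

    /* unmap + unpin + fini, then NULL the caller's pointer */
    nouveau_bo_unpin_del(&nvbo);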
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 596a63a50a20..d59fd12268b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -9,6 +9,7 @@ struct nouveau_channel;
struct nouveau_cli;
struct nouveau_drm;
struct nouveau_fence;
+struct nouveau_vma;
struct nouveau_bo {
struct ttm_buffer_object bo;
@@ -89,6 +90,12 @@ void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
+int nouveau_bo_new_pin(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **);
+int nouveau_bo_new_map(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **);
+int nouveau_bo_new_map_gpu(struct nouveau_cli *, u32 domain, u32 size,
+ struct nouveau_bo **, struct nouveau_vma **);
+void nouveau_bo_unpin_del(struct nouveau_bo **);
+
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index cd659b9fd1d9..b1e92b1f7a26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -103,12 +103,11 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nvif_event_dtor(&chan->kill);
nvif_object_dtor(&chan->user);
nvif_mem_dtor(&chan->mem_userd);
+ nouveau_vma_del(&chan->sema.vma);
+ nouveau_bo_unpin_del(&chan->sema.bo);
nvif_object_dtor(&chan->push.ctxdma);
nouveau_vma_del(&chan->push.vma);
- nouveau_bo_unmap(chan->push.buffer);
- if (chan->push.buffer && chan->push.buffer->bo.pin_count)
- nouveau_bo_unpin(chan->push.buffer);
- nouveau_bo_fini(chan->push.buffer);
+ nouveau_bo_unpin_del(&chan->push.buffer);
kfree(chan);
}
*pchan = NULL;
@@ -163,14 +162,7 @@ nouveau_channel_prep(struct nouveau_cli *cli,
if (nouveau_vram_pushbuf)
target = NOUVEAU_GEM_DOMAIN_VRAM;
- ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
- &chan->push.buffer);
- if (ret == 0) {
- ret = nouveau_bo_pin(chan->push.buffer, target, false);
- if (ret == 0)
- ret = nouveau_bo_map(chan->push.buffer);
- }
-
+ ret = nouveau_bo_new_map(cli, target, size, &chan->push.buffer);
if (ret) {
nouveau_channel_del(pchan);
return ret;
@@ -199,8 +191,10 @@ nouveau_channel_prep(struct nouveau_cli *cli,
chan->push.addr = chan->push.vma->addr;
- if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
- return 0;
+ if (device->info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ return nouveau_bo_new_map_gpu(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE,
+ &chan->sema.bo, &chan->sema.vma);
+ }
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
@@ -209,13 +203,15 @@ nouveau_channel_prep(struct nouveau_cli *cli,
} else
if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
+ struct nvkm_device *nvkm_device = nvxx_device(drm);
+
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access..
* nfi why this exists, it came from the -nv ddx.
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1);
+ args.start = nvkm_device->func->resource_addr(nvkm_device, NVKM_BAR1_FB);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
@@ -253,27 +249,27 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
const struct nvif_mclass hosts[] = {
- { AMPERE_CHANNEL_GPFIFO_B, 0 },
- { AMPERE_CHANNEL_GPFIFO_A, 0 },
- { TURING_CHANNEL_GPFIFO_A, 0 },
- { VOLTA_CHANNEL_GPFIFO_A, 0 },
- { PASCAL_CHANNEL_GPFIFO_A, 0 },
- { MAXWELL_CHANNEL_GPFIFO_A, 0 },
- { KEPLER_CHANNEL_GPFIFO_B, 0 },
- { KEPLER_CHANNEL_GPFIFO_A, 0 },
- { FERMI_CHANNEL_GPFIFO , 0 },
- { G82_CHANNEL_GPFIFO , 0 },
- { NV50_CHANNEL_GPFIFO , 0 },
- { NV40_CHANNEL_DMA , 0 },
- { NV17_CHANNEL_DMA , 0 },
- { NV10_CHANNEL_DMA , 0 },
- { NV03_CHANNEL_DMA , 0 },
+ { BLACKWELL_CHANNEL_GPFIFO_B, 0 },
+ { BLACKWELL_CHANNEL_GPFIFO_A, 0 },
+ { HOPPER_CHANNEL_GPFIFO_A, 0 },
+ { AMPERE_CHANNEL_GPFIFO_B, 0 },
+ { AMPERE_CHANNEL_GPFIFO_A, 0 },
+ { TURING_CHANNEL_GPFIFO_A, 0 },
+ { VOLTA_CHANNEL_GPFIFO_A, 0 },
+ { PASCAL_CHANNEL_GPFIFO_A, 0 },
+ { MAXWELL_CHANNEL_GPFIFO_A, 0 },
+ { KEPLER_CHANNEL_GPFIFO_B, 0 },
+ { KEPLER_CHANNEL_GPFIFO_A, 0 },
+ { FERMI_CHANNEL_GPFIFO , 0 },
+ { G82_CHANNEL_GPFIFO , 0 },
+ { NV50_CHANNEL_GPFIFO , 0 },
+ { NV40_CHANNEL_DMA , 0 },
+ { NV17_CHANNEL_DMA , 0 },
+ { NV10_CHANNEL_DMA , 0 },
+ { NV03_CHANNEL_DMA , 0 },
{}
};
- struct {
- struct nvif_chan_v0 chan;
- char name[TASK_COMM_LEN+16];
- } args;
+ DEFINE_RAW_FLEX(struct nvif_chan_v0, args, name, TASK_COMM_LEN + 16);
struct nvif_device *device = &cli->device;
struct nouveau_channel *chan;
const u64 plength = 0x10000;
@@ -298,28 +294,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
return ret;
/* create channel object */
- args.chan.version = 0;
- args.chan.namelen = sizeof(args.name);
- args.chan.runlist = __ffs64(runm);
- args.chan.runq = 0;
- args.chan.priv = priv;
- args.chan.devm = BIT(0);
+ args->version = 0;
+ args->namelen = __member_size(args->name);
+ args->runlist = __ffs64(runm);
+ args->runq = 0;
+ args->priv = priv;
+ args->devm = BIT(0);
if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO) {
- args.chan.vmm = 0;
- args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
- args.chan.offset = chan->push.addr;
- args.chan.length = 0;
+ args->vmm = 0;
+ args->ctxdma = nvif_handle(&chan->push.ctxdma);
+ args->offset = chan->push.addr;
+ args->length = 0;
} else {
- args.chan.vmm = nvif_handle(&chan->vmm->vmm.object);
+ args->vmm = nvif_handle(&chan->vmm->vmm.object);
if (hosts[cid].oclass < FERMI_CHANNEL_GPFIFO)
- args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
+ args->ctxdma = nvif_handle(&chan->push.ctxdma);
else
- args.chan.ctxdma = 0;
- args.chan.offset = ioffset + chan->push.addr;
- args.chan.length = ilength;
+ args->ctxdma = 0;
+ args->offset = ioffset + chan->push.addr;
+ args->length = ilength;
}
- args.chan.huserd = 0;
- args.chan.ouserd = 0;
+ args->huserd = 0;
+ args->ouserd = 0;
/* allocate userd */
if (hosts[cid].oclass >= VOLTA_CHANNEL_GPFIFO_A) {
@@ -329,27 +325,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
if (ret)
return ret;
- args.chan.huserd = nvif_handle(&chan->mem_userd.object);
- args.chan.ouserd = 0;
+ args->huserd = nvif_handle(&chan->mem_userd.object);
+ args->ouserd = 0;
chan->userd = &chan->mem_userd.object;
} else {
chan->userd = &chan->user;
}
- snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current));
+ snprintf(args->name, __member_size(args->name), "%s[%d]",
+ current->comm, task_pid_nr(current));
ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
- &args, sizeof(args), &chan->user);
+ args, __struct_size(args), &chan->user);
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
- chan->runlist = args.chan.runlist;
- chan->chid = args.chan.chid;
- chan->inst = args.chan.inst;
- chan->token = args.chan.token;
+ chan->runlist = args->runlist;
+ chan->chid = args->chid;
+ chan->inst = args->inst;
+ chan->token = args->token;
return 0;
}
@@ -367,17 +364,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
return ret;
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
- struct {
- struct nvif_event_v0 base;
- struct nvif_chan_event_v0 host;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_chan_event_v0));
+ struct nvif_chan_event_v0 *host =
+ (struct nvif_chan_event_v0 *)args->data;
- args.host.version = 0;
- args.host.type = NVIF_CHAN_EVENT_V0_KILLED;
+ host->version = 0;
+ host->type = NVIF_CHAN_EVENT_V0_KILLED;
ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid,
nouveau_channel_killed, false,
- &args.base, sizeof(args), &chan->kill);
+ args, __struct_size(args), &chan->kill);
if (ret == 0)
ret = nvif_event_allow(&chan->kill);
if (ret) {
@@ -433,25 +430,33 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise dma tracking parameters */
- switch (chan->user.oclass) {
- case NV03_CHANNEL_DMA:
- case NV10_CHANNEL_DMA:
- case NV17_CHANNEL_DMA:
- case NV40_CHANNEL_DMA:
+ if (chan->user.oclass < NV50_CHANNEL_GPFIFO) {
chan->user_put = 0x40;
chan->user_get = 0x44;
chan->dma.max = (0x10000 / 4) - 2;
- break;
- default:
- chan->user_put = 0x40;
- chan->user_get = 0x44;
- chan->user_get_hi = 0x60;
- chan->dma.ib_base = 0x10000 / 4;
- chan->dma.ib_max = NV50_DMA_IB_MAX;
- chan->dma.ib_put = 0;
- chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
- chan->dma.max = chan->dma.ib_base;
- break;
+ } else
+ if (chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
+ ret = nvif_chan506f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000);
+ if (ret)
+ return ret;
+ } else
+ if (chan->user.oclass < VOLTA_CHANNEL_GPFIFO_A) {
+ ret = nvif_chan906f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000,
+ chan->sema.bo->kmap.virtual, chan->sema.vma->addr);
+ if (ret)
+ return ret;
+ } else {
+ ret = nvif_chanc36f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000,
+ chan->sema.bo->kmap.virtual, chan->sema.vma->addr,
+ &drm->client.device.user, chan->token);
+ if (ret)
+ return ret;
}
chan->dma.put = 0;
@@ -520,46 +525,44 @@ nouveau_channels_fini(struct nouveau_drm *drm)
int
nouveau_channels_init(struct nouveau_drm *drm)
{
- struct {
- struct nv_device_info_v1 m;
- struct {
- struct nv_device_info_v1_data channels;
- struct nv_device_info_v1_data runlists;
- } v;
- } args = {
- .m.version = 1,
- .m.count = sizeof(args.v) / sizeof(args.v.channels),
- .v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
- .v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS,
- };
+ DEFINE_RAW_FLEX(struct nv_device_info_v1, args, data, 2);
+ struct nv_device_info_v1_data *channels = &args->data[0];
+ struct nv_device_info_v1_data *runlists = &args->data[1];
struct nvif_object *device = &drm->client.device.object;
int ret, i;
- ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
+ args->version = 1;
+ args->count = __member_size(args->data) / sizeof(*args->data);
+ channels->mthd = NV_DEVICE_HOST_CHANNELS;
+ runlists->mthd = NV_DEVICE_HOST_RUNLISTS;
+
+ ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
+ __struct_size(args));
if (ret ||
- args.v.runlists.mthd == NV_DEVICE_INFO_INVALID || !args.v.runlists.data ||
- args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
+ runlists->mthd == NV_DEVICE_INFO_INVALID || !runlists->data ||
+ channels->mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
- drm->chan_nr = drm->chan_total = args.v.channels.data;
- drm->runl_nr = fls64(args.v.runlists.data);
+ drm->chan_nr = drm->chan_total = channels->data;
+ drm->runl_nr = fls64(runlists->data);
drm->runl = kcalloc(drm->runl_nr, sizeof(*drm->runl), GFP_KERNEL);
if (!drm->runl)
return -ENOMEM;
if (drm->chan_nr == 0) {
for (i = 0; i < drm->runl_nr; i++) {
- if (!(args.v.runlists.data & BIT(i)))
+ if (!(runlists->data & BIT(i)))
continue;
- args.v.channels.mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
- args.v.channels.data = i;
+ channels->mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
+ channels->data = i;
- ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
- if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
+ ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
+ __struct_size(args));
+ if (ret || channels->mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
- drm->runl[i].chan_nr = args.v.channels.data;
+ drm->runl[i].chan_nr = channels->data;
drm->runl[i].chan_id_base = drm->chan_total;
drm->runl[i].context_base = dma_fence_context_alloc(drm->runl[i].chan_nr);
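
The open-coded wrapper structs are replaced with DEFINE_RAW_FLEX(), which reserves correctly sized on-stack storage for a struct whose last member is a flexible array; __member_size() and __struct_size() then recover the lengths that sizeof() used to supply. The pattern used above, condensed into a sketch:

    /* storage for struct nvif_chan_v0 plus a (TASK_COMM_LEN + 16)-byte
     * name[] flexible array; "args" points at it
     */
    DEFINE_RAW_FLEX(struct nvif_chan_v0, args, name, TASK_COMM_LEN + 16);

    args->version = 0;
    args->namelen = __member_size(args->name);
    snprintf(args->name, __member_size(args->name), "%s[%d]",
             current->comm, task_pid_nr(current));

    /* header plus flex array, i.e. what sizeof(args) gave before */
    ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0,
                           hosts[cid].oclass, args, __struct_size(args),
                           &chan->user);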
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 016f668c0bc1..561877725aac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -3,13 +3,11 @@
#define __NOUVEAU_CHAN_H__
#include <nvif/object.h>
#include <nvif/event.h>
-#include <nvif/push.h>
+#include <nvif/chan.h>
struct nvif_device;
struct nouveau_channel {
- struct {
- struct nvif_push push;
- } chan;
+ struct nvif_chan chan;
struct nouveau_cli *cli;
struct nouveau_vmm *vmm;
@@ -41,15 +39,15 @@ struct nouveau_channel {
int free;
int cur;
int put;
- int ib_base;
- int ib_max;
- int ib_free;
- int ib_put;
} dma;
- u32 user_get_hi;
u32 user_get;
u32 user_put;
+ struct {
+ struct nouveau_bo *bo;
+ struct nouveau_vma *vma;
+ } sema;
+
struct nvif_object user;
struct nvif_object blit;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1b10c6c12f46..63621b1510f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1401,6 +1401,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
nv_connector->aux.name = connector->name;
+ if (disp->disp.object.oclass >= GB202_DISP)
+ nv_connector->aux.no_zero_sized = true;
drm_dp_aux_init(&nv_connector->aux);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 200e65a7cefc..c7869a639bef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -314,14 +314,10 @@ nouveau_debugfs_fini(struct nouveau_drm *drm)
drm->debugfs = NULL;
}
-int
+void
nouveau_module_debugfs_init(void)
{
nouveau_debugfs_root = debugfs_create_dir("nouveau", NULL);
- if (IS_ERR(nouveau_debugfs_root))
- return PTR_ERR(nouveau_debugfs_root);
-
- return 0;
}
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index b7617b344ee2..d05ed0e641c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -24,7 +24,7 @@ extern void nouveau_debugfs_fini(struct nouveau_drm *);
extern struct dentry *nouveau_debugfs_root;
-int nouveau_module_debugfs_init(void);
+void nouveau_module_debugfs_init(void);
void nouveau_module_debugfs_fini(void);
#else
static inline void
@@ -42,10 +42,9 @@ nouveau_debugfs_fini(struct nouveau_drm *drm)
{
}
-static inline int
+static inline void
nouveau_module_debugfs_init(void)
{
- return 0;
}
static inline void
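
The init path stops returning an error because, by kernel convention, debugfs failures must never be fatal: debugfs_create_dir() may hand back an ERR_PTR, and subsequent debugfs calls accept one and quietly become no-ops. A sketch of why callers need no check (the file name and fops here are hypothetical):

    nouveau_debugfs_root = debugfs_create_dir("nouveau", NULL);

    /* safe even if nouveau_debugfs_root is an ERR_PTR: */
    debugfs_create_file("example", 0444, nouveau_debugfs_root, NULL,
                        &example_fops);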
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index add006fc8d81..805d0a87aa54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -253,6 +253,7 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
int
nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb)
@@ -260,7 +261,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct drm_framebuffer *fb;
- const struct drm_format_info *info;
unsigned int height, i;
uint32_t tile_mode;
uint8_t kind;
@@ -295,8 +295,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
kind = nvbo->kind;
}
- info = drm_get_format_info(dev, mode_cmd);
-
for (i = 0; i < info->num_planes; i++) {
height = drm_format_info_plane_height(info,
mode_cmd->height,
@@ -320,7 +318,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = gem;
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
@@ -332,6 +330,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer *fb;
@@ -342,7 +341,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
if (!gem)
return ERR_PTR(-ENOENT);
- ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
+ ret = nouveau_framebuffer_new(dev, info, mode_cmd, gem, &fb);
if (ret == 0)
return fb;
@@ -495,7 +494,7 @@ nouveau_display_hpd_work(struct work_struct *work)
if (first_changed_connector)
drm_connector_put(first_changed_connector);
- pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
noop:
pm_runtime_put_autosuspend(dev->dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1f506f8b289c..470e0910d484 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -8,8 +8,11 @@
#include <drm/drm_framebuffer.h>
+struct drm_format_info;
+
int
nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb);
@@ -67,5 +70,6 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, uint32_t *tile_mode,
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
+ const struct drm_format_info *,
const struct drm_mode_fb_cmd2 *);
#endif
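
Both files above thread a const struct drm_format_info pointer down from the .fb_create hook instead of the driver looking it up itself: the core resolves the format once and every consumer sees the same pointer. Sketch of the reshaped flow; the core-side lookup is assumed, only the driver half appears in this diff:

    /* driver hook now receives the resolved format info ... */
    fb = nouveau_user_framebuffer_create(dev, file_priv, info, mode_cmd);

    /* ... and per-plane validation uses it directly, as above: */
    for (i = 0; i < info->num_planes; i++)
            height = drm_format_info_plane_height(info, mode_cmd->height, i);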
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index a1f329ef0641..017a803121d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -43,8 +43,6 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
uint64_t val;
val = nvif_rd32(chan->userd, chan->user_get);
- if (chan->user_get_hi)
- val |= (uint64_t)nvif_rd32(chan->userd, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -68,111 +66,12 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
return (val - chan->push.addr) >> 2;
}
-void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
- bool no_prefetch)
-{
- struct nvif_user *user = &chan->cli->drm->client.device.user;
- struct nouveau_bo *pb = chan->push.buffer;
- int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
-
- BUG_ON(chan->dma.ib_free < 1);
- WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
-
- nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
- nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
- (no_prefetch ? (1 << 31) : 0));
-
- chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
-
- mb();
- /* Flush writes. */
- nouveau_bo_rd32(pb, 0);
-
- nvif_wr32(chan->userd, 0x8c, chan->dma.ib_put);
- if (user->func && user->func->doorbell)
- user->func->doorbell(user, chan->token);
- chan->dma.ib_free--;
-}
-
-static int
-nv50_dma_push_wait(struct nouveau_channel *chan, int count)
-{
- uint32_t cnt = 0, prev_get = 0;
-
- while (chan->dma.ib_free < count) {
- uint32_t get = nvif_rd32(chan->userd, 0x88);
- if (get != prev_get) {
- prev_get = get;
- cnt = 0;
- }
-
- if ((++cnt & 0xff) == 0) {
- udelay(1);
- if (cnt > 100000)
- return -EBUSY;
- }
-
- chan->dma.ib_free = get - chan->dma.ib_put;
- if (chan->dma.ib_free <= 0)
- chan->dma.ib_free += chan->dma.ib_max;
- }
-
- return 0;
-}
-
-static int
-nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
-{
- uint64_t prev_get = 0;
- int ret, cnt = 0;
-
- ret = nv50_dma_push_wait(chan, slots + 1);
- if (unlikely(ret))
- return ret;
-
- while (chan->dma.free < count) {
- int get = READ_GET(chan, &prev_get, &cnt);
- if (unlikely(get < 0)) {
- if (get == -EINVAL)
- continue;
-
- return get;
- }
-
- if (get <= chan->dma.cur) {
- chan->dma.free = chan->dma.max - chan->dma.cur;
- if (chan->dma.free >= count)
- break;
-
- FIRE_RING(chan);
- do {
- get = READ_GET(chan, &prev_get, &cnt);
- if (unlikely(get < 0)) {
- if (get == -EINVAL)
- continue;
- return get;
- }
- } while (get == 0);
- chan->dma.cur = 0;
- chan->dma.put = 0;
- }
-
- chan->dma.free = get - chan->dma.cur - 1;
- }
-
- return 0;
-}
-
int
-nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
+nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
uint64_t prev_get = 0;
int cnt = 0, get;
- if (chan->dma.ib_max)
- return nv50_dma_wait(chan, slots, size);
-
while (chan->dma.free < size) {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get == -EBUSY))
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index c52cda82353e..0e27b76d1e1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -30,9 +30,7 @@
#include "nouveau_bo.h"
#include "nouveau_chan.h"
-int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
- bool no_prefetch);
+int nouveau_dma_wait(struct nouveau_channel *, int size);
/*
* There's a hw race condition where you can't jump to your PUT offset,
@@ -67,7 +65,7 @@ RING_SPACE(struct nouveau_channel *chan, int size)
{
int ret;
- ret = nouveau_dma_wait(chan, 1, size);
+ ret = nouveau_dma_wait(chan, size);
if (ret)
return ret;
@@ -94,12 +92,7 @@ FIRE_RING(struct nouveau_channel *chan)
return;
chan->accel_done = true;
- if (chan->dma.ib_max) {
- nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
- (chan->dma.cur - chan->dma.put) << 2, false);
- } else {
- WRITE_PUT(chan->dma.cur);
- }
+ WRITE_PUT(chan->dma.cur);
chan->dma.put = chan->dma.cur;
}
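
With the IB (GPFIFO) ring handling moved behind nvif_chan, nouveau_dma_wait() loses its slots argument and only ever serves pre-NV50 DMA-class channels, and FIRE_RING() writes PUT unconditionally. The surviving submit path, sketched (the method words emitted in the middle are elided):

    /* DMA-class submission only: reserve space, emit, kick PUT */
    ret = RING_SPACE(chan, 2);      /* wraps nouveau_dma_wait(chan, 2) */
    if (ret)
            return ret;

    /* ... write two method words into the push buffer ... */

    FIRE_RING(chan);                /* now always WRITE_PUT(chan->dma.cur) */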
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61d0f411ef84..ca4932a150e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -256,20 +256,15 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
chunk->pagemap.owner = drm->dev;
- ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
- &chunk->bo);
+ ret = nouveau_bo_new_pin(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, DMEM_CHUNK_SIZE,
+ &chunk->bo);
if (ret)
goto out_release;
- ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (ret)
- goto out_bo_free;
-
ptr = memremap_pages(&chunk->pagemap, numa_node_id());
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
- goto out_bo_unpin;
+ goto out_bo_free;
}
mutex_lock(&drm->dmem->mutex);
@@ -292,10 +287,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
return 0;
-out_bo_unpin:
- nouveau_bo_unpin(chunk->bo);
out_bo_free:
- nouveau_bo_fini(chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
out_release:
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
@@ -426,8 +419,7 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_dmem_evict_chunk(chunk);
- nouveau_bo_unpin(chunk->bo);
- nouveau_bo_fini(chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e154d08857c5..1527b801f013 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -503,11 +503,16 @@ nouveau_accel_init(struct nouveau_drm *drm)
case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
case PASCAL_CHANNEL_GPFIFO_A:
+ ret = nvc0_fence_create(drm);
+ break;
case VOLTA_CHANNEL_GPFIFO_A:
case TURING_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_B:
- ret = nvc0_fence_create(drm);
+ case HOPPER_CHANNEL_GPFIFO_A:
+ case BLACKWELL_CHANNEL_GPFIFO_A:
+ case BLACKWELL_CHANNEL_GPFIFO_B:
+ ret = gv100_fence_create(drm);
break;
default:
break;
@@ -1079,6 +1084,10 @@ nouveau_pmops_freeze(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_suspend(drm, false);
}
@@ -1087,6 +1096,10 @@ nouveau_pmops_thaw(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_resume(drm, false);
}
@@ -1448,9 +1461,7 @@ nouveau_drm_init(void)
if (!nouveau_modeset)
return 0;
- ret = nouveau_module_debugfs_init();
- if (ret)
- return ret;
+ nouveau_module_debugfs_init();
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
platform_driver_register(&nouveau_platform_driver);
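
Two behavioural changes above: Volta and newer channel classes now pair with the gv100 fence backend instead of nvc0, and hibernation freeze/thaw return early when vga_switcheroo already has the GPU powered off. The fence selection, restated as a hypothetical helper (numeric oclass comparisons are used the same way elsewhere in this series):

    static int nouveau_fence_backend(struct nouveau_drm *drm, s32 oclass)
    {
            if (oclass >= VOLTA_CHANNEL_GPFIFO_A)   /* Volta .. Blackwell */
                    return gv100_fence_create(drm);
            return nvc0_fence_create(drm);          /* Fermi .. Pascal */
    }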
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index a0b5f1b16e8b..edbbda78bac9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -10,6 +10,8 @@
#include "nouveau_sched.h"
#include "nouveau_uvmm.h"
+#include <nvif/class.h>
+
/**
* DOC: Overview
*
@@ -131,7 +133,7 @@ nouveau_exec_job_run(struct nouveau_job *job)
struct nouveau_fence *fence = exec_job->fence;
int i, ret;
- ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
+ ret = nvif_chan_gpfifo_wait(&chan->chan, exec_job->push.count + 1, 16);
if (ret) {
NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
return ERR_PTR(ret);
@@ -141,9 +143,11 @@ nouveau_exec_job_run(struct nouveau_job *job)
struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
- nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
+ nvif_chan_gpfifo_push(&chan->chan, p->va, p->va_len, no_prefetch);
}
+ nvif_chan_gpfifo_post(&chan->chan);
+
ret = nouveau_fence_emit(fence);
if (ret) {
nouveau_fence_unref(&exec_job->fence);
@@ -185,7 +189,7 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static const struct nouveau_job_ops nouveau_exec_job_ops = {
@@ -375,10 +379,10 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (unlikely(atomic_read(&chan->killed)))
return nouveau_abi16_put(abi16, -ENODEV);
- if (!chan->dma.ib_max)
+ if (chan->user.oclass < NV50_CHANNEL_GPFIFO)
return nouveau_abi16_put(abi16, -ENOSYS);
- push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max);
+ push_max = nouveau_exec_push_max_from_ib_max(chan->chan.gpfifo.max);
if (unlikely(req->push_count > push_max)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->push_count, push_max);
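
Exec submission now goes through the nvif_chan GPFIFO API: reserve ring and pushbuf space once, push each IB entry, then post, which writes PUT and rings the doorbell where the hardware has one. The flow from nouveau_exec_job_run() above, condensed:

    ret = nvif_chan_gpfifo_wait(&chan->chan, push_count + 1, 16);
    if (ret)
            return ret;

    for (i = 0; i < push_count; i++)
            nvif_chan_gpfifo_push(&chan->chan, p[i].va, p[i].va_len,
                                  p[i].flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH);

    nvif_chan_gpfifo_post(&chan->chan);     /* write PUT, ring doorbell */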
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index edddfc036c6d..9f345a008717 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -38,22 +38,16 @@
static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;
-static inline struct nouveau_fence *
-from_fence(struct dma_fence *fence)
-{
- return container_of(fence, struct nouveau_fence, base);
-}
-
static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
-static int
+static bool
nouveau_fence_signal(struct nouveau_fence *fence)
{
- int drop = 0;
+ bool drop = false;
dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
@@ -63,7 +57,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
- drop = 1;
+ drop = true;
}
dma_fence_put(&fence->base);
@@ -77,19 +71,17 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
fence->ops != &nouveau_fence_ops_uevent)
return NULL;
- return from_fence(fence);
+ return to_nouveau_fence(fence);
}
void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
- struct nouveau_fence *fence;
+ struct nouveau_fence *fence, *tmp;
unsigned long flags;
spin_lock_irqsave(&fctx->lock, flags);
- while (!list_empty(&fctx->pending)) {
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
-
+ list_for_each_entry_safe(fence, tmp, &fctx->pending, head) {
if (error && !dma_fence_is_signaled_locked(&fence->base))
dma_fence_set_error(&fence->base, error);
@@ -127,23 +119,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
-static int
+static void
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence *fence;
- int drop = 0;
+ struct nouveau_fence *fence, *tmp;
+ bool drop = false;
u32 seq = fctx->read(chan);
- while (!list_empty(&fctx->pending)) {
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
-
+ list_for_each_entry_safe(fence, tmp, &fctx->pending, head) {
if ((int)(seq - fence->base.seqno) < 0)
break;
- drop |= nouveau_fence_signal(fence);
+ if (nouveau_fence_signal(fence))
+ drop = true;
}
- return drop;
+ if (drop)
+ nvif_event_block(&fctx->event);
}
static void
@@ -151,22 +143,16 @@ nouveau_fence_uevent_work(struct work_struct *work)
{
struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
uevent_work);
+ struct nouveau_channel *chan;
+ struct nouveau_fence *fence;
unsigned long flags;
- int drop = 0;
spin_lock_irqsave(&fctx->lock, flags);
- if (!list_empty(&fctx->pending)) {
- struct nouveau_fence *fence;
- struct nouveau_channel *chan;
-
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ fence = list_first_entry_or_null(&fctx->pending, typeof(*fence), head);
+ if (fence) {
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (nouveau_fence_update(chan, fctx))
- drop = 1;
+ nouveau_fence_update(chan, fctx);
}
- if (drop)
- nvif_event_block(&fctx->event);
-
spin_unlock_irqrestore(&fctx->lock, flags);
}
@@ -184,10 +170,10 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_cli *cli = chan->cli;
struct nouveau_drm *drm = cli->drm;
struct nouveau_fence_priv *priv = (void*)drm->fence;
- struct {
- struct nvif_event_v0 base;
- struct nvif_chan_event_v0 host;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_chan_event_v0));
+ struct nvif_chan_event_v0 *host =
+ (struct nvif_chan_event_v0 *)args->data;
int ret;
INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
@@ -207,12 +193,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
if (!priv->uevent)
return;
- args.host.version = 0;
- args.host.type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
+ host->version = 0;
+ host->type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid,
nouveau_fence_wait_uevent_handler, false,
- &args.base, sizeof(args), &fctx->event);
+ args, __struct_size(args), &fctx->event);
WARN_ON(ret);
}
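/*
 * [Editor's note — background sketch; "demo_hdr" and "demo_flex_size"
 * are made-up names.] DEFINE_RAW_FLEX(TYPE, NAME, MEMBER, COUNT), from
 * <linux/overflow.h>, replaces the old on-stack "struct { hdr; payload; }"
 * idiom used here: it reserves storage for TYPE plus COUNT elements of
 * the MEMBER flexible array and declares NAME as a pointer to it, and
 * __struct_size(NAME) yields the total size to pass to nvif_event_ctor().
 */
#include <linux/overflow.h>

struct demo_hdr {
	u8 version;
	u8 data[];			/* flexible payload */
};

static size_t demo_flex_size(void)
{
	DEFINE_RAW_FLEX(struct demo_hdr, msg, data, 16);	/* hdr + 16 bytes */

	msg->version = 0;
	return __struct_size(msg);	/* sizeof(*msg) + 16 */
}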
@@ -246,9 +232,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return -ENODEV;
}
- if (nouveau_fence_update(chan, fctx))
- nvif_event_block(&fctx->event);
-
+ nouveau_fence_update(chan, fctx);
list_add_tail(&fence->head, &fctx->pending);
spin_unlock_irq(&fctx->lock);
}
@@ -256,31 +240,44 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return ret;
}
+void
+nouveau_fence_cancel(struct nouveau_fence *fence)
+{
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fctx->lock, flags);
+ if (!dma_fence_is_signaled_locked(&fence->base)) {
+ dma_fence_set_error(&fence->base, -ECANCELED);
+ if (nouveau_fence_signal(fence))
+ nvif_event_block(&fctx->event);
+ }
+ spin_unlock_irqrestore(&fctx->lock, flags);
+}
+
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
- if (fence->base.ops == &nouveau_fence_ops_legacy ||
- fence->base.ops == &nouveau_fence_ops_uevent) {
- struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- struct nouveau_channel *chan;
- unsigned long flags;
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ struct nouveau_channel *chan;
+ unsigned long flags;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
- return true;
+ if (dma_fence_is_signaled(&fence->base))
+ return true;
+
+ spin_lock_irqsave(&fctx->lock, flags);
+ chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+ if (chan)
+ nouveau_fence_update(chan, fctx);
+ spin_unlock_irqrestore(&fctx->lock, flags);
- spin_lock_irqsave(&fctx->lock, flags);
- chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (chan && nouveau_fence_update(chan, fctx))
- nvif_event_block(&fctx->event);
- spin_unlock_irqrestore(&fctx->lock, flags);
- }
return dma_fence_is_signaled(&fence->base);
}
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
unsigned long t = jiffies, timeout = t + wait;
@@ -460,7 +457,7 @@ static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
return !fctx->dead ? fctx->name : "dead channel";
@@ -474,7 +471,7 @@ static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
*/
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
struct nouveau_channel *chan;
bool ret = false;
@@ -490,7 +487,7 @@ static bool nouveau_fence_is_signaled(struct dma_fence *f)
static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
/*
* caller should have a reference on the fence,
@@ -515,7 +512,7 @@ static bool nouveau_fence_no_signaling(struct dma_fence *f)
static void nouveau_fence_release(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
@@ -533,7 +530,7 @@ static const struct dma_fence_ops nouveau_fence_ops_legacy = {
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
bool ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 8bc065acfe35..9957a919bd38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -17,12 +17,19 @@ struct nouveau_fence {
unsigned long timeout;
};
+static inline struct nouveau_fence *
+to_nouveau_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct nouveau_fence, base);
+}
+
int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *);
int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *);
void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *);
bool nouveau_fence_done(struct nouveau_fence *);
+void nouveau_fence_cancel(struct nouveau_fence *fence);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
@@ -83,6 +90,7 @@ void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
int nvc0_fence_create(struct nouveau_drm *);
+int gv100_fence_create(struct nouveau_drm *);
struct nv84_fence_chan {
struct nouveau_fence_chan base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 67e3c99de73a..690e10fbf0bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -850,8 +850,8 @@ revalidate:
}
}
- if (chan->dma.ib_max) {
- ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
+ if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
+ ret = nvif_chan_gpfifo_wait(&chan->chan, req->nr_push + 1, 16);
if (ret) {
NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
goto out;
@@ -864,8 +864,10 @@ revalidate:
u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
- nv50_dma_push(chan, addr, length, no_prefetch);
+ nvif_chan_gpfifo_push(&chan->chan, addr, length, no_prefetch);
}
+
+ nvif_chan_gpfifo_post(&chan->chan);
} else
if (drm->client.device.info.chipset >= 0x25) {
ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
@@ -958,7 +960,7 @@ out_prevalid:
u_free(push);
out_next:
- if (chan->dma.ib_max) {
+ if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index d326e55d2d24..0cc0bc9f9952 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -11,6 +11,7 @@
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
+#include "nouveau_chan.h"
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
@@ -87,7 +88,8 @@ nouveau_job_init(struct nouveau_job *job,
}
ret = drm_sched_job_init(&job->base, &sched->entity,
- args->credits, NULL);
+ args->credits, NULL,
+ job->file_priv->client_id);
if (ret)
goto err_free_chains;
@@ -120,11 +122,9 @@ nouveau_job_done(struct nouveau_job *job)
{
struct nouveau_sched *sched = job->sched;
- spin_lock(&sched->job.list.lock);
+ spin_lock(&sched->job_list.lock);
list_del(&job->entry);
- spin_unlock(&sched->job.list.lock);
-
- wake_up(&sched->job.wq);
+ spin_unlock(&sched->job_list.lock);
}
void
@@ -305,9 +305,9 @@ nouveau_job_submit(struct nouveau_job *job)
}
/* Submit was successful; add the job to the schedulers job list. */
- spin_lock(&sched->job.list.lock);
- list_add(&job->entry, &sched->job.list.head);
- spin_unlock(&sched->job.list.lock);
+ spin_lock(&sched->job_list.lock);
+ list_add(&job->entry, &sched->job_list.head);
+ spin_unlock(&sched->job_list.lock);
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
@@ -370,7 +370,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct drm_gpu_scheduler *sched = sched_job->sched;
struct nouveau_job *job = to_nouveau_job(sched_job);
- enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL;
+ enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_RESET;
drm_sched_stop(sched, sched_job);
@@ -392,10 +392,23 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job)
nouveau_job_fini(job);
}
+static void
+nouveau_sched_cancel_job(struct drm_sched_job *sched_job)
+{
+ struct nouveau_fence *fence;
+ struct nouveau_job *job;
+
+ job = to_nouveau_job(sched_job);
+ fence = to_nouveau_fence(job->done_fence);
+
+ nouveau_fence_cancel(fence);
+}
+
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
+ .cancel_job = nouveau_sched_cancel_job,
};
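/*
 * [Editor's note — hedged summary, inferred from the hunks above and the
 * drm_sched .cancel_job contract.] With .cancel_job wired up,
 * drm_sched_fini() cancels any job still queued at teardown:
 * nouveau_sched_cancel_job() -> nouveau_fence_cancel() signals the job's
 * done_fence with -ECANCELED. That is what makes the wait_event() drain
 * (and the job.wq waitqueue) removed below unnecessary.
 */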
static int
@@ -445,9 +458,8 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
goto fail_sched;
mutex_init(&sched->mutex);
- spin_lock_init(&sched->job.list.lock);
- INIT_LIST_HEAD(&sched->job.list.head);
- init_waitqueue_head(&sched->job.wq);
+ spin_lock_init(&sched->job_list.lock);
+ INIT_LIST_HEAD(&sched->job_list.head);
return 0;
@@ -481,16 +493,12 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
return 0;
}
-
static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- rmb(); /* for list_empty to work without lock */
- wait_event(sched->job.wq, list_empty(&sched->job.list.head));
-
drm_sched_entity_fini(entity);
drm_sched_fini(drm_sched);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index 20cd1da8db73..b98c3f0bef30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -103,12 +103,9 @@ struct nouveau_sched {
struct mutex mutex;
struct {
- struct {
- struct list_head head;
- spinlock_t lock;
- } list;
- struct wait_queue_head wq;
- } job;
+ struct list_head head;
+ spinlock_t lock;
+ } job_list;
};
int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index e12e2596ed84..6fa387da0637 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -720,10 +720,7 @@ nouveau_svm_fault(struct work_struct *work)
struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]);
struct nvif_object *device = &svm->drm->client.device.object;
struct nouveau_svmm *svmm;
- struct {
- struct nouveau_pfnmap_args i;
- u64 phys[1];
- } args;
+ DEFINE_RAW_FLEX(struct nouveau_pfnmap_args, args, p.phys, 1);
unsigned long hmm_flags;
u64 inst, start, limit;
int fi, fn;
@@ -772,11 +769,11 @@ nouveau_svm_fault(struct work_struct *work)
mutex_unlock(&svm->mutex);
/* Process list of faults. */
- args.i.i.version = 0;
- args.i.i.type = NVIF_IOCTL_V0_MTHD;
- args.i.m.version = 0;
- args.i.m.method = NVIF_VMM_V0_PFNMAP;
- args.i.p.version = 0;
+ args->i.version = 0;
+ args->i.type = NVIF_IOCTL_V0_MTHD;
+ args->m.version = 0;
+ args->m.method = NVIF_VMM_V0_PFNMAP;
+ args->p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
struct svm_notifier notifier;
@@ -802,9 +799,9 @@ nouveau_svm_fault(struct work_struct *work)
* fault window, determining required pages and access
* permissions based on pending faults.
*/
- args.i.p.addr = start;
- args.i.p.page = PAGE_SHIFT;
- args.i.p.size = PAGE_SIZE;
+ args->p.addr = start;
+ args->p.page = PAGE_SHIFT;
+ args->p.size = PAGE_SIZE;
/*
* Determine required permissions based on GPU fault
* access flags.
@@ -832,16 +829,16 @@ nouveau_svm_fault(struct work_struct *work)
notifier.svmm = svmm;
if (atomic)
- ret = nouveau_atomic_range_fault(svmm, svm->drm,
- &args.i, sizeof(args),
+ ret = nouveau_atomic_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
&notifier);
else
- ret = nouveau_range_fault(svmm, svm->drm, &args.i,
- sizeof(args), hmm_flags,
- &notifier);
+ ret = nouveau_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
+ hmm_flags, &notifier);
mmput(mm);
- limit = args.i.p.addr + args.i.p.size;
+ limit = args->p.addr + args->p.size;
for (fn = fi; ++fn < buffer->fault_nr; ) {
/* It's okay to skip over duplicate addresses from the
* same SVMM as faults are ordered by access type such
@@ -855,14 +852,14 @@ nouveau_svm_fault(struct work_struct *work)
if (buffer->fault[fn]->svmm != svmm ||
buffer->fault[fn]->addr >= limit ||
(buffer->fault[fi]->access == FAULT_ACCESS_READ &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e244927eb5d4..7d2436e5d50d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -312,8 +312,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->client.device.info.ram_user;
- arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
ret = nouveau_ttm_init_vram(drm);
if (ret) {
@@ -321,8 +321,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
/* GART init */
if (!drm->agp.bridge) {
@@ -357,7 +357,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
- arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ arch_io_free_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 48f105239f42..ddfc46bc1b3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1019,8 +1019,8 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
u64 end = addr + range;
again:
- spin_lock(&sched->job.list.lock);
- list_for_each_entry(__job, &sched->job.list.head, entry) {
+ spin_lock(&sched->job_list.lock);
+ list_for_each_entry(__job, &sched->job_list.head, entry) {
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
list_for_each_op(op, &bind_job->ops) {
@@ -1030,7 +1030,7 @@ again:
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&sched->job.list.lock);
+ spin_unlock(&sched->job_list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
@@ -1038,7 +1038,7 @@ again:
}
}
}
- spin_unlock(&sched->job.list.lock);
+ spin_unlock(&sched->job_list.lock);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 8c73f40e3bda..40ee95340814 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -85,10 +85,8 @@ void
nv10_fence_destroy(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_fini(priv->bo);
+
+ nouveau_bo_unpin_del(&priv->bo);
drm->fence = NULL;
kfree(priv);
}
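/*
 * [Editor's note — signatures inferred from call sites in this patch.]
 * The repeated new/pin/map error ladders in the fence code are folded
 * into two helpers:
 *   nouveau_bo_new_map(cli, domain, size, &bo) - alloc + pin + map;
 *   nouveau_bo_unpin_del(&bo)                  - unmap + unpin + free
 *                                                (presumably NULLing *bo).
 */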
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index d09bfd11369f..1b0c0aa3c305 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -130,20 +130,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
if (ret) {
nv10_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 62e28dddf87c..e1f0e8adf313 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -81,20 +81,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
if (ret) {
nv10_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index aa7dd0c5d917..1765b2cedaf9 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -185,10 +185,8 @@ static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_fini(priv->bo);
+
+ nouveau_bo_unpin_del(&priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -222,19 +220,8 @@ nv84_fence_create(struct nouveau_drm *drm)
* will lose CPU/GPU coherency!
*/
NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
- ret = nouveau_bo_new(&drm->client, 16 * drm->chan_total, 0,
- domain, 0, 0, NULL, NULL, &priv->bo);
- if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, domain, false);
- if (ret == 0) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
+ ret = nouveau_bo_new_map(&drm->client, domain, 16 * drm->chan_total, &priv->bo);
if (ret)
nv84_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index b7963a39dd91..198889c20ce1 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -14,6 +14,12 @@ nvif-y += nvif/outp.o
nvif-y += nvif/timer.o
nvif-y += nvif/vmm.o
+# Channel classes
+nvif-y += nvif/chan.o
+nvif-y += nvif/chan506f.o
+nvif-y += nvif/chan906f.o
+nvif-y += nvif/chanc36f.o
+
# Usermode classes
nvif-y += nvif/user.o
nvif-y += nvif/userc361.o
diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c
new file mode 100644
index 000000000000..80c01017d642
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan.c
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+
+static void
+nvif_chan_gpfifo_push_kick(struct nvif_push *push)
+{
+ struct nvif_chan *chan = container_of(push, typeof(*chan), push);
+ u32 put = push->bgn - (u32 *)chan->push.mem.object.map.ptr;
+ u32 cnt;
+
+ if (chan->func->gpfifo.post) {
+ if (push->end - push->cur < chan->func->gpfifo.post_size)
+ push->end = push->cur + chan->func->gpfifo.post_size;
+
+ WARN_ON(nvif_chan_gpfifo_post(chan));
+ }
+
+ cnt = push->cur - push->bgn;
+
+ chan->func->gpfifo.push(chan, true, chan->push.addr + (put << 2), cnt << 2, false);
+ chan->func->gpfifo.kick(chan);
+}
+
+static int
+nvif_chan_gpfifo_push_wait(struct nvif_push *push, u32 push_nr)
+{
+ struct nvif_chan *chan = container_of(push, typeof(*chan), push);
+
+ return nvif_chan_gpfifo_wait(chan, 1, push_nr);
+}
+
+int
+nvif_chan_gpfifo_post(struct nvif_chan *chan)
+{
+ const u32 *map = chan->push.mem.object.map.ptr;
+ const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size;
+ const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+
+ if (!chan->func->gpfifo.post)
+ return 0;
+
+ return chan->func->gpfifo.post(chan, gpptr, pbptr);
+}
+
+void
+nvif_chan_gpfifo_push(struct nvif_chan *chan, u64 addr, u32 size, bool no_prefetch)
+{
+ chan->func->gpfifo.push(chan, false, addr, size, no_prefetch);
+}
+
+int
+nvif_chan_gpfifo_wait(struct nvif_chan *chan, u32 gpfifo_nr, u32 push_nr)
+{
+ struct nvif_push *push = &chan->push;
+ int ret = 0, time = 1000000;
+
+ if (gpfifo_nr) {
+ /* Account for pushbuf space needed by nvif_chan_gpfifo_post(),
+ * if used after pushing userspace GPFIFO entries.
+ */
+ if (chan->func->gpfifo.post)
+ push_nr += chan->func->gpfifo.post_size;
+ }
+
+ /* Account for the GPFIFO entry needed to submit pushbuf. */
+ if (push_nr)
+ gpfifo_nr++;
+
+ /* Wait for space in main push buffer. */
+ if (push->cur + push_nr > push->end) {
+ ret = nvif_chan_dma_wait(chan, push_nr);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for GPFIFO space. */
+ while (chan->gpfifo.free < gpfifo_nr) {
+ chan->gpfifo.free = chan->func->gpfifo.read_get(chan) - chan->gpfifo.cur - 1;
+ if (chan->gpfifo.free < 0)
+ chan->gpfifo.free += chan->gpfifo.max + 1;
+
+ if (chan->gpfifo.free < gpfifo_nr) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ }
+
+ return 0;
+}
+
+void
+nvif_chan_gpfifo_ctor(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, struct nvif_chan *chan)
+{
+ chan->func = func;
+
+ chan->userd.map.ptr = userd;
+
+ chan->gpfifo.map.ptr = gpfifo;
+ chan->gpfifo.max = (gpfifo_size >> 3) - 1;
+ chan->gpfifo.free = chan->gpfifo.max;
+
+ chan->push.mem.object.map.ptr = push;
+ chan->push.wait = nvif_chan_gpfifo_push_wait;
+ chan->push.kick = nvif_chan_gpfifo_push_kick;
+ chan->push.addr = push_addr;
+ chan->push.hw.max = push_size >> 2;
+ chan->push.bgn = chan->push.cur = chan->push.end = push;
+}
+
+int
+nvif_chan_dma_wait(struct nvif_chan *chan, u32 nr)
+{
+ struct nvif_push *push = &chan->push;
+ u32 cur = push->cur - (u32 *)push->mem.object.map.ptr;
+ u32 free, time = 1000000;
+
+ nr += chan->func->gpfifo.post_size;
+
+ do {
+ u32 get = chan->func->push.read_get(chan);
+
+ if (get <= cur) {
+ free = push->hw.max - cur;
+ if (free >= nr)
+ break;
+
+ PUSH_KICK(push);
+
+ while (get == 0) {
+ get = chan->func->push.read_get(chan);
+ if (get == 0) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ }
+
+ cur = 0;
+ }
+
+ free = get - cur - 1;
+
+ if (free < nr) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ } while (free < nr);
+
+ push->bgn = (u32 *)push->mem.object.map.ptr + cur;
+ push->cur = push->bgn;
+ push->end = push->bgn + free - chan->func->gpfifo.post_size;
+ return 0;
+}
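/*
 * [Editor's note — standalone sketch of the ring arithmetic in
 * nvif_chan_gpfifo_wait() above; names are informal.] The entry count is
 * a power of two and one slot is kept unused, so GET == PUT means empty:
 */
static int ring_free(int get, int put, int mask)	/* mask = entries - 1 */
{
	int free = get - put - 1;

	if (free < 0)
		free += mask + 1;
	return free;
}
/* e.g. with 8 entries (mask = 7): get == put -> 7 free; put just behind
 * get -> 0 free, forcing a wait until the host consumes entries. */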
diff --git a/drivers/gpu/drm/nouveau/nvif/chan506f.c b/drivers/gpu/drm/nouveau/nvif/chan506f.c
new file mode 100644
index 000000000000..d3900887c4a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan506f.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+
+void
+nvif_chan506f_gpfifo_kick(struct nvif_chan *chan)
+{
+ wmb();
+ nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
+}
+
+void
+nvif_chan506f_gpfifo_push(struct nvif_chan *chan, bool main, u64 addr, u32 size, bool no_prefetch)
+{
+ u32 gpptr = chan->gpfifo.cur << 3;
+
+ if (WARN_ON(!chan->gpfifo.free))
+ return;
+
+ nvif_wr32(&chan->gpfifo, gpptr + 0, lower_32_bits(addr));
+ nvif_wr32(&chan->gpfifo, gpptr + 4, upper_32_bits(addr) |
+ (main ? 0 : BIT(9)) |
+ (size >> 2) << 10 |
+ (no_prefetch ? BIT(31) : 0));
+
+ chan->gpfifo.cur = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+ chan->gpfifo.free--;
+ if (!chan->gpfifo.free)
+ chan->push.end = chan->push.cur;
+}
+
+static u32
+nvif_chan506f_gpfifo_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->userd, 0x88);
+}
+
+static u32
+nvif_chan506f_read_get(struct nvif_chan *chan)
+{
+ u32 tlgetlo = nvif_rd32(&chan->userd, 0x58);
+ u32 tlgethi = nvif_rd32(&chan->userd, 0x5c);
+ struct nvif_push *push = &chan->push;
+
+ /* Update cached GET pointer if TOP_LEVEL_GET is valid. */
+ if (tlgethi & BIT(31)) {
+ u64 tlget = ((u64)(tlgethi & 0xff) << 32) | tlgetlo;
+
+ push->hw.get = (tlget - push->addr) >> 2;
+ }
+
+ return push->hw.get;
+}
+
+static const struct nvif_chan_func
+nvif_chan506f = {
+ .push.read_get = nvif_chan506f_read_get,
+ .gpfifo.read_get = nvif_chan506f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chan506f_gpfifo_kick,
+};
+
+int
+nvif_chan506f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size)
+{
+ nvif_chan_gpfifo_ctor(&nvif_chan506f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, chan);
+ return 0;
+}
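/*
 * [Editor's note — layout read off the shifts in
 * nvif_chan506f_gpfifo_push() above; field names are informal.] Each
 * GPFIFO entry is two 32-bit words:
 *   word0[31:0]  = push buffer address bits 31:0
 *   word1[8:0]   = address bits 40:32
 *   word1[9]     = set for userspace pushbufs (clear for the "main" one)
 *   word1[30:10] = length in dwords (size >> 2)
 *   word1[31]    = NO_PREFETCH
 */
static inline void
gpfifo_entry_encode(u64 addr, u32 size, bool main, bool no_prefetch, u32 w[2])
{
	w[0] = lower_32_bits(addr);
	w[1] = upper_32_bits(addr) |
	       (main ? 0 : BIT(9)) |
	       (size >> 2) << 10 |
	       (no_prefetch ? BIT(31) : 0);
}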
diff --git a/drivers/gpu/drm/nouveau/nvif/chan906f.c b/drivers/gpu/drm/nouveau/nvif/chan906f.c
new file mode 100644
index 000000000000..c9cfb85179b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan906f.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+#include <nvif/user.h>
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl906f.h>
+
+/* Limits GPFIFO size to 1MiB, and "main" push buffer size to 64KiB. */
+#define NVIF_CHAN906F_PBPTR_BITS 15
+#define NVIF_CHAN906F_PBPTR_MASK ((1 << NVIF_CHAN906F_PBPTR_BITS) - 1)
+
+#define NVIF_CHAN906F_GPPTR_SHIFT NVIF_CHAN906F_PBPTR_BITS
+#define NVIF_CHAN906F_GPPTR_BITS (32 - NVIF_CHAN906F_PBPTR_BITS)
+#define NVIF_CHAN906F_GPPTR_MASK ((1 << NVIF_CHAN906F_GPPTR_BITS) - 1)
+
+#define NVIF_CHAN906F_SEM_RELEASE_SIZE 5
+
+static int
+nvif_chan906f_sem_release(struct nvif_chan *chan, u64 addr, u32 data)
+{
+ struct nvif_push *push = &chan->push;
+ int ret;
+
+ ret = PUSH_WAIT(push, NVIF_CHAN906F_SEM_RELEASE_SIZE);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV906F, SEMAPHOREA,
+ NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(addr)),
+
+ SEMAPHOREB, lower_32_bits(addr),
+
+ SEMAPHOREC, data,
+
+ SEMAPHORED,
+ NVDEF(NV906F, SEMAPHORED, OPERATION, RELEASE) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_WFI, DIS) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_SIZE, 16BYTE));
+
+ return 0;
+}
+
+int
+nvif_chan906f_gpfifo_post(struct nvif_chan *chan, u32 gpptr, u32 pbptr)
+{
+ return chan->func->sem.release(chan, chan->sema.addr,
+ (gpptr << NVIF_CHAN906F_GPPTR_SHIFT) | pbptr);
+}
+
+u32
+nvif_chan906f_gpfifo_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->sema, 0) >> NVIF_CHAN906F_GPPTR_SHIFT;
+}
+
+u32
+nvif_chan906f_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->sema, 0) & NVIF_CHAN906F_PBPTR_MASK;
+}
+
+static const struct nvif_chan_func
+nvif_chan906f = {
+ .push.read_get = nvif_chan906f_read_get,
+ .gpfifo.read_get = nvif_chan906f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chan506f_gpfifo_kick,
+ .gpfifo.post = nvif_chan906f_gpfifo_post,
+ .gpfifo.post_size = NVIF_CHAN906F_SEM_RELEASE_SIZE,
+ .sem.release = nvif_chan906f_sem_release,
+};
+
+int
+nvif_chan906f_ctor_(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_chan *chan)
+{
+ nvif_chan_gpfifo_ctor(func, userd, gpfifo, gpfifo_size, push, push_addr, push_size, chan);
+ chan->sema.map.ptr = sema;
+ chan->sema.addr = sema_addr;
+ return 0;
+}
+
+int
+nvif_chan906f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr)
+{
+ return nvif_chan906f_ctor_(&nvif_chan906f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, sema, sema_addr, chan);
+}
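/*
 * [Editor's note — worked example of the packing above; helper names are
 * made up.] GF100-style channels report GET via a semaphore release into
 * a driver-owned buffer: a single 32-bit payload carries both the GPFIFO
 * GET (upper 17 bits) and the push buffer GET (lower 15 bits), so one
 * read recovers both ring positions.
 */
static inline u32 sema_pack(u32 gpptr, u32 pbptr)
{
	return (gpptr << NVIF_CHAN906F_GPPTR_SHIFT) |
	       (pbptr & NVIF_CHAN906F_PBPTR_MASK);
}

static inline u32 sema_gpfifo_get(u32 v) { return v >> NVIF_CHAN906F_GPPTR_SHIFT; }
static inline u32 sema_push_get(u32 v)   { return v & NVIF_CHAN906F_PBPTR_MASK; }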
diff --git a/drivers/gpu/drm/nouveau/nvif/chanc36f.c b/drivers/gpu/drm/nouveau/nvif/chanc36f.c
new file mode 100644
index 000000000000..ca02b939c3fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chanc36f.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+#include <nvif/user.h>
+
+#include <nvif/push906f.h>
+#include <nvhw/class/clc36f.h>
+
+static void
+nvif_chanc36f_gpfifo_kick(struct nvif_chan *chan)
+{
+ struct nvif_user *usermode = chan->usermode;
+
+ nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
+
+ wmb(); /* ensure CPU writes are flushed to BAR1 */
+ nvif_rd32(&chan->userd, 0); /* ensure BAR1 writes are flushed to vidmem */
+
+ usermode->func->doorbell(usermode, chan->doorbell_token);
+}
+
+#define NVIF_CHANC36F_SEM_RELEASE_SIZE 6
+
+static int
+nvif_chanc36f_sem_release(struct nvif_chan *chan, u64 addr, u32 data)
+{
+ struct nvif_push *push = &chan->push;
+ int ret;
+
+ ret = PUSH_WAIT(push, NVIF_CHANC36F_SEM_RELEASE_SIZE);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(addr),
+
+ SEM_ADDR_HI, upper_32_bits(addr),
+
+ SEM_PAYLOAD_LO, data);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, DIS) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+
+ return 0;
+}
+
+static const struct nvif_chan_func
+nvif_chanc36f = {
+ .push.read_get = nvif_chan906f_read_get,
+ .gpfifo.read_get = nvif_chan906f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chanc36f_gpfifo_kick,
+ .gpfifo.post = nvif_chan906f_gpfifo_post,
+ .gpfifo.post_size = NVIF_CHANC36F_SEM_RELEASE_SIZE,
+ .sem.release = nvif_chanc36f_sem_release,
+};
+
+int
+nvif_chanc36f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_user *usermode, u32 doorbell_token)
+{
+ int ret;
+
+ ret = nvif_chan906f_ctor_(&nvif_chanc36f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, sema, sema_addr, chan);
+ if (ret)
+ return ret;
+
+ chan->usermode = usermode;
+ chan->doorbell_token = doorbell_token;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/conn.c b/drivers/gpu/drm/nouveau/nvif/conn.c
index 9ee18cb99264..5a1a83c62a2a 100644
--- a/drivers/gpu/drm/nouveau/nvif/conn.c
+++ b/drivers/gpu/drm/nouveau/nvif/conn.c
@@ -30,17 +30,17 @@ int
nvif_conn_event_ctor(struct nvif_conn *conn, const char *name, nvif_event_func func, u8 types,
struct nvif_event *event)
{
- struct {
- struct nvif_event_v0 base;
- struct nvif_conn_event_v0 conn;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_conn_event_v0));
+ struct nvif_conn_event_v0 *args_conn =
+ (struct nvif_conn_event_v0 *)args->data;
int ret;
- args.conn.version = 0;
- args.conn.types = types;
+ args_conn->version = 0;
+ args_conn->types = types;
ret = nvif_event_ctor_(&conn->object, name ?: "nvifConnHpd", nvif_conn_id(conn),
- func, true, &args.base, sizeof(args), false, event);
+ func, true, args, __struct_size(args), false, event);
NVIF_DEBUG(&conn->object, "[NEW EVENT:HPD types:%02x]", types);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 14da22fa3b5b..fa42146252da 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -36,6 +36,7 @@ int
nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { GB202_DISP, 0 },
{ AD102_DISP, 0 },
{ GA102_DISP, 0 },
{ TU102_DISP, 0 },
diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
index 6daeb7f0b09b..32f6c5eb92af 100644
--- a/drivers/gpu/drm/nouveau/nvif/outp.c
+++ b/drivers/gpu/drm/nouveau/nvif/outp.c
@@ -195,20 +195,17 @@ nvif_outp_dp_aux_pwr(struct nvif_outp *outp, bool enable)
int
nvif_outp_hda_eld(struct nvif_outp *outp, int head, void *data, u32 size)
{
- struct {
- struct nvif_outp_hda_eld_v0 mthd;
- u8 data[128];
- } args;
+ DEFINE_RAW_FLEX(struct nvif_outp_hda_eld_v0, mthd, data, 128);
int ret;
- if (WARN_ON(size > ARRAY_SIZE(args.data)))
+ if (WARN_ON(size > __member_size(mthd->data)))
return -EINVAL;
- args.mthd.version = 0;
- args.mthd.head = head;
+ mthd->version = 0;
+ mthd->head = head;
- memcpy(args.data, data, size);
- ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, &args, sizeof(args.mthd) + size);
+ memcpy(mthd->data, data, size);
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, mthd, sizeof(*mthd) + size);
NVIF_ERRON(ret, &outp->object, "[HDA_ELD head:%d size:%d]", head, size);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/user.c b/drivers/gpu/drm/nouveau/nvif/user.c
index b648a5e036af..53f03fa1c9c2 100644
--- a/drivers/gpu/drm/nouveau/nvif/user.c
+++ b/drivers/gpu/drm/nouveau/nvif/user.c
@@ -41,9 +41,11 @@ nvif_user_ctor(struct nvif_device *device, const char *name)
int version;
const struct nvif_user_func *func;
} users[] = {
- { AMPERE_USERMODE_A, -1, &nvif_userc361 },
- { TURING_USERMODE_A, -1, &nvif_userc361 },
- { VOLTA_USERMODE_A, -1, &nvif_userc361 },
+ { BLACKWELL_USERMODE_A, -1, &nvif_userc361 },
+ { HOPPER_USERMODE_A, -1, &nvif_userc361 },
+ { AMPERE_USERMODE_A, -1, &nvif_userc361 },
+ { TURING_USERMODE_A, -1, &nvif_userc361 },
+ { VOLTA_USERMODE_A, -1, &nvif_userc361 },
{}
};
int cid, ret;
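/*
 * [Editor's note] nvif_mclass() picks the first class in the array that
 * the device reports support for, so newer classes are listed first.
 * Hopper and Blackwell map to the same &nvif_userc361 implementation,
 * i.e. they reuse the Volta USERMODE register layout; only the class IDs
 * differ here.
 */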
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 2e48b0816670..ddcf8782d6b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -17,8 +17,6 @@ include $(src)/nvkm/engine/msppp/Kbuild
include $(src)/nvkm/engine/msvld/Kbuild
include $(src)/nvkm/engine/nvenc/Kbuild
include $(src)/nvkm/engine/nvdec/Kbuild
-include $(src)/nvkm/engine/nvjpg/Kbuild
-include $(src)/nvkm/engine/ofa/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
include $(src)/nvkm/engine/sec2/Kbuild
include $(src)/nvkm/engine/sw/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 165d61fc5d6c..9754bac65df7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -10,5 +10,4 @@ nvkm-y += nvkm/engine/ce/gv100.o
nvkm-y += nvkm/engine/ce/tu102.o
nvkm-y += nvkm/engine/ce/ga100.o
nvkm-y += nvkm/engine/ce/ga102.o
-
-nvkm-y += nvkm/engine/ce/r535.o
+nvkm-y += nvkm/engine/ce/gb202.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
index 9427a592bd16..1c0c60138706 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
@@ -90,7 +90,7 @@ ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&ga100_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
index ce56ede7c2e9..9359c5e7aa3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
@@ -44,7 +44,7 @@ ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&ga102_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
new file mode 100644
index 000000000000..37c3c619c71b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb202/dev_ce.h>
+
+u32
+gb202_ce_grce_mask(struct nvkm_device *device)
+{
+ u32 data = nvkm_rd32(device, NV_CE_GRCE_MASK);
+
+ return NVVAL_GET(data, NV_CE, GRCE_MASK, VALUE);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 806a76a72249..34fd2657134b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -16,4 +16,6 @@ int ga100_ce_oneinit(struct nvkm_engine *);
int ga100_ce_init(struct nvkm_engine *);
int ga100_ce_fini(struct nvkm_engine *, bool);
int ga100_ce_nonstall(struct nvkm_engine *);
+
+u32 gb202_ce_grce_mask(struct nvkm_device *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
deleted file mode 100644
index bd0d435dbbd3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
-
-struct r535_ce_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ce_obj_dtor(struct nvkm_object *object)
-{
- struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_ce_obj = {
- .dtor = r535_ce_obj_dtor,
-};
-
-static int
-r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_ce_obj *obj;
- NVC0B5_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->version = 1;
- args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ce_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_ce_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_ce_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index 7c8647dcb349..67d0545cf902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -40,7 +40,7 @@ tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&tu102_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 9093d89b16f3..3375a59ebf1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2529,9 +2529,6 @@ nv170_chipset = {
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x000003ff, ga100_ce_new },
.fifo = { 0x00000001, ga100_fifo_new },
- .nvdec = { 0x0000001f, ga100_nvdec_new },
- .nvjpg = { 0x00000001, ga100_nvjpg_new },
- .ofa = { 0x00000001, ga100_ofa_new },
};
static const struct nvkm_device_chip
@@ -2561,8 +2558,6 @@ nv172_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2593,8 +2588,6 @@ nv173_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2625,8 +2618,6 @@ nv174_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2657,8 +2648,6 @@ nv176_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2689,12 +2678,26 @@ nv177_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
+nv180_chipset = {
+ .name = "GH100",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gh100_fb_new },
+ .fsp = { 0x00000001, gh100_fsp_new },
+ .gsp = { 0x00000001, gh100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv192_chipset = {
.name = "AD102",
.bar = { 0x00000001, tu102_bar_new },
@@ -2709,14 +2712,9 @@ nv192_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2735,14 +2733,9 @@ nv193_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2761,14 +2754,9 @@ nv194_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2787,14 +2775,9 @@ nv196_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2813,17 +2796,122 @@ nv197_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
+static const struct nvkm_device_chip
+nv1a0_chipset = {
+ .name = "GB100",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb100_fb_new },
+ .fsp = { 0x00000001, gb100_fsp_new },
+ .gsp = { 0x00000001, gb100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1a2_chipset = {
+ .name = "GB102",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb100_fb_new },
+ .fsp = { 0x00000001, gb100_fsp_new },
+ .gsp = { 0x00000001, gb100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b2_chipset = {
+ .name = "GB202",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b3_chipset = {
+ .name = "GB203",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b5_chipset = {
+ .name = "GB205",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b6_chipset = {
+ .name = "GB206",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b7_chipset = {
+ .name = "GB207",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
struct nvkm_subdev *
nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
@@ -3065,8 +3153,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
device->debug = nvkm_dbgopt(device->dbgopt, "device");
INIT_LIST_HEAD(&device->subdev);
- mmio_base = device->func->resource_addr(device, 0);
- mmio_size = device->func->resource_size(device, 0);
+ mmio_base = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ mmio_size = device->func->resource_size(device, NVKM_BAR0_PRI);
device->pri = ioremap(mmio_base, mmio_size);
if (device->pri == NULL) {
@@ -3139,7 +3227,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
case 0x170: device->card_type = GA100; break;
+ case 0x180: device->card_type = GH100; break;
case 0x190: device->card_type = AD100; break;
+ case 0x1a0: device->card_type = GB10x; break;
+ case 0x1b0: device->card_type = GB20x; break;
default:
break;
}
@@ -3242,11 +3333,19 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
+ case 0x180: device->chip = &nv180_chipset; break;
case 0x192: device->chip = &nv192_chipset; break;
case 0x193: device->chip = &nv193_chipset; break;
case 0x194: device->chip = &nv194_chipset; break;
case 0x196: device->chip = &nv196_chipset; break;
case 0x197: device->chip = &nv197_chipset; break;
+ case 0x1a0: device->chip = &nv1a0_chipset; break;
+ case 0x1a2: device->chip = &nv1a2_chipset; break;
+ case 0x1b2: device->chip = &nv1b2_chipset; break;
+ case 0x1b3: device->chip = &nv1b3_chipset; break;
+ case 0x1b5: device->chip = &nv1b5_chipset; break;
+ case 0x1b6: device->chip = &nv1b6_chipset; break;
+ case 0x1b7: device->chip = &nv1b7_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 3ff6436007fa..8f0261a0d618 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1560,18 +1560,42 @@ nvkm_device_pci(struct nvkm_device *device)
return container_of(device, struct nvkm_device_pci, device);
}
+static int
+nvkm_device_pci_resource_idx(struct nvkm_device_pci *pdev, enum nvkm_bar_id bar)
+{
+ int idx = 0;
+
+ if (bar == NVKM_BAR0_PRI)
+ return idx;
+
+ idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+ if (bar == NVKM_BAR1_FB)
+ return idx;
+
+ idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+ if (bar == NVKM_BAR2_INST)
+ return idx;
+
+ WARN_ON(1);
+ return -1;
+}
+
static resource_size_t
-nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
- return pci_resource_start(pdev->pdev, bar);
+ int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+ return idx >= 0 ? pci_resource_start(pdev->pdev, idx) : 0;
}
static resource_size_t
-nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
- return pci_resource_len(pdev->pdev, bar);
+ int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+ return idx >= 0 ? pci_resource_len(pdev->pdev, idx) : 0;
}
static int
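Aside: nvkm_device_pci_resource_idx() above converts the abstract BAR ids into PCI resource indices, stepping past 64-bit memory BARs, which occupy two resource slots each. A minimal standalone sketch of the same walk (assumes <linux/pci.h>; the example function name is ours):

static void
example_dump_bar_indices(struct pci_dev *pdev)
{
	/* NVKM_BAR0_PRI is always PCI resource 0. */
	int fb = (pci_resource_flags(pdev, 0) & IORESOURCE_MEM_64) ? 2 : 1;
	int inst = fb + ((pci_resource_flags(pdev, fb) & IORESOURCE_MEM_64) ? 2 : 1);

	/* With two 64-bit BARs this prints: PRI=0 FB=2 INST=4 */
	pci_info(pdev, "PRI=0 FB=%d INST=%d\n", fb, inst);
}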
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index e42b18820a95..75ee7506d443 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -11,6 +11,7 @@
#include <subdev/devinit.h>
#include <subdev/fault.h>
#include <subdev/fb.h>
+#include <subdev/fsp.h>
#include <subdev/fuse.h>
#include <subdev/gpio.h>
#include <subdev/gsp.h>
@@ -43,8 +44,6 @@
#include <engine/msvld.h>
#include <engine/nvenc.h>
#include <engine/nvdec.h>
-#include <engine/nvjpg.h>
-#include <engine/ofa.h>
#include <engine/sec.h>
#include <engine/sec2.h>
#include <engine/sw.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 78a83f904bbd..114e50ca1827 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -186,21 +186,31 @@ nvkm_device_tegra(struct nvkm_device *device)
}
static struct resource *
-nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
- return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+ int idx;
+
+ switch (bar) {
+ case NVKM_BAR0_PRI: idx = 0; break;
+ case NVKM_BAR1_FB : idx = 1; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return platform_get_resource(tdev->pdev, IORESOURCE_MEM, idx);
}
static resource_size_t
-nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? res->start : 0;
}
static resource_size_t
-nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? resource_size(res) : 0;
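Aside: on Tegra the translation is direct (NVKM_BAR0_PRI and NVKM_BAR1_FB map to platform MEM resources 0 and 1) and there is no BAR2 instance aperture, so such requests WARN and yield a zero address/size. A hedged sketch of how a caller can cope (helper name is ours):

static int
example_need_bar2(struct nvkm_device *device)
{
	/* resource_size() returns 0 when the BAR id isn't backed. */
	if (!device->func->resource_size(device, NVKM_BAR2_INST))
		return -ENODEV;	/* e.g. Tegra: no instance aperture */

	return 0;
}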
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index d7f75b3a43c8..58191b7a0494 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -148,6 +148,9 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
+ case GH100: args->v0.family = NV_DEVICE_INFO_V0_HOPPER; break;
+ case GB10x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
+ case GB20x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
default:
args->v0.family = 0;
break;
@@ -209,8 +212,8 @@ nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = device->func->resource_addr(device, 0);
- *size = device->func->resource_size(device, 0);
+ *addr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ *size = device->func->resource_size(device, NVKM_BAR0_PRI);
return 0;
}
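Aside: both new Blackwell buckets (GB10x, GB20x) report the same family to userspace; only the kernel-internal card_type distinguishes them. A client-side check therefore reduces to a single comparison (sketch; helper name is ours):

static bool
example_is_blackwell(u8 family)
{
	return family == NV_DEVICE_INFO_V0_BLACKWELL;
}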
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e346e924fee8..e1aecd3fe96c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -27,9 +27,6 @@ nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
nvkm-y += nvkm/engine/disp/ga102.o
-nvkm-y += nvkm/engine/disp/ad102.o
-
-nvkm-y += nvkm/engine/disp/r535.o
nvkm-y += nvkm/engine/disp/udisp.o
nvkm-y += nvkm/engine/disp/uconn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
deleted file mode 100644
index 7f300a79aa29..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-#include "chan.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_disp_func
-ad102_disp = {
- .uevent = &gv100_disp_chan_uevent,
- .ramht_size = 0x2000,
- .root = { 0, 0,AD102_DISP },
- .user = {
- {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
- {{ 0, 0,GA102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
- {{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
- {{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
- {{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
- {}
- },
-};
-
-int
-ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_disp **pdisp)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
index 4e43ee383c34..9b84e357d354 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
@@ -49,7 +49,7 @@ nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
- const u64 base = device->func->resource_addr(device, 0);
+ const u64 base = device->func->resource_addr(device, NVKM_BAR0_PRI);
*type = NVKM_OBJECT_MAP_IO;
*addr = base + chan->func->user(chan, size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index cfa3698d3a2f..614921166fba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -805,7 +805,7 @@ gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
struct gv100_disp_caps *caps = gv100_disp_caps(object);
struct nvkm_device *device = caps->disp->engine.subdev.device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = 0x640000 + device->func->resource_addr(device, 0);
+ *addr = 0x640000 + device->func->resource_addr(device, NVKM_BAR0_PRI);
*size = 0x1000;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index aff92848abfe..376e9c3bcb1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -25,8 +25,7 @@ nvkm-y += nvkm/engine/fifo/gv100.o
nvkm-y += nvkm/engine/fifo/tu102.o
nvkm-y += nvkm/engine/fifo/ga100.o
nvkm-y += nvkm/engine/fifo/ga102.o
-
-nvkm-y += nvkm/engine/fifo/r535.o
+nvkm-y += nvkm/engine/fifo/gb202.o
nvkm-y += nvkm/engine/fifo/ucgrp.o
nvkm-y += nvkm/engine/fifo/uchan.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 22443fe4a39f..fdffa0391b31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -303,7 +303,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
}
/* Allocate USERD + BAR1 polling area. */
- if (fifo->func->chan.func->userd->bar == 1) {
+ if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB) {
struct nvkm_vmm *bar1 = nvkm_bar_bar1_vmm(device);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr *
@@ -349,8 +349,6 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_chid_unref(&fifo->cgid);
nvkm_chid_unref(&fifo->chid);
- mutex_destroy(&fifo->userd.mutex);
-
nvkm_event_fini(&fifo->nonstall.event);
mutex_destroy(&fifo->mutex);
@@ -391,8 +389,5 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
spin_lock_init(&fifo->lock);
mutex_init(&fifo->mutex);
- INIT_LIST_HEAD(&fifo->userd.list);
- mutex_init(&fifo->userd.mutex);
-
return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
}
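Aside: with the r535 channel-id hooks removed (see chan.c below), the fifo no longer tracks USERD allocations itself, so the userd list/mutex pair goes away and the BAR1 polling-area test is keyed on NVKM_BAR1_FB rather than the magic number 1. Channel-id allocation reduces to a plain pool lookup (sketch; function name is ours):

static int
example_alloc_chid(struct nvkm_runl *runl, struct nvkm_chan *chan)
{
	/* Ids always come from the runlist's chid pool now. */
	chan->id = nvkm_chid_get(runl->chid, chan);
	return chan->id < 0 ? chan->id : 0;
}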
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 7d4716dcd512..e5bbd8563007 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -104,7 +104,7 @@ nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_c
if (cctx) {
refcount_inc(&cctx->refs);
*pcctx = cctx;
- mutex_unlock(&chan->cgrp->mutex);
+ mutex_unlock(&cgrp->mutex);
return 0;
}
@@ -275,11 +275,7 @@ nvkm_chan_del(struct nvkm_chan **pchan)
nvkm_gpuobj_del(&chan->ramfc);
if (chan->cgrp) {
- if (!chan->func->id_put)
- nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
- else
- chan->func->id_put(chan);
-
+ nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
nvkm_cgrp_unref(&chan->cgrp);
}
@@ -359,14 +355,14 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
/* Validate arguments against class requirements. */
if ((runq && runq >= runl->func->runqs) ||
(!func->inst->vmm != !vmm) ||
- ((func->userd->bar < 0) == !userd) ||
+ (!func->userd->bar == !userd) ||
(!func->ramfc->ctxdma != !dmaobj) ||
((func->ramfc->devm < devm) && devm != BIT(0)) ||
(!func->ramfc->priv && priv)) {
RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
"push:%d:%p devm:%08x:%08x priv:%d:%d",
runl->func->runqs, runq, func->inst->vmm, vmm,
- func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
+ func->userd->bar, userd, func->ramfc->ctxdma, dmaobj,
func->ramfc->devm, devm, func->ramfc->priv, priv);
return -EINVAL;
}
@@ -441,30 +437,26 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
}
/* Allocate channel ID. */
- if (!chan->func->id_get) {
- chan->id = nvkm_chid_get(runl->chid, chan);
- if (chan->id >= 0) {
- if (func->userd->bar < 0) {
- if (ouserd + chan->func->userd->size >=
- nvkm_memory_size(userd)) {
- RUNL_DEBUG(runl, "ouserd %llx", ouserd);
- return -EINVAL;
- }
-
- ret = nvkm_memory_kmap(userd, &chan->userd.mem);
- if (ret) {
- RUNL_DEBUG(runl, "userd %d", ret);
- return ret;
- }
-
- chan->userd.base = ouserd;
- } else {
- chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
- chan->userd.base = chan->id * chan->func->userd->size;
+ chan->id = nvkm_chid_get(runl->chid, chan);
+ if (chan->id >= 0) {
+ if (!func->userd->bar) {
+ if (ouserd + chan->func->userd->size >=
+ nvkm_memory_size(userd)) {
+ RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_kmap(userd, &chan->userd.mem);
+ if (ret) {
+ RUNL_DEBUG(runl, "userd %d", ret);
+ return ret;
}
+
+ chan->userd.base = ouserd;
+ } else {
+ chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
+ chan->userd.base = chan->id * chan->func->userd->size;
}
- } else {
- chan->id = chan->func->id_get(chan, userd, ouserd);
}
if (chan->id < 0) {
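Aside: userd->bar is now an enum nvkm_bar_id whose zero value is treated as "no BAR", so the old sentinel -1 (and the gv100 .bar = -1 initialiser dropped below) is expressed simply by leaving .bar unset. The two resulting USERD placement styles, as a sketch:

static const struct nvkm_chan_func_userd
example_userd_in_bar1 = {	/* kernel-allocated, mapped via BAR1 */
	.bar = NVKM_BAR1_FB,
	.size = 0x200,
};

static const struct nvkm_chan_func_userd
example_userd_client = {	/* .bar left 0: client supplies USERD memory */
	.size = 0x200,
};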
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 013682a709d5..445db5dfd1e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -17,9 +17,6 @@ struct nvkm_cctx {
};
struct nvkm_chan_func {
- int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd);
- void (*id_put)(struct nvkm_chan *);
-
const struct nvkm_chan_func_inst {
u32 size;
bool zero;
@@ -27,7 +24,7 @@ struct nvkm_chan_func {
} *inst;
const struct nvkm_chan_func_userd {
- int bar;
+ enum nvkm_bar_id bar;
u32 base;
u32 size;
void (*clear)(struct nvkm_chan *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
new file mode 100644
index 000000000000..b469e8afeb0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "runl.h"
+
+u32
+gb202_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return BIT(30) | (chan->cgrp->runl->id << 16) | chan->id;
+}
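Aside: compared with the Turing encoder made public below (runlist id in bits 16 and up, channel id in the low bits), GB202 additionally sets BIT(30). A sketch unpacking the handle, with field widths inferred from the two encoders (the 14-bit runlist mask is our assumption):

static inline void
example_decode_doorbell(u32 handle, u32 *runl, u32 *chid)
{
	*runl = (handle >> 16) & 0x3fff;	/* below the BIT(30) marker */
	*chid = handle & 0xffff;
}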
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 6c94451d0faa..e4a4fad2eafc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -133,7 +133,7 @@ gf100_chan_userd_clear(struct nvkm_chan *chan)
static const struct nvkm_chan_func_userd
gf100_chan_userd = {
- .bar = 1,
+ .bar = NVKM_BAR1_FB,
.size = 0x1000,
.clear = gf100_chan_userd_clear,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index d8a4d773a58c..5655eda52a7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -113,7 +113,7 @@ gk104_chan_ramfc = {
const struct nvkm_chan_func_userd
gk104_chan_userd = {
- .bar = 1,
+ .bar = NVKM_BAR1_FB,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
@@ -745,7 +745,7 @@ gk104_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
- if (fifo->func->chan.func->userd->bar == 1)
+ if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB)
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
nvkm_wr32(device, 0x002100, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 33066c8cdc64..d7f046c03cfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -70,7 +70,6 @@ gv100_chan_ramfc = {
const struct nvkm_chan_func_userd
gv100_chan_userd = {
- .bar = -1,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 674faf002b20..c4b8e567d86f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -154,7 +154,7 @@ nv04_chan_ramfc = {
const struct nvkm_chan_func_userd
nv04_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0x800000,
.size = 0x010000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index e50a94b6d7f8..084ca5561ee1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -93,7 +93,7 @@ nv40_chan_ramfc = {
static const struct nvkm_chan_func_userd
nv40_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0xc00000,
.size = 0x001000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index 954b5f3a7d57..7bf77661157d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -124,7 +124,7 @@ nv50_chan_ramfc = {
const struct nvkm_chan_func_userd
nv50_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0xc00000,
.size = 0x002000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index a0f3277605a5..5e81ae195329 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -6,6 +6,7 @@
#include <core/enum.h>
struct nvkm_cctx;
struct nvkm_cgrp;
+struct nvkm_chan;
struct nvkm_engn;
struct nvkm_memory;
struct nvkm_runl;
@@ -195,6 +196,7 @@ extern const struct nvkm_chan_func_ramfc gv100_chan_ramfc;
void tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *, u32 info);
extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault;
+u32 tu102_chan_doorbell_handle(struct nvkm_chan *);
int ga100_fifo_runl_ctor(struct nvkm_fifo *);
int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
@@ -206,6 +208,8 @@ extern const struct nvkm_engn_func ga100_engn_ce;
extern const struct nvkm_cgrp_func ga100_cgrp;
extern const struct nvkm_chan_func ga100_chan;
+u32 gb202_chan_doorbell_handle(struct nvkm_chan *);
+
int nvkm_uchan_new(struct nvkm_fifo *, struct nvkm_cgrp *, const struct nvkm_oclass *,
void *argv, u32 argc, struct nvkm_object **);
int nvkm_ucgrp_new(struct nvkm_fifo *, const struct nvkm_oclass *, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 1d39a6840a40..c5a03298e88c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -31,7 +31,7 @@
#include <nvif/class.h>
-static u32
+u32
tu102_chan_doorbell_handle(struct nvkm_chan *chan)
{
return (chan->cgrp->runl->id << 16) | chan->id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
index 9e56bcc166ed..52420a1edca5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
@@ -258,7 +258,7 @@ nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
- if (chan->func->userd->bar < 0)
+ if (!chan->func->userd->bar)
return -ENOSYS;
*type = NVKM_OBJECT_MAP_IO;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 1555f8c40b4f..b5418f05ccd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -41,9 +41,6 @@ nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/gv100.o
nvkm-y += nvkm/engine/gr/tu102.o
nvkm-y += nvkm/engine/gr/ga102.o
-nvkm-y += nvkm/engine/gr/ad102.o
-
-nvkm-y += nvkm/engine/gr/r535.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
index d285c597aff9..2b51f1d0c281 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
@@ -352,7 +352,7 @@ int
ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&ga102_gr, device, type, inst, pgr);
+ return -ENODEV;
return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
}
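Aside: ga102_gr_new() (and the tu102 GR, nvdec and nvenc constructors changed later in this patch) now share a common guard: when the device runs under GSP-RM the legacy constructor bows out with -ENODEV, the RM-backed implementation presumably being constructed elsewhere after this restructuring. The pattern as a sketch (hypothetical function name):

static int
example_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type,
	       int inst, struct nvkm_gr **pgr)
{
	if (nvkm_gsp_rm(device->gsp))
		return -ENODEV;	/* RM-backed path lives elsewhere */

	return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
}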
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index b0e0c9305034..54f686ba39ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -445,6 +445,4 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
-int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 02a8c62a0a32..13407fafe947 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -297,7 +297,7 @@ nv20_gr_init(struct nvkm_gr *base)
nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
/* begin RAM config */
- vramsz = device->func->resource_size(device, 1) - 1;
+ vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index a5e1f02791b4..b609b0150ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -386,7 +386,7 @@ nv40_gr_init(struct nvkm_gr *base)
}
/* begin RAM config */
- vramsz = device->func->resource_size(device, 1) - 1;
+ vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
switch (device->chipset) {
case 0x40:
nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
deleted file mode 100644
index f4bed3eb1ec2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gf100.h"
-
-#include <core/memory.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu/vmm.h>
-#include <engine/fifo/priv.h>
-
-#include <nvif/if900d.h>
-
-#include <nvhw/drf.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-
-#define r535_gr(p) container_of((p), struct r535_gr, base)
-
-#define R515_GR_MAX_CTXBUFS 9
-
-struct r535_gr {
- struct nvkm_gr base;
-
- struct {
- u16 bufferId;
- u32 size;
- u8 page;
- u8 align;
- bool global;
- bool init;
- bool ro;
- } ctxbuf[R515_GR_MAX_CTXBUFS];
- int ctxbuf_nr;
-
- struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_chan {
- struct nvkm_object object;
- struct r535_gr *gr;
-
- struct nvkm_vmm *vmm;
- struct nvkm_chan *chan;
-
- struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
- struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_gr_obj_dtor(struct nvkm_object *object)
-{
- struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_gr_obj = {
- .dtor = r535_gr_obj_dtor,
-};
-
-static int
-r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
- struct r535_gr_obj *obj;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
- &obj->rm);
-}
-
-static void *
-r535_gr_chan_dtor(struct nvkm_object *object)
-{
- struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
- struct r535_gr *gr = grc->gr;
-
- for (int i = 0; i < gr->ctxbuf_nr; i++) {
- nvkm_vmm_put(grc->vmm, &grc->vma[i]);
- nvkm_memory_unref(&grc->mem[i]);
- }
-
- nvkm_vmm_unref(&grc->vmm);
- return grc;
-}
-
-static const struct nvkm_object_func
-r535_gr_chan = {
- .dtor = r535_gr_chan_dtor,
-};
-
-static int
-r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
- struct nvkm_memory **pmem, struct nvkm_vma **pvma,
- struct nvkm_gsp_object *chan)
-{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
- NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
- if (WARN_ON(IS_ERR(ctrl)))
- return PTR_ERR(ctrl);
-
- ctrl->engineType = 1;
- ctrl->hChanClient = vmm->rm.client.object.handle;
- ctrl->hObject = chan->handle;
-
- for (int i = 0; i < gr->ctxbuf_nr; i++) {
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
- &ctrl->promoteEntry[ctrl->entryCount];
- const bool alloc = golden || !gr->ctxbuf[i].global;
- int ret;
-
- entry->bufferId = gr->ctxbuf[i].bufferId;
- entry->bInitialize = gr->ctxbuf[i].init && alloc;
-
- if (alloc) {
- ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
- NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
- gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
- gr->ctxbuf[i].init, &pmem[i]);
- if (WARN_ON(ret))
- return ret;
-
- if (gr->ctxbuf[i].bufferId ==
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
- entry->bNonmapped = 1;
- } else {
- if (gr->ctxbuf[i].bufferId ==
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
- continue;
-
- pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
- }
-
- if (!entry->bNonmapped) {
- struct gf100_vmm_map_v0 args = {
- .priv = 1,
- .ro = gr->ctxbuf[i].ro,
- };
-
- mutex_lock(&vmm->mutex.vmm);
- ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
- nvkm_memory_size(pmem[i]), &pvma[i]);
- mutex_unlock(&vmm->mutex.vmm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
- if (ret)
- return ret;
-
- entry->gpuVirtAddr = pvma[i]->addr;
- }
-
- if (entry->bInitialize) {
- entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
- entry->size = gr->ctxbuf[i].size;
- entry->physAttr = 4;
- }
-
- nvkm_debug(subdev,
- "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
- entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
- entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
-
- ctrl->entryCount++;
- }
-
- return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
-}
-
-static int
-r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
- struct nvkm_object **pobject)
-{
- struct r535_gr *gr = r535_gr(base);
- struct r535_gr_chan *grc;
- int ret;
-
- if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
- grc->gr = gr;
- grc->vmm = nvkm_vmm_ref(chan->vmm);
- grc->chan = chan;
- *pobject = &grc->object;
-
- ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static u64
-r535_gr_units(struct nvkm_gr *gr)
-{
- struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
-
- return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
-}
-
-static int
-r535_gr_oneinit(struct nvkm_gr *base)
-{
- NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
- struct r535_gr *gr = container_of(base, typeof(*gr), base);
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_gsp *gsp = device->gsp;
- struct nvkm_mmu *mmu = device->mmu;
- struct {
- struct nvkm_memory *inst;
- struct nvkm_vmm *vmm;
- struct nvkm_gsp_object chan;
- struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
- } golden = {};
- int ret;
-
- /* Allocate a channel to use for golden context init. */
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
- if (ret)
- goto done;
-
- ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
- if (ret)
- goto done;
-
- ret = mmu->func->promote_vmm(golden.vmm);
- if (ret)
- goto done;
-
- {
- NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
-
- args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
- device->fifo->func->chan.user.oclass,
- sizeof(*args), &golden.chan);
- if (IS_ERR(args)) {
- ret = PTR_ERR(args);
- goto done;
- }
-
- args->gpFifoOffset = 0;
- args->gpFifoEntries = 0x1000 / 8;
- args->flags =
- NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
- NVDEF(NVOS04, FLAGS, VPR, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
- NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
- NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
- NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
- NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
- NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
- NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
- NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
- NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
- NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
- NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
- NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
- args->hVASpace = golden.vmm->rm.object.handle;
- args->engineType = 1;
- args->instanceMem.base = nvkm_memory_addr(golden.inst);
- args->instanceMem.size = 0x1000;
- args->instanceMem.addressSpace = 2;
- args->instanceMem.cacheAttrib = 1;
- args->ramfcMem.base = nvkm_memory_addr(golden.inst);
- args->ramfcMem.size = 0x200;
- args->ramfcMem.addressSpace = 2;
- args->ramfcMem.cacheAttrib = 1;
- args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
- args->userdMem.size = 0x200;
- args->userdMem.addressSpace = 2;
- args->userdMem.cacheAttrib = 1;
- args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
- args->mthdbufMem.size = 0x5000;
- args->mthdbufMem.addressSpace = 2;
- args->mthdbufMem.cacheAttrib = 1;
- args->internalFlags =
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
-
- ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
- if (ret)
- goto done;
- }
-
- /* Fetch context buffer info from RM and allocate each of them here to use
- * during golden context init (or later as a global context buffer).
- *
- * Also build the information that'll be used to create channel contexts.
- */
- info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
- NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
- sizeof(*info));
- if (WARN_ON(IS_ERR(info))) {
- ret = PTR_ERR(info);
- goto done;
- }
-
- for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
- static const struct {
- u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
- u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
- bool global;
- bool init;
- bool ro;
- } map[] = {
-#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
- .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
- .global = (G), .init = (I), .ro = (R) }
-#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
- /* global init ro */
- _A( GRAPHICS, MAIN, false, true, false),
- _B( PATCH, false, true, false),
- _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
- _B( PAGEPOOL, true, false, false),
- _B( ATTRIBUTE_CB, true, false, false),
- _B( RTV_CB_GLOBAL, true, false, false),
- _B( FECS_EVENT, true, true, false),
- _B( PRIV_ACCESS_MAP, true, true, true),
-#undef _B
-#undef _A
- };
- u32 size = info->engineContextBuffersInfo[0].engine[i].size;
- u8 align, page;
- int id;
-
- for (id = 0; id < ARRAY_SIZE(map); id++) {
- if (map[id].id0 == i)
- break;
- }
-
- nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
- size, (id < ARRAY_SIZE(map)) ? "*" : "");
- if (id >= ARRAY_SIZE(map))
- continue;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
- size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
-
- if (size >= 1 << 21) page = 21;
- else if (size >= 1 << 16) page = 16;
- else page = 12;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
- align = order_base_2(size);
- else
- align = page;
-
- if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
- continue;
-
- gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
- gr->ctxbuf[gr->ctxbuf_nr].size = size;
- gr->ctxbuf[gr->ctxbuf_nr].page = page;
- gr->ctxbuf[gr->ctxbuf_nr].align = align;
- gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
- gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
- gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
- gr->ctxbuf_nr++;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
- if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
- continue;
-
- gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
- gr->ctxbuf[gr->ctxbuf_nr].bufferId =
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
- gr->ctxbuf_nr++;
- }
- }
-
- nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
-
- /* Promote golden context to RM. */
- ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
- if (ret)
- goto done;
-
- /* Allocate 3D class on channel to trigger golden context init in RM. */
- {
- int i;
-
- for (i = 0; gr->base.func->sclass[i].ctor; i++) {
- if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
- struct nvkm_gsp_object threed;
-
- ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
- gr->base.func->sclass[i].oclass, 0,
- &threed);
- if (ret)
- goto done;
-
- nvkm_gsp_rm_free(&threed);
- break;
- }
- }
-
- if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
- ret = -EINVAL;
- goto done;
- }
- }
-
-done:
- nvkm_gsp_rm_free(&golden.chan);
- for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
- nvkm_vmm_put(golden.vmm, &golden.vma[i]);
- nvkm_vmm_unref(&golden.vmm);
- nvkm_memory_unref(&golden.inst);
- return ret;
-
-}
-
-static void *
-r535_gr_dtor(struct nvkm_gr *base)
-{
- struct r535_gr *gr = r535_gr(base);
-
- while (gr->ctxbuf_nr)
- nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
-
- kfree(gr->base.func);
- return gr;
-}
-
-int
-r535_gr_new(const struct gf100_gr_func *hw,
- struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
- struct nvkm_gr_func *rm;
- struct r535_gr *gr;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_gr_dtor;
- rm->oneinit = r535_gr_oneinit;
- rm->units = r535_gr_units;
- rm->chan_new = r535_gr_chan_new;
-
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_gr_obj_ctor;
- }
-
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- *pgr = &gr->base;
-
- return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index b7a458e9040a..bda8054c6b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -219,7 +219,7 @@ int
tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&tu102_gr, device, type, inst, pgr);
+ return -ENODEV;
return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index 2b0e923cb755..37b0cdc760c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -2,8 +2,4 @@
nvkm-y += nvkm/engine/nvdec/base.o
nvkm-y += nvkm/engine/nvdec/gm107.o
nvkm-y += nvkm/engine/nvdec/tu102.o
-nvkm-y += nvkm/engine/nvdec/ga100.o
nvkm-y += nvkm/engine/nvdec/ga102.o
-nvkm-y += nvkm/engine/nvdec/ad102.o
-
-nvkm-y += nvkm/engine/nvdec/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
index 022a9c824304..eea6368adae2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
@@ -23,16 +23,6 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_nvdec_gsp = {
- .sclass = {
- { -1, -1, NVC7B0_VIDEO_DECODER },
- {}
- }
-};
-
static const struct nvkm_falcon_func
ga102_nvdec_flcn = {
.disable = gm200_flcn_disable,
@@ -67,7 +57,7 @@ ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst
struct nvkm_nvdec **pnvdec)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec);
+ return -ENODEV;
return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index f506ae83bfd7..f8d43e913093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -20,7 +20,4 @@ extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[];
int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **);
-
-int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
deleted file mode 100644
index 75a24f3e6617..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvdec_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvdec_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvdec_obj = {
- .dtor = r535_nvdec_obj_dtor,
-};
-
-static int
-r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvdec_obj *obj;
- NV_BSP_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvdec_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
-
- kfree(nvdec->engine.func);
- return nvdec;
-}
-
-int
-r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
-{
- struct nvkm_engine_func *rm;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvdec_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvdec_obj_ctor;
- }
-
- if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
index 808c8e010b9e..fe95b6e22f21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
@@ -23,22 +23,12 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-tu102_nvdec = {
- .sclass = {
- { -1, -1, NVC4B0_VIDEO_DECODER },
- {}
- }
-};
-
int
tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvdec **pnvdec)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec);
+ return -ENODEV;
return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index 2c1495b730f3..6dcb20d1d156 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -2,7 +2,3 @@
nvkm-y += nvkm/engine/nvenc/base.o
nvkm-y += nvkm/engine/nvenc/gm107.o
nvkm-y += nvkm/engine/nvenc/tu102.o
-nvkm-y += nvkm/engine/nvenc/ga102.o
-nvkm-y += nvkm/engine/nvenc/ad102.o
-
-nvkm-y += nvkm/engine/nvenc/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
deleted file mode 100644
index 6463ab8e5871..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_nvenc = {
- .sclass = {
- { -1, -1, NVC7B7_VIDEO_ENCODER },
- {}
- }
-};
-
-int
-ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvenc **pnvenc)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
index 7917affc6505..b097e3f2867b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -18,7 +18,4 @@ extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[];
int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_nvenc **pnvenc);
-
-int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
deleted file mode 100644
index c8a2a9196ce5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvenc_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvenc_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvenc_obj = {
- .dtor = r535_nvenc_obj_dtor,
-};
-
-static int
-r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvenc_obj *obj;
- NV_MSENC_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvenc_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
-
- kfree(nvenc->engine.func);
- return nvenc;
-}
-
-int
-r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
-{
- struct nvkm_engine_func *rm;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvenc_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvenc_obj_ctor;
- }
-
- if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
index 933864423bb3..8a436b398749 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
@@ -23,22 +23,12 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-tu102_nvenc = {
- .sclass = {
- { -1, -1, NVC4B7_VIDEO_ENCODER },
- {}
- }
-};
-
int
tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvenc **pnvenc)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc);
+ return -ENODEV;
return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
deleted file mode 100644
index 1408f664add6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/nvjpg/ga100.o
-nvkm-y += nvkm/engine/nvjpg/ad102.o
-
-nvkm-y += nvkm/engine/nvjpg/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
deleted file mode 100644
index 62705dc6494c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ad102_nvjpg = {
- .sclass = {
- { -1, -1, NVC9D1_VIDEO_NVJPG },
- {}
- }
-};
-
-int
-ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
deleted file mode 100644
index f550eb07da5a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga100_nvjpg = {
- .sclass = {
- { -1, -1, NVC4D1_VIDEO_NVJPG },
- {}
- }
-};
-
-int
-ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
deleted file mode 100644
index 1e80cf70033a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_NVJPG_PRIV_H__
-#define __NVKM_NVJPG_PRIV_H__
-#include <engine/nvjpg.h>
-
-int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
deleted file mode 100644
index 1babddc4eb80..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvjpg_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvjpg_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvjpg_obj = {
- .dtor = r535_nvjpg_obj_dtor,
-};
-
-static int
-r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvjpg_obj *obj;
- NV_NVJPG_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvjpg_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvjpg_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvjpg_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
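
All of the per-engine r535.c files deleted in this patch (nvjpg above, ofa below) repeat the same boilerplate: wrap a struct nvkm_object around a GSP-RM handle, fill the engine-specific *_ALLOCATION_PARAMETERS, and commit it with the alloc-get/alloc-wr pair; the series folds that into the generic rm/engine.c added near the end of this diff. A minimal sketch of the shared pattern, assuming the nvkm_gsp_rm_* helpers from <subdev/gsp.h> (names and signature condensed for illustration, not the exact upstream code):

    /* Sketch of the ctor boilerplate the deleted engines shared. */
    static int
    rm_obj_ctor_sketch(struct nvkm_gsp_object *chan, u32 handle, s32 oclass,
                       int inst, struct nvkm_gsp_object *rm)
    {
            NV_NVJPG_ALLOCATION_PARAMETERS *args;

            /* Reserve space for the RM allocation call's argument buffer... */
            args = nvkm_gsp_rm_alloc_get(chan, handle, oclass, sizeof(*args), rm);
            if (IS_ERR(args))
                    return PTR_ERR(args);

            /* ...fill in the engine-specific parameters... */
            args->size = sizeof(*args);
            args->engineInstance = inst;

            /* ...and send the allocation, which consumes 'args'. */
            return nvkm_gsp_rm_alloc_wr(rm, args);
    }
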
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
deleted file mode 100644
index 99f1713d7e51..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/ofa/ga100.o
-nvkm-y += nvkm/engine/ofa/ga102.o
-nvkm-y += nvkm/engine/ofa/ad102.o
-
-nvkm-y += nvkm/engine/ofa/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
deleted file mode 100644
index caf29e6bddb4..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_OFA_PRIV_H__
-#define __NVKM_OFA_PRIV_H__
-#include <engine/ofa.h>
-
-int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
deleted file mode 100644
index 438dc692eefe..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_ofa_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ofa_obj_dtor(struct nvkm_object *object)
-{
- struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_ofa_obj = {
- .dtor = r535_ofa_obj_dtor,
-};
-
-static int
-r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_ofa_obj *obj;
- NV_OFA_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ofa_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_ofa_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_ofa_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4c2f6fc4ef58..c19ea4ea9bd3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -9,6 +9,7 @@ include $(src)/nvkm/subdev/fault/Kbuild
include $(src)/nvkm/subdev/fb/Kbuild
include $(src)/nvkm/subdev/fuse/Kbuild
include $(src)/nvkm/subdev/gpio/Kbuild
+include $(src)/nvkm/subdev/fsp/Kbuild
include $(src)/nvkm/subdev/gsp/Kbuild
include $(src)/nvkm/subdev/i2c/Kbuild
include $(src)/nvkm/subdev/iccsense/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 9754c6872543..8faee3317a74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -7,5 +7,3 @@ nvkm-y += nvkm/subdev/bar/gk20a.o
nvkm-y += nvkm/subdev/bar/gm107.o
nvkm-y += nvkm/subdev/bar/gm20b.o
nvkm-y += nvkm/subdev/bar/tu102.o
-
-nvkm-y += nvkm/subdev/bar/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 51070b7dda85..e5e60915029c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -82,7 +82,7 @@ gf100_bar_bar2_init(struct nvkm_bar *base)
static int
gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
- struct lock_class_key *key, int bar_nr)
+ struct lock_class_key *key, enum nvkm_bar_id bar_id)
{
struct nvkm_device *device = bar->base.subdev.device;
resource_size_t bar_len;
@@ -93,14 +93,14 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
if (ret)
return ret;
- bar_len = device->func->resource_size(device, bar_nr);
+ bar_len = device->func->resource_size(device, bar_id);
if (!bar_len)
return -ENOMEM;
- if (bar_nr == 3 && bar->bar2_halve)
+ if (bar_id == NVKM_BAR2_INST && bar->bar2_halve)
bar_len >>= 1;
ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key,
- (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm);
+ (bar_id == NVKM_BAR2_INST) ? "bar2" : "bar1", &bar_vm->vmm);
if (ret)
return ret;
@@ -110,7 +110,7 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
/*
* Bootstrap page table lookup.
*/
- if (bar_nr == 3) {
+ if (bar_id == NVKM_BAR2_INST) {
ret = nvkm_vmm_boot(bar_vm->vmm);
if (ret)
return ret;
@@ -129,7 +129,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
if (bar->base.func->bar2.init) {
- ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, NVKM_BAR2_INST);
if (ret)
return ret;
@@ -138,7 +138,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
}
/* BAR1 */
- ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, NVKM_BAR1_FB);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 27d8a1be43e4..6a881becb02c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -127,7 +127,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
start = 0x0100000000ULL;
- size = device->func->resource_size(device, 3);
+ size = device->func->resource_size(device, NVKM_BAR2_INST);
if (!size)
return -ENOMEM;
limit = start + size;
@@ -167,7 +167,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR1 */
start = 0x0000000000ULL;
- size = device->func->resource_size(device, 1);
+ size = device->func->resource_size(device, NVKM_BAR1_FB);
if (!size)
return -ENOMEM;
limit = start + size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 6c5bbff12eb4..b918e22df5a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -47,8 +47,8 @@
static inline struct io_mapping *
fbmem_init(struct nvkm_device *dev)
{
- return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
- dev->func->resource_size(dev, 1));
+ return io_mapping_create_wc(dev->func->resource_addr(dev, NVKM_BAR1_FB),
+ dev->func->resource_size(dev, NVKM_BAR1_FB));
}
static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
index c123e5893d76..cd2fbc0472d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
@@ -50,7 +50,7 @@ nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
struct nvkm_device *device = buffer->fault->subdev.device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = device->func->resource_addr(device, 3) + buffer->addr;
+ *addr = device->func->resource_addr(device, NVKM_BAR2_INST) + buffer->addr;
*size = nvkm_memory_size(buffer->mem);
return 0;
}
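
The hunks above replace the raw PCI BAR indices nouveau used to pass around (1 for the framebuffer aperture, 3 for the instance-memory aperture) with named IDs that resource_addr()/resource_size() translate per chip. The enum itself is introduced elsewhere in this series; as an assumption, it reads roughly like:

    /* Assumed shape of the BAR IDs referenced above (defined outside these hunks). */
    enum nvkm_bar_id {
            NVKM_BAR_INVALID = 0,
            NVKM_BAR0_PRI,  /* register aperture (PCI BAR0) */
            NVKM_BAR1_FB,   /* framebuffer aperture (PCI BAR1) */
            NVKM_BAR2_INST, /* instance memory (PCI BAR3 on most dGPUs) */
    };
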
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index d1611ad3bf81..8d8a5382d1b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -35,6 +35,9 @@ nvkm-y += nvkm/subdev/fb/gv100.o
nvkm-y += nvkm/subdev/fb/tu102.o
nvkm-y += nvkm/subdev/fb/ga100.o
nvkm-y += nvkm/subdev/fb/ga102.o
+nvkm-y += nvkm/subdev/fb/gh100.o
+nvkm-y += nvkm/subdev/fb/gb100.o
+nvkm-y += nvkm/subdev/fb/gb202.o
nvkm-y += nvkm/subdev/fb/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index 25f82b372bca..2819780050d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -25,7 +25,7 @@
#include <subdev/gsp.h>
#include <engine/nvdec.h>
-static u64
+u64
ga102_fb_vidmem_size(struct nvkm_fb *fb)
{
return (u64)nvkm_rd32(fb->subdev.device, 0x1183a4) << 20;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
new file mode 100644
index 000000000000..1c78c8853617
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb100/dev_hshub_base.h>
+
+static void
+gb100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ const u32 addr_hi = upper_32_bits(fb->sysmem.flush_page_addr);
+ const u32 addr_lo = lower_32_bits(fb->sysmem.flush_page_addr);
+ const u32 hshub = DRF_LO(NV_PFB_HSHUB0);
+ struct nvkm_device *device = fb->subdev.device;
+
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo);
+}
+
+static const struct nvkm_fb_func
+gb100_fb = {
+ .sysmem.flush_page_init = gb100_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gb100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gb100_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
new file mode 100644
index 000000000000..848505026d02
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb10b/dev_fbhub.h>
+
+static void
+gb202_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ const u64 addr = fb->sysmem.flush_page_addr;
+
+ nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
+ nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
+}
+
+static const struct nvkm_fb_func
+gb202_fb = {
+ .sysmem.flush_page_init = gb202_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gb202_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gb202_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
new file mode 100644
index 000000000000..2d8c51f882d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_fb.h>
+
+static void
+gh100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ const u64 addr = fb->sysmem.flush_page_addr >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT;
+ struct nvkm_device *device = fb->subdev.device;
+
+ nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
+ nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
+}
+
+static const struct nvkm_fb_func
+gh100_fb = {
+ .sysmem.flush_page_init = gh100_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gh100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gh100_fb, device, type, inst, pfb);
+}
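
The three new flush-page initialisers differ only in where the address lands: gb100 mirrors it into two HSHUB register pairs, gb202 writes a single FBHUB0 pair verbatim, and gh100 shifts the whole address right by NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT before splitting it, so its LO register carries bits [SHIFT+31:SHIFT] rather than the raw low word. A toy illustration of the gh100-style split (the real shift value comes from gh100's dev_fb.h; 8 is assumed here purely for the example):

    #define EXAMPLE_SHIFT 8 /* assumed value, for illustration only */

    static void example_split(u64 flush_page_addr, u32 *hi, u32 *lo)
    {
            u64 addr = flush_page_addr >> EXAMPLE_SHIFT;

            *hi = upper_32_bits(addr); /* bits [63:EXAMPLE_SHIFT + 32] */
            *lo = lower_32_bits(addr); /* bits [EXAMPLE_SHIFT + 31:EXAMPLE_SHIFT] */
    }
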
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 35c55dfba23d..ebe996503ab2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -98,4 +98,6 @@ int gp102_fb_vpr_scrub(struct nvkm_fb *);
int gv100_fb_init_page(struct nvkm_fb *);
bool tu102_fb_vpr_scrub_required(struct nvkm_fb *);
+
+u64 ga102_fb_vidmem_size(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild
new file mode 100644
index 000000000000..1a9ded3a86f8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+
+nvkm-y += nvkm/subdev/fsp/base.o
+nvkm-y += nvkm/subdev/fsp/gh100.o
+nvkm-y += nvkm/subdev/fsp/gb100.o
+nvkm-y += nvkm/subdev/fsp/gb202.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c
new file mode 100644
index 000000000000..e366a980baa9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+int
+nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig)
+{
+ return fsp->func->cot.boot_gsp_fmc(fsp, args_addr, rsvd_size, resume,
+ img_addr, hash, pkey, sig);
+}
+
+bool
+nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *fsp, u32 hash_size, u32 pkey_size, u32 sig_size)
+{
+ return hash_size == fsp->func->cot.size_hash &&
+ pkey_size == fsp->func->cot.size_pkey &&
+ sig_size == fsp->func->cot.size_sig;
+}
+
+static int
+nvkm_fsp_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_fsp *fsp = nvkm_fsp(subdev);
+
+ return fsp->func->wait_secure_boot(fsp);
+}
+
+static void *
+nvkm_fsp_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_fsp *fsp = nvkm_fsp(subdev);
+
+ nvkm_falcon_dtor(&fsp->falcon);
+ return fsp;
+}
+
+static const struct nvkm_falcon_func
+nvkm_fsp_flcn = {
+ .emem_pio = &gp102_flcn_emem_pio,
+};
+
+static const struct nvkm_subdev_func
+nvkm_fsp = {
+ .dtor = nvkm_fsp_dtor,
+ .preinit = nvkm_fsp_preinit,
+};
+
+int
+nvkm_fsp_new_(const struct nvkm_fsp_func *func,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fsp **pfsp)
+{
+ struct nvkm_fsp *fsp;
+
+ fsp = *pfsp = kzalloc(sizeof(*fsp), GFP_KERNEL);
+ if (!fsp)
+ return -ENOMEM;
+
+ fsp->func = func;
+ nvkm_subdev_ctor(&nvkm_fsp, device, type, inst, &fsp->subdev);
+
+ return nvkm_falcon_ctor(&nvkm_fsp_flcn, &fsp->subdev, "fsp", 0x8f2000, &fsp->falcon);
+}
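
These two exports are consumed later in this diff by the GSP code: gh100_gsp_oneinit() checks the firmware's section sizes against the chip's chain-of-trust descriptor with nvkm_fsp_verify_gsp_fmc(), and gh100_gsp_init() then hands the FMC image and its signature material to nvkm_fsp_boot_gsp_fmc(). Condensed from those callers (error handling trimmed):

    if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_len, pkey_len, sig_len))
            return -EINVAL;

    /* ... later, at init time ... */
    ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, rsvd_size, resume,
                                gsp->fmc.fw.addr, gsp->fmc.hash, gsp->fmc.pkey,
                                gsp->fmc.sig);
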
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c
new file mode 100644
index 000000000000..e06636bf54b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_fsp_func
+gb100_fsp = {
+ .wait_secure_boot = gh100_fsp_wait_secure_boot,
+ .cot = {
+ .version = 2,
+ .size_hash = 48,
+ .size_pkey = 97,
+ .size_sig = 96,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gb100_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gb100_fsp, device, type, inst, pfsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c
new file mode 100644
index 000000000000..3438aac6383e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb202/dev_therm.h>
+
+static int
+gb202_fsp_wait_secure_boot(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ unsigned timeout_ms = 4000;
+
+ do {
+ u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS);
+
+ if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout_ms--);
+
+ return -ETIMEDOUT;
+}
+
+static const struct nvkm_fsp_func
+gb202_fsp = {
+ .wait_secure_boot = gb202_fsp_wait_secure_boot,
+ .cot = {
+ .version = 2,
+ .size_hash = 48,
+ .size_pkey = 97,
+ .size_sig = 96,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gb202_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gb202_fsp, device, type, inst, pfsp);
+}
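
gb202_fsp_wait_secure_boot() duplicates gh100's implementation below, and in both the timeout_ms counter bounds loop iterations rather than milliseconds: with usleep_range(1000, 2000) per pass, the worst-case wait is closer to 8s than 4s. A sketch of the same poll with a true wall-clock bound, using only stock jiffies helpers:

    /* Sketch: same poll, bounded by wall-clock time instead of a pass count. */
    unsigned long deadline = jiffies + msecs_to_jiffies(4000);

    do {
            u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS);

            if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
                    return 0;

            usleep_range(1000, 2000);
    } while (time_before(jiffies, deadline));

    return -ETIMEDOUT;
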
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c
new file mode 100644
index 000000000000..2815be4bf5de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_fsp_pri.h>
+#include <nvhw/ref/gh100/dev_therm.h>
+
+#include <nvrm/nvtypes.h>
+
+#define MCTP_HEADER_VERSION 3:0
+#define MCTP_HEADER_RSVD 7:4
+
+#define MCTP_HEADER_DEID 15:8
+#define MCTP_HEADER_SEID 23:16
+
+#define MCTP_HEADER_TAG 26:24
+#define MCTP_HEADER_TO 27:27
+#define MCTP_HEADER_SEQ 29:28
+#define MCTP_HEADER_EOM 30:30
+#define MCTP_HEADER_SOM 31:31
+
+#define MCTP_MSG_HEADER_TYPE 6:0
+#define MCTP_MSG_HEADER_IC 7:7
+
+#define MCTP_MSG_HEADER_VENDOR_ID 23:8
+#define MCTP_MSG_HEADER_NVDM_TYPE 31:24
+
+#define MCTP_MSG_HEADER_TYPE_VENDOR_PCI 0x7e
+#define MCTP_MSG_HEADER_VENDOR_ID_NV 0x10de
+
+#define NVDM_TYPE_COT 0x14
+#define NVDM_TYPE_FSP_RESPONSE 0x15
+
+#pragma pack(1)
+typedef struct nvdm_payload_cot
+{
+ NvU16 version;
+ NvU16 size;
+ NvU64 gspFmcSysmemOffset;
+ NvU64 frtsSysmemOffset;
+ NvU32 frtsSysmemSize;
+
+ // Note this is an offset from the end of FB
+ NvU64 frtsVidmemOffset;
+ NvU32 frtsVidmemSize;
+
+ // Authentication related fields
+ NvU32 hash384[12];
+ NvU32 publicKey[96];
+ NvU32 signature[96];
+
+ NvU64 gspBootArgsSysmemOffset;
+} NVDM_PAYLOAD_COT;
+#pragma pack()
+
+#pragma pack(1)
+typedef struct
+{
+ NvU32 taskId;
+ NvU32 commandNvdmType;
+ NvU32 errorCode;
+} NVDM_PAYLOAD_COMMAND_RESPONSE;
+#pragma pack()
+
+static u32
+gh100_fsp_poll(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ u32 head, tail;
+
+ head = nvkm_rd32(device, NV_PFSP_MSGQ_HEAD(0));
+ tail = nvkm_rd32(device, NV_PFSP_MSGQ_TAIL(0));
+
+ if (head == tail)
+ return 0;
+
+ return (tail - head) + sizeof(u32); /* TAIL points at last DWORD written. */
+}
+
+static int
+gh100_fsp_recv(struct nvkm_fsp *fsp, u8 *packet, u32 max_packet_size)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ u32 packet_size;
+ int ret;
+
+ packet_size = gh100_fsp_poll(fsp);
+ if (!packet_size || WARN_ON(packet_size % 4 || packet_size > max_packet_size))
+ return -EINVAL;
+
+ ret = nvkm_falcon_pio_rd(&fsp->falcon, 0, EMEM, 0, packet, 0, packet_size);
+ if (ret)
+ return ret;
+
+ nvkm_wr32(device, NV_PFSP_MSGQ_TAIL(0), 0);
+ nvkm_wr32(device, NV_PFSP_MSGQ_HEAD(0), 0);
+
+ return packet_size;
+}
+
+static int
+gh100_fsp_wait(struct nvkm_fsp *fsp)
+{
+ int time = 1000;
+
+ do {
+ if (gh100_fsp_poll(fsp))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (time--);
+
+ return -ETIMEDOUT;
+}
+
+static int
+gh100_fsp_send(struct nvkm_fsp *fsp, const u8 *packet, u32 packet_size)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ int time = 1000, ret;
+
+ if (WARN_ON(packet_size % sizeof(u32)))
+ return -EINVAL;
+
+ /* Ensure any previously sent message has been consumed. */
+ do {
+ u32 head = nvkm_rd32(device, NV_PFSP_QUEUE_HEAD(0));
+ u32 tail = nvkm_rd32(device, NV_PFSP_QUEUE_TAIL(0));
+
+ if (tail == head)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (time--);
+
+ if (time < 0)
+ return -ETIMEDOUT;
+
+ /* Write message to EMEM. */
+ ret = nvkm_falcon_pio_wr(&fsp->falcon, packet, 0, 0, EMEM, 0, packet_size, 0, false);
+ if (ret)
+ return ret;
+
+ /* Update queue pointers - TAIL points at last DWORD written. */
+ nvkm_wr32(device, NV_PFSP_QUEUE_TAIL(0), packet_size - sizeof(u32));
+ nvkm_wr32(device, NV_PFSP_QUEUE_HEAD(0), 0);
+ return 0;
+}
+
+static int
+gh100_fsp_send_sync(struct nvkm_fsp *fsp, u8 nvdm_type, const u8 *packet, u32 packet_size)
+{
+ struct nvkm_subdev *subdev = &fsp->subdev;
+ struct {
+ u32 mctp_header;
+ u32 nvdm_header;
+ NVDM_PAYLOAD_COMMAND_RESPONSE response;
+ } reply;
+ int ret;
+
+ ret = gh100_fsp_send(fsp, packet, packet_size);
+ if (ret)
+ return ret;
+
+ ret = gh100_fsp_wait(fsp);
+ if (ret)
+ return ret;
+
+ ret = gh100_fsp_recv(fsp, (u8 *)&reply, sizeof(reply));
+ if (ret < 0)
+ return ret;
+
+ if (NVVAL_TEST(reply.mctp_header, MCTP, HEADER, SOM, !=, 1) ||
+ NVVAL_TEST(reply.mctp_header, MCTP, HEADER, EOM, !=, 1)) {
+ nvkm_error(subdev, "unexpected MCTP header in reply: 0x%08x\n", reply.mctp_header);
+ return -EIO;
+ }
+
+ if (NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, TYPE, !=, VENDOR_PCI) ||
+ NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, VENDOR_ID, !=, NV) ||
+ NVVAL_TEST(reply.nvdm_header, MCTP, MSG_HEADER, NVDM_TYPE, !=, NVDM_TYPE_FSP_RESPONSE)) {
+ nvkm_error(subdev, "unexpected NVDM header in reply: 0x%08x\n", reply.nvdm_header);
+ return -EIO;
+ }
+
+ if (reply.response.commandNvdmType != nvdm_type) {
+ nvkm_error(subdev, "expected NVDM type 0x%02x in reply, got 0x%02x\n",
+ nvdm_type, reply.response.commandNvdmType);
+ return -EIO;
+ }
+
+ if (reply.response.errorCode) {
+ nvkm_error(subdev, "NVDM command 0x%02x failed with error 0x%08x\n",
+ nvdm_type, reply.response.errorCode);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig)
+{
+ struct {
+ u32 mctp_header;
+ u32 nvdm_header;
+ NVDM_PAYLOAD_COT cot;
+ } msg = {};
+
+ msg.mctp_header = NVVAL(MCTP, HEADER, SOM, 1) |
+ NVVAL(MCTP, HEADER, EOM, 1) |
+ NVVAL(MCTP, HEADER, SEID, 0) |
+ NVVAL(MCTP, HEADER, SEQ, 0);
+
+ msg.nvdm_header = NVDEF(MCTP, MSG_HEADER, TYPE, VENDOR_PCI) |
+ NVDEF(MCTP, MSG_HEADER, VENDOR_ID, NV) |
+ NVVAL(MCTP, MSG_HEADER, NVDM_TYPE, NVDM_TYPE_COT);
+
+ msg.cot.version = fsp->func->cot.version;
+ msg.cot.size = sizeof(msg.cot);
+ msg.cot.gspFmcSysmemOffset = img_addr;
+ if (!resume) {
+ msg.cot.frtsVidmemOffset = ALIGN(rsvd_size, 0x200000);
+ msg.cot.frtsVidmemSize = 0x100000;
+ }
+
+ memcpy(msg.cot.hash384, hash, fsp->func->cot.size_hash);
+ memcpy(msg.cot.publicKey, pkey, fsp->func->cot.size_pkey);
+ memcpy(msg.cot.signature, sig, fsp->func->cot.size_sig);
+
+ msg.cot.gspBootArgsSysmemOffset = args_addr;
+
+ return gh100_fsp_send_sync(fsp, NVDM_TYPE_COT, (const u8 *)&msg, sizeof(msg));
+}
+
+int
+gh100_fsp_wait_secure_boot(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ unsigned int timeout_ms = 4000;
+
+ do {
+ u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS);
+
+ if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout_ms--);
+
+ return -ETIMEDOUT;
+}
+
+static const struct nvkm_fsp_func
+gh100_fsp = {
+ .wait_secure_boot = gh100_fsp_wait_secure_boot,
+ .cot = {
+ .version = 1,
+ .size_hash = 48,
+ .size_pkey = 384,
+ .size_sig = 384,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gh100_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gh100_fsp, device, type, inst, pfsp);
+}
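
For reference, the DRF ranges above make the fixed parts of the chain-of-trust exchange easy to verify by hand: SOM and EOM are bits 31 and 30 of the MCTP transport header, while the NVDM message header packs TYPE into bits 6:0, VENDOR_ID into 23:8 and NVDM_TYPE into 31:24. Assuming NVVAL/NVDEF expand to plain shift-and-mask packing (their usual definition in nvhw/drf.h), the headers built by gh100_fsp_boot_gsp_fmc() work out to:

    /* Worked example, assuming NVVAL/NVDEF are plain shift-and-mask packers. */
    u32 mctp_header = (1u << 31) | (1u << 30); /* SOM | EOM = 0xc0000000 */
    u32 nvdm_header = 0x7e                     /* TYPE = VENDOR_PCI */
                    | (0x10de << 8)            /* VENDOR_ID = NV */
                    | (0x14u << 24);           /* NVDM_TYPE_COT => 0x1410de7e */
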
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h
new file mode 100644
index 000000000000..f0b2c605c33d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_FSP_PRIV_H__
+#define __NVKM_FSP_PRIV_H__
+#define nvkm_fsp(p) container_of((p), struct nvkm_fsp, subdev)
+#include <subdev/fsp.h>
+
+struct nvkm_fsp_func {
+ int (*wait_secure_boot)(struct nvkm_fsp *);
+
+ struct {
+ u32 version;
+ u32 size_hash;
+ u32 size_pkey;
+ u32 size_sig;
+ int (*boot_gsp_fmc)(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+ } cot;
+};
+
+int nvkm_fsp_new_(const struct nvkm_fsp_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+
+int gh100_fsp_wait_secure_boot(struct nvkm_fsp *);
+int gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index 16bf2f1bb780..e9c948b67bbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -7,6 +7,9 @@ nvkm-y += nvkm/subdev/gsp/tu102.o
nvkm-y += nvkm/subdev/gsp/tu116.o
nvkm-y += nvkm/subdev/gsp/ga100.o
nvkm-y += nvkm/subdev/gsp/ga102.o
+nvkm-y += nvkm/subdev/gsp/gh100.o
nvkm-y += nvkm/subdev/gsp/ad102.o
+nvkm-y += nvkm/subdev/gsp/gb100.o
+nvkm-y += nvkm/subdev/gsp/gb202.o
-nvkm-y += nvkm/subdev/gsp/r535.o
+include $(src)/nvkm/subdev/gsp/rm/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index c849c6299c52..eb765da0876e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -22,30 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-ad102_gsp_r535_113_01 = {
+ad102_gsp = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ad10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ad10x_gpu,
};
static struct nvkm_gsp_fwif
ad102_gsps[] = {
- { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true },
+ { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true },
+ { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true },
{}
};
@@ -55,3 +52,15 @@ ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index da1bebb896f7..d23243a83a4c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -52,7 +52,7 @@ nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_gsp *gsp = nvkm_gsp(subdev);
- if (!gsp->func->fini)
+ if (!gsp->func->fini || !gsp->running)
return 0;
return gsp->func->fini(gsp, suspend);
@@ -80,6 +80,21 @@ nvkm_gsp_oneinit(struct nvkm_subdev *subdev)
return gsp->func->oneinit(gsp);
}
+void
+nvkm_gsp_dtor_fws(struct nvkm_gsp *gsp)
+{
+ nvkm_firmware_put(gsp->fws.fmc);
+ gsp->fws.fmc = NULL;
+ nvkm_firmware_put(gsp->fws.bl);
+ gsp->fws.bl = NULL;
+ nvkm_firmware_put(gsp->fws.booter.unload);
+ gsp->fws.booter.unload = NULL;
+ nvkm_firmware_put(gsp->fws.booter.load);
+ gsp->fws.booter.load = NULL;
+ nvkm_firmware_put(gsp->fws.rm);
+ gsp->fws.rm = NULL;
+}
+
static void *
nvkm_gsp_dtor(struct nvkm_subdev *subdev)
{
@@ -89,6 +104,7 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev)
gsp->func->dtor(gsp);
nvkm_falcon_dtor(&gsp->falcon);
+ kfree(gsp->rm);
return gsp;
}
@@ -101,6 +117,16 @@ nvkm_gsp = {
};
int
+nvkm_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
+ const struct firmware **pfw)
+{
+ char fwname[64];
+
+ snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
+ return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
+}
+
+int
nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp)
{
@@ -116,7 +142,19 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
return PTR_ERR(fwif);
gsp->func = fwif->func;
- gsp->rm = gsp->func->rm;
+
+ if (fwif->rm) {
+ nvkm_info(&gsp->subdev, "RM version: %s\n", fwif->ver);
+
+ gsp->rm = kzalloc(sizeof(*gsp->rm), GFP_KERNEL);
+ if (!gsp->rm)
+ return -ENOMEM;
+
+ gsp->rm->device = device;
+ gsp->rm->gpu = fwif->func->rm.gpu;
+ gsp->rm->wpr = fwif->rm->wpr;
+ gsp->rm->api = fwif->rm->api;
+ }
return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000,
&gsp->falcon);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index 223f68b532ef..d201e8697226 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -39,29 +39,27 @@ ga100_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga100_gsp_r535_113_01 = {
+ga100_gsp = {
.flcn = &ga100_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_ga100",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga100_gpu,
};
static struct nvkm_gsp_fwif
ga100_gsps[] = {
- { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga100_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &ga100_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -72,3 +70,6 @@ ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index 4c4b4168a266..917f7e2f6c46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -150,25 +150,21 @@ ga102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga102_gsp_r535_113_01 = {
+ga102_gsp_r535 = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ga10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga1xx_gpu,
};
static const struct nvkm_gsp_func
@@ -178,7 +174,8 @@ ga102_gsp = {
static struct nvkm_gsp_fwif
ga102_gsps[] = {
- { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga102_gsp_r535, &r570_rm_ga102, "570.144" },
+ { 0, tu102_gsp_load, &ga102_gsp_r535, &r535_rm_ga102, "535.113.01" },
{ -1, gv100_gsp_nofw, &ga102_gsp },
{}
};
@@ -189,3 +186,15 @@ ga102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
new file mode 100644
index 000000000000..12a3f2c1ed82
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb10x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb10x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb100_gsps[] = {
+ { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true },
+ {}
+};
+
+int
+gb100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb100, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb102, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
new file mode 100644
index 000000000000..c1d718172ddf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb202_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb20x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb20x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb202_gsps[] = {
+ { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true },
+ {}
+};
+
+int
+gb202_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb202_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb202, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb203, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb205, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb206, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb207, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
new file mode 100644
index 000000000000..ce31e8248807
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <linux/elf.h>
+#include <linux/crc32.h>
+
+#include <subdev/fb.h>
+#include <subdev/fsp.h>
+
+#include <rm/r570/nvrm/gsp.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_falcon_v4.h>
+#include <nvhw/ref/gh100/dev_riscv_pri.h>
+
+int
+gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ struct nvkm_falcon *falcon = &gsp->falcon;
+ int ret, time = 4000;
+
+ /* Shutdown RM. */
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ /* Wait for RISC-V to halt. */
+ do {
+ u32 data = nvkm_falcon_rd32(falcon, falcon->addr2 + NV_PRISCV_RISCV_CPUCTL);
+
+ if (NVVAL_GET(data, NV_PRISCV, RISCV_CPUCTL, HALTED))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (time--);
+
+ return -ETIMEDOUT;
+}
+
+static bool
+gh100_gsp_lockdown_released(struct nvkm_gsp *gsp, u32 *mbox0)
+{
+ u32 data;
+
+ /* Wait for GSP access via BAR0 to be allowed. */
+ *mbox0 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX0);
+
+ if (*mbox0 && (*mbox0 & 0xffffff00) == 0xbadf4100)
+ return false;
+
+ /* Check if an error code has been reported. */
+ if (*mbox0) {
+ u32 mbox1 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX1);
+
+ /* Any value that's not GSP_FMC_BOOT_PARAMS addr is an error. */
+ if ((((u64)mbox1 << 32) | *mbox0) != gsp->fmc.args.addr)
+ return true;
+ }
+
+ /* Check if lockdown has been released. */
+ data = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_HWCFG2);
+ return !NVVAL_GET(data, NV_PFALCON, FALCON_HWCFG2, RISCV_BR_PRIV_LOCKDOWN);
+}
+
+int
+gh100_gsp_init(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ const bool resume = gsp->sr.meta.data != NULL;
+ struct nvkm_gsp_mem *meta;
+ GSP_FMC_BOOT_PARAMS *args;
+ int ret, time = 4000;
+ u32 rsvd_size;
+ u32 mbox0;
+
+ if (!resume) {
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*args), &gsp->fmc.args);
+ if (ret)
+ return ret;
+
+ meta = &gsp->wpr_meta;
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+ meta = &gsp->sr.meta;
+ }
+
+ args = gsp->fmc.args.data;
+
+ args->bootGspRmParams.gspRmDescOffset = meta->addr;
+ args->bootGspRmParams.gspRmDescSize = meta->size;
+ args->bootGspRmParams.target = GSP_DMA_TARGET_COHERENT_SYSTEM;
+ args->bootGspRmParams.bIsGspRmBoot = 1;
+
+ args->gspRmParams.target = GSP_DMA_TARGET_NONCOHERENT_SYSTEM;
+ args->gspRmParams.bootArgsOffset = gsp->libos.addr;
+
+ rsvd_size = gsp->fb.heap.size;
+ if (gsp->rm->wpr->rsvd_size_pmu)
+ rsvd_size = ALIGN(rsvd_size + gsp->rm->wpr->rsvd_size_pmu, 0x200000);
+
+ ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, rsvd_size, resume,
+ gsp->fmc.fw.addr, gsp->fmc.hash, gsp->fmc.pkey, gsp->fmc.sig);
+ if (ret)
+ return ret;
+
+ do {
+ if (gh100_gsp_lockdown_released(gsp, &mbox0))
+ break;
+
+ usleep_range(1000, 2000);
+ } while (time--);
+
+ if (time < 0) {
+ nvkm_error(subdev, "GSP-FMC boot timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mbox0) {
+ nvkm_error(subdev, "GSP-FMC boot failed (mbox: 0x%08x)\n", mbox0);
+ return -EIO;
+ }
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+gh100_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ gsp->fb.bios.vga_workspace.size = 128 * 1024;
+ gsp->fb.heap.size = gsp->rm->wpr->heap_size_non_wpr;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sizeOfRadix3Elf = gsp->fw.len;
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwHeapSize = tu102_gsp_wpr_heap_size(gsp);
+ meta->frtsSize = 0x100000;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->pmuReservedSize = gsp->rm->wpr->rsvd_size_pmu;
+ return 0;
+}
+
+/* The sh_flags value for the binary blobs in the ELF image */
+#define FMC_SHF_FLAGS (SHF_MASKPROC | SHF_MASKOS | SHF_OS_NONCONFORMING | SHF_ALLOC)
+
+#define ELF_HDR_SIZE ((u8)sizeof(struct elf32_hdr))
+#define ELF_SHDR_SIZE ((u8)sizeof(struct elf32_shdr))
+
+/* The FMC ELF header must be exactly this */
+static const u8 elf_header[] = {
+ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 1, 0, 0, 0, /* e_type, e_machine, e_version */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* e_entry, e_phoff */
+
+ ELF_HDR_SIZE, 0, 0, 0, 0, 0, 0, 0, /* e_shoff, e_flags */
+ ELF_HDR_SIZE, 0, 0, 0, /* e_ehsize, e_phentsize */
+ 0, 0, ELF_SHDR_SIZE, 0, /* e_phnum, e_shentsize */
+
+ 6, 0, 1, 0, /* e_shnum, e_shstrndx */
+};
+
+/**
+ * elf_validate_sections - validate each section in the FMC ELF image
+ * @elf: ELF image
+ * @length: size of the entire ELF image
+ */
+static bool
+elf_validate_sections(const void *elf, size_t length)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+
+ /* The offset of the first section */
+ Elf32_Off section_begin = ehdr->e_shoff + ehdr->e_shnum * ehdr->e_shentsize;
+
+ if (section_begin > length)
+ return false;
+
+ /* The first section header is the null section, so skip it */
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (i == ehdr->e_shstrndx) {
+ if (shdr[i].sh_type != SHT_STRTAB)
+ return false;
+ if (shdr[i].sh_flags != SHF_STRINGS)
+ return false;
+ } else {
+ if (shdr[i].sh_type != SHT_PROGBITS)
+ return false;
+ if (shdr[i].sh_flags != FMC_SHF_FLAGS)
+ return false;
+ }
+
+ /* Ensure that each section is inside the image */
+ if (shdr[i].sh_offset < section_begin ||
+ (u64)shdr[i].sh_offset + shdr[i].sh_size > length)
+ return false;
+
+ /* Non-zero sh_info is a CRC */
+ if (shdr[i].sh_info) {
+ /* The kernel's CRC32 needs a pre- and post-xor to match standard CRCs */
+ u32 crc32 = crc32_le(~0, elf + shdr[i].sh_offset, shdr[i].sh_size) ^ ~0;
+
+ if (shdr[i].sh_info != crc32)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * elf_section - return a pointer to the data for a given section
+ * @elf: ELF image
+ * @name: section name to search for
+ * @len: pointer to returned length of found section
+ */
+static const void *
+elf_section(const void *elf, const char *name, unsigned int *len)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+ const char *names = elf + shdr[ehdr->e_shstrndx].sh_offset;
+
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (!strcmp(&names[shdr[i].sh_name], name)) {
+ *len = shdr[i].sh_size;
+ return elf + shdr[i].sh_offset;
+ }
+ }
+
+ return NULL;
+}
+
+int
+gh100_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fsp *fsp = device->fsp;
+ const void *fw = gsp->fws.fmc->data;
+ const void *hash, *sig, *pkey, *img;
+ unsigned int img_len = 0, hash_len = 0, pkey_len = 0, sig_len = 0;
+ int ret;
+
+ if (gsp->fws.fmc->size < ELF_HDR_SIZE ||
+ memcmp(fw, elf_header, sizeof(elf_header)) ||
+ !elf_validate_sections(fw, gsp->fws.fmc->size)) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
+ hash = elf_section(fw, "hash", &hash_len);
+ sig = elf_section(fw, "signature", &sig_len);
+ pkey = elf_section(fw, "publickey", &pkey_len);
+ img = elf_section(fw, "image", &img_len);
+
+ if (!hash || !sig || !pkey || !img) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
+ if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_len, pkey_len, sig_len))
+ return -EINVAL;
+
+ /* Load GSP-FMC FW into memory. */
+ ret = nvkm_gsp_mem_ctor(gsp, img_len, &gsp->fmc.fw);
+ if (ret)
+ return ret;
+
+ memcpy(gsp->fmc.fw.data, img, img_len);
+
+ gsp->fmc.hash = kmemdup(hash, hash_len, GFP_KERNEL);
+ gsp->fmc.pkey = kmemdup(pkey, pkey_len, GFP_KERNEL);
+ gsp->fmc.sig = kmemdup(sig, sig_len, GFP_KERNEL);
+ if (!gsp->fmc.hash || !gsp->fmc.pkey || !gsp->fmc.sig)
+ return -ENOMEM;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ return gh100_gsp_wpr_meta_init(gsp);
+}
+
+static const struct nvkm_gsp_func
+gh100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gh100",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gh100_gpu,
+};
+
+int
+gh100_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "fmc", fwif->ver, &gsp->fws.fmc);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
+static struct nvkm_gsp_fwif
+gh100_gsps[] = {
+ { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true },
+ {}
+};
+
+int
+gh100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gh100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gh100, 570.144);
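
The sh_info convention validated in elf_validate_sections() (crc32_le seeded and finalised with ~0) is the standard CRC-32, so the FMC sections can be cross-checked outside the kernel with zlib, whose crc32() bakes the same xors in. A userspace sketch (the section pointer and length would come from your own ELF walk):

    /* Userspace cross-check: zlib crc32() == kernel crc32_le() with ~0 pre/post xor. */
    #include <stdint.h>
    #include <stddef.h>
    #include <zlib.h>

    static int check_section_crc(const uint8_t *sec, size_t len, uint32_t sh_info)
    {
            uint32_t crc = crc32(0L, Z_NULL, 0); /* standard CRC-32 initial state */

            crc = crc32(crc, sec, len);
            return crc == sh_info ? 0 : -1;
    }
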
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 9f4a62375a27..4f14e85fc69e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_GSP_PRIV_H__
#define __NVKM_GSP_PRIV_H__
#include <subdev/gsp.h>
+#include <rm/gpu.h>
enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
@@ -11,12 +12,32 @@ struct nvkm_gsp_fwif {
int version;
int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
const struct nvkm_gsp_func *func;
+ const struct nvkm_rm_impl *rm;
const char *ver;
bool enable;
};
+int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver,
+ const struct firmware **);
+void nvkm_gsp_dtor_fws(struct nvkm_gsp *);
+
int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
-int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+int tu102_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+int tu102_gsp_load_rm(struct nvkm_gsp *, const struct nvkm_gsp_fwif *);
+
+int gh100_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+#define NVKM_GSP_FIRMWARE_BOOTER(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
+
+#define NVKM_GSP_FIRMWARE_FMC(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/fmc-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
struct nvkm_gsp_func {
const struct nvkm_falcon_func *flcn;
@@ -25,12 +46,6 @@ struct nvkm_gsp_func {
char *sig_section;
struct {
- u32 os_carveout_size;
- u32 base_size;
- u64 min_size;
- } wpr_heap;
-
- struct {
int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
@@ -41,7 +56,9 @@ struct nvkm_gsp_func {
int (*fini)(struct nvkm_gsp *, bool suspend);
int (*reset)(struct nvkm_gsp *);
- const struct nvkm_gsp_rm *rm;
+ struct {
+ const struct nvkm_rm_gpu *gpu;
+ } rm;
};
extern const struct nvkm_falcon_func tu102_gsp_flcn;
@@ -49,7 +66,10 @@ extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
+int tu102_gsp_init(struct nvkm_gsp *);
+int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
int tu102_gsp_reset(struct nvkm_gsp *);
+u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *);
extern const struct nvkm_falcon_func ga102_gsp_flcn;
extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec;
@@ -57,11 +77,14 @@ int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int ga102_gsp_reset(struct nvkm_gsp *);
+int gh100_gsp_oneinit(struct nvkm_gsp *);
+int gh100_gsp_init(struct nvkm_gsp *);
+int gh100_gsp_fini(struct nvkm_gsp *, bool suspend);
+
void r535_gsp_dtor(struct nvkm_gsp *);
int r535_gsp_oneinit(struct nvkm_gsp *);
int r535_gsp_init(struct nvkm_gsp *);
int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
-extern const struct nvkm_gsp_rm r535_gsp_rm;
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
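
Concretely, the two firmware macros just enumerate per-chip blob paths; NVKM_GSP_FIRMWARE_FMC(gh100, 570.144), for instance, expands to:

    MODULE_FIRMWARE("nvidia/gh100/gsp/fmc-570.144.bin");
    MODULE_FIRMWARE("nvidia/gh100/gsp/bootloader-570.144.bin");
    MODULE_FIRMWARE("nvidia/gh100/gsp/gsp-570.144.bin");

This lines up with nvkm_gsp_load_fw() requesting "gsp/fmc-570.144"; the "nvidia/<chipset>/" prefix and ".bin" suffix are, as I read it, supplied by the firmware loader core via nvkm_firmware_get().
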
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
new file mode 100644
index 000000000000..04037394a2da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+nvkm-y += nvkm/subdev/gsp/rm/client.o
+nvkm-y += nvkm/subdev/gsp/rm/engine.o
+nvkm-y += nvkm/subdev/gsp/rm/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/nvenc.o
+
+nvkm-y += nvkm/subdev/gsp/rm/tu1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ga100.o
+nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ad10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gh100.o
+nvkm-y += nvkm/subdev/gsp/rm/gb10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gb20x.o
+
+include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild
+include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
new file mode 100644
index 000000000000..e1ce6355c35f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ad10x_gpu = {
+ .disp.class = {
+ .root = AD102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = AD102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = ADA_A,
+ .compute = ADA_COMPUTE_A,
+ },
+ .nvdec.class = NVC9B0_VIDEO_DECODER,
+ .nvenc.class = NVC9B7_VIDEO_ENCODER,
+ .ofa.class = NVC9FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
new file mode 100644
index 000000000000..72d3e3ca84c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "rm.h"
+
+void
+nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+ const unsigned int id = client->object.handle - NVKM_RM_CLIENT(0);
+ struct nvkm_gsp *gsp = client->gsp;
+
+ if (!gsp)
+ return;
+
+ if (client->object.client)
+ nvkm_gsp_rm_free(&client->object);
+
+ mutex_lock(&gsp->client_id.mutex);
+ idr_remove(&gsp->client_id.idr, id);
+ mutex_unlock(&gsp->client_id.mutex);
+
+ client->gsp = NULL;
+}
+
+int
+nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+ int id, ret;
+
+ if (WARN_ON(!gsp->rm))
+ return -ENOSYS;
+
+ mutex_lock(&gsp->client_id.mutex);
+ id = idr_alloc(&gsp->client_id.idr, client, 0, NVKM_RM_CLIENT_MASK + 1, GFP_KERNEL);
+ mutex_unlock(&gsp->client_id.mutex);
+ if (id < 0)
+ return id;
+
+ client->gsp = gsp;
+ client->object.client = client;
+ INIT_LIST_HEAD(&client->events);
+
+ ret = gsp->rm->api->client->ctor(client, NVKM_RM_CLIENT(id));
+ if (ret)
+ nvkm_gsp_client_dtor(client);
+
+ return ret;
+}
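
The client wrapper hands out RM handles from the gsp->client_id IDR and tears everything down through a single dtor, so a typical caller pairs the two as below (a sketch; the real callers live elsewhere in nouveau):

    struct nvkm_gsp_client client = {};
    int ret;

    ret = nvkm_gsp_client_ctor(gsp, &client);
    if (ret)
            return ret;

    /* ... issue RM allocations/controls against client.object ... */

    nvkm_gsp_client_dtor(&client);
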
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
new file mode 100644
index 000000000000..3b0e83b2f57f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include "gpu.h"
+
+#include <core/object.h>
+#include <engine/fifo/chan.h>
+
+struct nvkm_rm_engine {
+ struct nvkm_engine engine;
+
+ struct nvkm_engine_func func;
+};
+
+struct nvkm_rm_engine_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void *
+nvkm_rm_engine_obj_dtor(struct nvkm_object *object)
+{
+ struct nvkm_rm_engine_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+nvkm_rm_engine_obj = {
+ .dtor = nvkm_rm_engine_obj_dtor,
+};
+
+int
+nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_rm *rm = chan->client->gsp->rm;
+ const int inst = oclass->engine->subdev.inst;
+ const u32 class = oclass->base.oclass;
+ const u32 handle = oclass->handle;
+ struct nvkm_rm_engine_obj *obj;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ switch (oclass->engine->subdev.type) {
+ case NVKM_ENGINE_CE:
+ ret = rm->api->ce->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_GR:
+ ret = nvkm_gsp_rm_alloc(chan, handle, class, 0, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVDEC:
+ ret = rm->api->nvdec->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVENC:
+ ret = rm->api->nvenc->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVJPG:
+ ret = rm->api->nvjpg->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_OFA:
+ ret = rm->api->ofa->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ default:
+ ret = -EINVAL;
+ WARN_ON(1);
+ break;
+ }
+
+ if (ret) {
+ kfree(obj);
+ return ret;
+ }
+
+ nvkm_object_ctor(&nvkm_rm_engine_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+ return 0;
+}
+
+static int
+nvkm_rm_engine_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+
+ return nvkm_rm_engine_obj_new(&chan->rm.object, chan->id, oclass, pobject);
+}
+
+static void *
+nvkm_rm_engine_dtor(struct nvkm_engine *engine)
+{
+ kfree(engine->func);
+ return engine;
+}
+
+int
+nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *rm,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *engine)
+{
+ struct nvkm_engine_func *func;
+
+	func = kzalloc(struct_size(func, sclass, nclass + 1), GFP_KERNEL); /* +1: zeroed sclass terminator */
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = dtor;
+
+ for (int i = 0; i < nclass; i++) {
+ func->sclass[i].oclass = class[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_engine_obj_ctor;
+ }
+
+ nvkm_engine_ctor(func, rm->device, type, inst, true, engine);
+ return 0;
+}
+
+static int
+nvkm_rm_engine_new_(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst, u32 class,
+ struct nvkm_engine **pengine)
+{
+ struct nvkm_engine *engine;
+ int ret;
+
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_engine_dtor, rm, type, inst, &class, 1, engine);
+ if (ret) {
+ kfree(engine);
+ return ret;
+ }
+
+ *pengine = engine;
+ return 0;
+}
+
+int
+nvkm_rm_engine_new(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst)
+{
+ const struct nvkm_rm_gpu *gpu = rm->gpu;
+ struct nvkm_device *device = rm->device;
+
+ switch (type) {
+ case NVKM_ENGINE_CE:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ce)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ce.class, &device->ce[inst]);
+ case NVKM_ENGINE_GR:
+ if (inst != 0)
+			return -ENODEV; /* MIG not supported, just ignore. */
+
+ return nvkm_rm_gr_new(rm);
+ case NVKM_ENGINE_NVDEC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvdec)))
+ return -EINVAL;
+
+ return nvkm_rm_nvdec_new(rm, inst);
+ case NVKM_ENGINE_NVENC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvenc)))
+ return -EINVAL;
+
+ return nvkm_rm_nvenc_new(rm, inst);
+ case NVKM_ENGINE_NVJPG:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvjpg)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->nvjpg.class, &device->nvjpg[inst]);
+ case NVKM_ENGINE_OFA:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ofa)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ofa.class, &device->ofa[inst]);
+ default:
+ break;
+ }
+
+ return -ENODEV;
+}
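For illustration, a probe path would call the dispatcher once per engine instance and treat -ENODEV as "not present" rather than as an error. A hypothetical sketch, assuming only nvkm_rm_engine_new() above:

	/* Hypothetical probe-time loop: instantiate a copy engine and GR,
	 * skipping engine types the dispatcher does not support.
	 */
	static int
	example_engines_probe(struct nvkm_rm *rm)
	{
		int ret;

		ret = nvkm_rm_engine_new(rm, NVKM_ENGINE_CE, 0);
		if (ret && ret != -ENODEV)
			return ret;

		ret = nvkm_rm_engine_new(rm, NVKM_ENGINE_GR, 0);
		return (ret == -ENODEV) ? 0 : ret;
	}
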
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
new file mode 100644
index 000000000000..5b8c9c3901d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_ENGINE_H__
+#define __NVKM_RM_ENGINE_H__
+#include "gpu.h"
+
+int nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *);
+int nvkm_rm_engine_new(struct nvkm_rm *, enum nvkm_subdev_type, int inst);
+
+int nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *,
+ struct nvkm_object **);
+
+int nvkm_rm_gr_new(struct nvkm_rm *);
+int nvkm_rm_nvdec_new(struct nvkm_rm *, int inst);
+int nvkm_rm_nvenc_new(struct nvkm_rm *, int inst);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
new file mode 100644
index 000000000000..a48c6134075d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga100_gpu = {
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_A,
+ .compute = AMPERE_COMPUTE_A,
+ },
+ .nvdec.class = NVC6B0_VIDEO_DECODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
new file mode 100644
index 000000000000..50536ad7f85d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga1xx_gpu = {
+ .disp.class = {
+ .root = GA102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = GA102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_B,
+ .compute = AMPERE_COMPUTE_B,
+ },
+ .nvdec.class = NVC7B0_VIDEO_DECODER,
+ .nvenc.class = NVC7B7_VIDEO_ENCODER,
+ .ofa.class = NVC7FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
new file mode 100644
index 000000000000..2f517dcd721a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb10x_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = BLACKWELL_DMA_COPY_A,
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_A,
+ .compute = BLACKWELL_COMPUTE_A,
+ },
+ .nvdec.class = NVCDB0_VIDEO_DECODER,
+ .nvjpg.class = NVCDD1_VIDEO_NVJPG,
+ .ofa.class = NVCDFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
new file mode 100644
index 000000000000..950471d9996e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/ce/priv.h>
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb20x_gpu = {
+ .disp.class = {
+ .root = GB202_DISP,
+ .caps = GB202_DISP_CAPS,
+ .core = GB202_DISP_CORE_CHANNEL_DMA,
+ .wndw = GB202_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GB202_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GB202_DISP_CURSOR,
+ },
+
+ .usermode.class = BLACKWELL_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_B,
+ .doorbell_handle = gb202_chan_doorbell_handle,
+ },
+
+ .ce = {
+ .class = BLACKWELL_DMA_COPY_B,
+ .grce_mask = gb202_ce_grce_mask,
+ },
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_B,
+ .compute = BLACKWELL_COMPUTE_B,
+ },
+ .nvdec.class = NVCFB0_VIDEO_DECODER,
+ .nvenc.class = NVCFB7_VIDEO_ENCODER,
+ .nvjpg.class = NVCFD1_VIDEO_NVJPG,
+ .ofa.class = NVCFFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
new file mode 100644
index 000000000000..49e2c54e1aa8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gh100_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = HOPPER_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = HOPPER_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = HOPPER_A,
+ .compute = HOPPER_COMPUTE_A,
+ },
+ .nvdec.class = NVB8B0_VIDEO_DECODER,
+ .nvjpg.class = NVB8D1_VIDEO_NVJPG,
+ .ofa.class = NVB8FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
new file mode 100644
index 000000000000..46a6325641b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_GPU_H__
+#define __NVKM_RM_GPU_H__
+#include "rm.h"
+
+struct nvkm_rm_gpu {
+ struct {
+ struct {
+ u32 root;
+ u32 caps;
+ u32 core;
+ u32 wndw;
+ u32 wimm;
+ u32 curs;
+ } class;
+ } disp;
+
+ struct {
+ u32 class;
+ } usermode;
+
+ struct {
+ struct {
+ u32 class;
+ u32 (*doorbell_handle)(struct nvkm_chan *);
+ } chan;
+ } fifo;
+
+ struct {
+ u32 class;
+ u32 (*grce_mask)(struct nvkm_device *);
+ } ce;
+
+ struct {
+ struct {
+ u32 i2m;
+ u32 twod;
+ u32 threed;
+ u32 compute;
+ } class;
+ } gr;
+
+ struct {
+ u32 class;
+ } nvdec;
+
+ struct {
+ u32 class;
+ } nvenc;
+
+ struct {
+ u32 class;
+ } nvjpg;
+
+ struct {
+ u32 class;
+ } ofa;
+};
+
+extern const struct nvkm_rm_gpu tu1xx_gpu;
+extern const struct nvkm_rm_gpu ga100_gpu;
+extern const struct nvkm_rm_gpu ga1xx_gpu;
+extern const struct nvkm_rm_gpu ad10x_gpu;
+extern const struct nvkm_rm_gpu gh100_gpu;
+extern const struct nvkm_rm_gpu gb10x_gpu;
+extern const struct nvkm_rm_gpu gb20x_gpu;
+#endif
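Consumers read the bound chipset's table directly instead of switching on the chipset. A sketch of the intended access pattern (hypothetical helper, assuming only the struct above):

	/* Hypothetical consumer: fetch the host channel class for the bound
	 * chipset, ringing the doorbell through the per-chipset hook if one
	 * is provided.
	 */
	static u32
	example_chan_class(struct nvkm_rm *rm, struct nvkm_chan *chan, u32 *doorbell)
	{
		const struct nvkm_rm_gpu *gpu = rm->gpu;

		if (gpu->fifo.chan.doorbell_handle)
			*doorbell = gpu->fifo.chan.doorbell_handle(chan);

		return gpu->fifo.chan.class;
	}
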
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
new file mode 100644
index 000000000000..f40b8fcc2bcb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gr.h"
+
+#include <engine/fifo.h>
+#include <engine/gr/priv.h>
+
+static int
+nvkm_rm_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
+
+ return nvkm_rm_engine_obj_new(&chan->chan->rm.object, chan->chan->id, oclass, pobject);
+}
+
+static int
+nvkm_rm_gr_fini(struct nvkm_gr *base, bool suspend)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+
+ if (rm->api->gr->scrubber.fini)
+ rm->api->gr->scrubber.fini(gr);
+
+ return 0;
+}
+
+static int
+nvkm_rm_gr_init(struct nvkm_gr *base)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ int ret;
+
+ if (rm->api->gr->scrubber.init) {
+ ret = rm->api->gr->scrubber.init(gr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nvkm_rm_gr_new(struct nvkm_rm *rm)
+{
+ const u32 classes[] = {
+ rm->gpu->gr.class.i2m,
+ rm->gpu->gr.class.twod,
+ rm->gpu->gr.class.threed,
+ rm->gpu->gr.class.compute,
+ };
+ struct nvkm_gr_func *func;
+ struct r535_gr *gr;
+
+	func = kzalloc(struct_size(func, sclass, ARRAY_SIZE(classes) + 1), GFP_KERNEL); /* +1: zeroed sclass terminator */
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = r535_gr_dtor;
+ func->oneinit = r535_gr_oneinit;
+ func->init = nvkm_rm_gr_init;
+ func->fini = nvkm_rm_gr_fini;
+ func->units = r535_gr_units;
+ func->chan_new = r535_gr_chan_new;
+
+ for (int i = 0; i < ARRAY_SIZE(classes); i++) {
+ func->sclass[i].oclass = classes[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_gr_obj_ctor;
+ }
+
+ gr = kzalloc(sizeof(*gr), GFP_KERNEL);
+ if (!gr) {
+ kfree(func);
+ return -ENOMEM;
+ }
+
+ nvkm_gr_ctor(func, rm->device, NVKM_ENGINE_GR, 0, true, &gr->base);
+ gr->scrubber.chid = -1;
+ rm->device->gr = &gr->base;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
new file mode 100644
index 000000000000..24980f23aab9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_RM_GR_H__
+#define __NVKM_RM_GR_H__
+#include "engine.h"
+
+#include <core/object.h>
+#include <engine/gr.h>
+
+#define R515_GR_MAX_CTXBUFS 9
+
+struct r535_gr_chan {
+ struct nvkm_object object;
+ struct r535_gr *gr;
+
+ struct nvkm_vmm *vmm;
+ struct nvkm_chan *chan;
+
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr {
+ struct nvkm_gr base;
+
+ struct {
+ u16 bufferId;
+ u32 size;
+ u8 page;
+ u8 align;
+ bool global;
+ bool init;
+ bool ro;
+ } ctxbuf[R515_GR_MAX_CTXBUFS];
+ int ctxbuf_nr;
+
+ struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
+
+ struct {
+ int chid;
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_gsp_object threed;
+ struct {
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } ctxbuf;
+ bool enabled;
+ } scrubber;
+};
+
+struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+void r535_gr_get_ctxbuf_info(struct r535_gr *, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
new file mode 100644
index 000000000000..3bdb5ad320d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_HANDLES_H__
+#define __NVKM_RM_HANDLES_H__
+
+/* RMAPI handles for various objects allocated from GSP-RM with RM_ALLOC. */
+
+#define NVKM_RM_CLIENT(id) (0xc1d00000 | (id))
+#define NVKM_RM_CLIENT_MASK 0x0000ffff
+#define NVKM_RM_DEVICE 0xde1d0000
+#define NVKM_RM_SUBDEVICE 0x5d1d0000
+#define NVKM_RM_DISP 0x00730000
+#define NVKM_RM_VASPACE 0x90f10000
+#define NVKM_RM_CHAN(chid) (0xf1f00000 | (chid))
+#define NVKM_RM_THREED 0x97000000
+#endif
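Each handle places a fixed tag in the upper bits and an instance index in the lower bits, which is why nvkm_gsp_client_dtor() earlier in this series can recover the idr id with a plain subtraction. An illustrative round trip:

	/* Illustrative decode matching the encode above; ids must fit within
	 * NVKM_RM_CLIENT_MASK.  example_client_id(NVKM_RM_CLIENT(7)) == 7.
	 */
	static inline unsigned int
	example_client_id(u32 handle)
	{
		return (handle - NVKM_RM_CLIENT(0)) & NVKM_RM_CLIENT_MASK;
	}
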
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
new file mode 100644
index 000000000000..d9fbfc377864
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvdec.h>
+
+static void *
+nvkm_rm_nvdec_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvdec, engine);
+}
+
+int
+nvkm_rm_nvdec_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvdec *nvdec;
+ int ret;
+
+ nvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL);
+ if (!nvdec)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvdec_dtor, rm, NVKM_ENGINE_NVDEC, inst,
+ &rm->gpu->nvdec.class, 1, &nvdec->engine);
+ if (ret) {
+ kfree(nvdec);
+ return ret;
+ }
+
+ rm->device->nvdec[inst] = nvdec;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
new file mode 100644
index 000000000000..6dfa7b789e07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvenc.h>
+
+static void *
+nvkm_rm_nvenc_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvenc, engine);
+}
+
+int
+nvkm_rm_nvenc_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvenc *nvenc;
+ int ret;
+
+ nvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL);
+ if (!nvenc)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvenc_dtor, rm, NVKM_ENGINE_NVENC, inst,
+ &rm->gpu->nvenc.class, 1, &nvenc->engine);
+ if (ret) {
+ kfree(nvenc);
+ return ret;
+ }
+
+ rm->device->nvenc[inst] = nvenc;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
new file mode 100644
index 000000000000..a5f6b2abfd33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/device.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/bar.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/vmm.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/disp.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ce.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvenc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvjpg.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
new file mode 100644
index 000000000000..46e3a29f2ad7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/alloc.h"
+#include "nvrm/rpcfn.h"
+
+static int
+r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_free_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
+ client->object.handle, object->handle);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->params.hRoot = client->object.handle;
+ rpc->params.hObjectParent = 0;
+ rpc->params.hObjectOld = object->handle;
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+}
+
+static void
+r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ void *ret = NULL;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc));
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->status) {
+ ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
+ if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
+ u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_alloc_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
+ client->object.handle, object->parent->handle,
+ object->handle);
+
+ nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
+ params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hParent = object->parent->handle;
+ rpc->hObject = object->handle;
+ rpc->hClass = oclass;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_alloc
+r535_alloc = {
+ .get = r535_gsp_rpc_rm_alloc_get,
+ .push = r535_gsp_rpc_rm_alloc_push,
+ .done = r535_gsp_rpc_rm_alloc_done,
+ .free = r535_gsp_rpc_rm_free,
+};
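Callers normally go through the nvkm_gsp_rm_alloc_get()/_wr() wrappers that dispatch to this table, as r535_ce_alloc() and r535_gsp_client_ctor() elsewhere in the series do. A condensed sketch of the pattern; the class and parameter layout below are placeholders:

	/* Hypothetical RM_ALLOC: get a parameter buffer, fill the class-
	 * specific arguments, then write (push + done) in one call.
	 */
	static int
	example_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass,
			 struct nvkm_gsp_object *obj)
	{
		struct {
			u32 version; /* placeholder parameter layout */
		} *args;

		args = nvkm_gsp_rm_alloc_get(parent, handle, oclass, sizeof(*args), obj);
		if (IS_ERR(args))
			return PTR_ERR(args);

		args->version = 1;

		return nvkm_gsp_rm_alloc_wr(obj, args);
	}
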
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
index 3a30bea30e36..d06bf95b9a4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
@@ -19,7 +19,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "gf100.h"
+#include <subdev/bar/gf100.h>
#include <core/mm.h>
#include <subdev/fb.h>
@@ -27,14 +27,20 @@
#include <subdev/instmem.h>
#include <subdev/mmu/vmm.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+#include "nvrm/bar.h"
+#include "nvrm/rpcfn.h"
static void
r535_bar_flush(struct nvkm_bar *bar)
{
+ /* Use NV_PFLUSH in resume path - needed on R570 to flush writes before
+ * BAR2 page tables have been restored.
+ */
+ if (unlikely(!bar->bar2)) {
+ g84_bar_flush(bar);
+ return;
+ }
+
ioread32_native(bar->flushBAR2);
}
@@ -44,7 +50,7 @@ r535_bar_bar2_wait(struct nvkm_bar *base)
}
static int
-r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
+r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u8 page_shift, u64 pdbe)
{
rpc_update_bar_pde_v15_00 *rpc;
@@ -53,21 +59,22 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
return -EIO;
rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
- rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
- rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+ rpc->info.entryValue = pdbe;
+ rpc->info.entryLevelShift = page_shift;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
static void
r535_bar_bar2_fini(struct nvkm_bar *bar)
{
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
struct nvkm_gsp *gsp = bar->subdev.device->gsp;
bar->flushBAR2 = bar->flushBAR2PhysMode;
nvkm_done(bar->flushFBZero);
- WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, 0));
}
static void
@@ -76,8 +83,18 @@ r535_bar_bar2_init(struct nvkm_bar *bar)
struct nvkm_device *device = bar->subdev.device;
struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
struct nvkm_gsp *gsp = device->gsp;
-
- WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
+ struct nvkm_memory *pdb = vmm->pd->pt[0]->memory;
+ u32 pdb_offset = vmm->pd->pt[0]->base;
+ u32 pdbe_lo, pdbe_hi;
+ u64 pdbe;
+
+ nvkm_kmap(pdb);
+ pdbe_lo = nvkm_ro32(pdb, pdb_offset + 0);
+ pdbe_hi = nvkm_ro32(pdb, pdb_offset + 4);
+ pdbe = ((u64)pdbe_hi << 32) | pdbe_lo;
+ nvkm_done(pdb);
+
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, pdbe));
vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
if (!bar->flushFBZero) {
@@ -174,7 +191,7 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
}
*pbar = bar;
- bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
+ bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE);
if (!bar->flushBAR2PhysMode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
new file mode 100644
index 000000000000..2d1ce9db2dcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ce.h"
+#include "nvrm/engine.h"
+
+static int
+r535_ce_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ce)
+{
+ NVC0B5_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ce);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->version = 1;
+ args->engineType = NV2080_ENGINE_TYPE_COPY0 + inst;
+
+ return nvkm_gsp_rm_alloc_wr(ce, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_ce = {
+ .alloc = r535_ce_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
index 7bfa6240d283..ec71f683e609 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
@@ -19,28 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "gf100.h"
+#include <rm/rm.h>
-#include <subdev/gsp.h>
+#include "nvrm/client.h"
-#include <nvif/class.h>
+static int
+r535_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
-static const struct gf100_gr_func
-ad102_gr = {
- .sclass = {
- { -1, -1, FERMI_TWOD_A },
- { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
- { -1, -1, ADA_A },
- { -1, -1, ADA_COMPUTE_A },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
-int
-ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&ad102_gr, device, type, inst, pgr);
+ args->hClient = client->object.handle;
+ args->processID = ~0;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
}
+
+const struct nvkm_rm_api_client
+r535_client = {
+ .ctor = r535_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
new file mode 100644
index 000000000000..70b9ee911c5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/ctrl.h"
+#include "nvrm/rpcfn.h"
+
+static void
+r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
+
+ if (!params)
+ return;
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static int
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ int ret = 0;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc);
+ if (IS_ERR_OR_NULL(rpc)) {
+ *params = NULL;
+ return PTR_ERR(rpc);
+ }
+
+ if (rpc->status) {
+ ret = r535_rpc_status_to_errno(rpc->status);
+ if (ret != -EAGAIN && ret != -EBUSY)
+ nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+ object->client->object.handle, object->handle, rpc->cmd, rpc->status);
+ }
+
+ if (repc)
+ *params = rpc->params;
+ else
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_control_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
+ client->object.handle, object->handle, cmd, params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hObject = object->handle;
+ rpc->cmd = cmd;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_ctrl
+r535_ctrl = {
+ .get = r535_gsp_rpc_rm_ctrl_get,
+ .push = r535_gsp_rpc_rm_ctrl_push,
+ .done = r535_gsp_rpc_rm_ctrl_done,
+};
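The control path mirrors alloc: get a parameter buffer, push it, then read the reply and hand it back with done. A condensed sketch, assuming the nvkm_gsp_rm_ctrl_get()/_push()/_done() wrappers used throughout the disp code later in this series; the command and layout are placeholders:

	/* Hypothetical RM_CONTROL round trip. */
	static int
	example_rm_ctrl(struct nvkm_gsp_object *object, u32 cmd)
	{
		struct {
			u32 value; /* placeholder parameter layout */
		} *ctrl;
		int ret;

		ctrl = nvkm_gsp_rm_ctrl_get(object, cmd, sizeof(*ctrl));
		if (IS_ERR(ctrl))
			return PTR_ERR(ctrl);

		ctrl->value = 0; /* fill the request */

		ret = nvkm_gsp_rm_ctrl_push(object, &ctrl, sizeof(*ctrl));
		if (ret) {
			nvkm_gsp_rm_ctrl_done(object, ctrl); /* done tolerates NULL */
			return ret;
		}

		/* read the reply from *ctrl before releasing it */
		nvkm_gsp_rm_ctrl_done(object, ctrl);
		return 0;
	}
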
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
new file mode 100644
index 000000000000..f830e12a8f6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/device.h"
+#include "nvrm/event.h"
+
+static void
+r535_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+
+ mutex_lock(&gsp->client_id.mutex);
+ if (event->func) {
+ list_del(&event->head);
+ event->func = NULL;
+ }
+ mutex_unlock(&gsp->client_id.mutex);
+
+ nvkm_gsp_rm_free(&event->object);
+ event->device = NULL;
+}
+
+static int
+r535_gsp_device_event_get(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
+ NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->event = event->id;
+ ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
+ return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
+}
+
+static int
+r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+ nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+ NV0005_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
+ NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
+ &event->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hParentClient = client->object.handle;
+ args->hSrcResource = 0;
+ args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
+ args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
+ args->data = NULL;
+
+ ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
+ if (ret)
+ return ret;
+
+ event->device = device;
+ event->id = id;
+
+ ret = r535_gsp_device_event_get(event);
+ if (ret) {
+ nvkm_gsp_event_dtor(event);
+ return ret;
+ }
+
+ mutex_lock(&gsp->client_id.mutex);
+ event->func = func;
+ list_add(&event->head, &client->events);
+ mutex_unlock(&gsp->client_id.mutex);
+ return 0;
+}
+
+static void
+r535_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+ nvkm_gsp_rm_free(&device->subdevice);
+ nvkm_gsp_rm_free(&device->object);
+}
+
+static int
+r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
+{
+ NV2080_ALLOC_PARAMETERS *args;
+
+ return nvkm_gsp_rm_alloc(&device->object, NVKM_RM_SUBDEVICE, NV20_SUBDEVICE_0,
+ sizeof(*args), &device->subdevice);
+}
+
+static int
+r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+ NV0080_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, NVKM_RM_DEVICE, NV01_DEVICE_0, sizeof(*args),
+ &device->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClientShare = client->object.handle;
+
+ ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_subdevice_ctor(device);
+ if (ret)
+ nvkm_gsp_rm_free(&device->object);
+
+ return ret;
+}
+
+const struct nvkm_rm_api_device
+r535_device = {
+ .ctor = r535_gsp_device_ctor,
+ .dtor = r535_gsp_device_dtor,
+ .event.ctor = r535_gsp_device_event_ctor,
+ .event.dtor = r535_gsp_event_dtor,
+};
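Event registration rides on the subdevice allocated by the device ctor. A hypothetical hookup, assuming nvkm_gsp_event_func takes (event, repv, repc); the event id and callback are placeholders:

	/* Hypothetical notification hookup against the ctors above. */
	static void
	example_event_cb(struct nvkm_gsp_event *event, void *repv, u32 repc)
	{
		/* decode repv/repc for this event id */
	}

	static int
	example_event_setup(struct nvkm_gsp_device *device, u32 handle, u32 id,
			    struct nvkm_gsp_event *event)
	{
		return r535_gsp_device_event_ctor(device, handle, id,
						  example_event_cb, event);
	}
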
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
index 99110ab2f44d..6e63df816d85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
@@ -19,13 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-#include "chan.h"
-#include "conn.h"
-#include "dp.h"
-#include "head.h"
-#include "ior.h"
-#include "outp.h"
+#include <engine/disp/priv.h>
+#include <engine/disp/chan.h>
+#include <engine/disp/conn.h>
+#include <engine/disp/dp.h>
+#include <engine/disp/head.h>
+#include <engine/disp/ior.h>
+#include <engine/disp/outp.h>
#include <core/ramht.h>
#include <subdev/bios.h>
@@ -34,19 +34,11 @@
#include <subdev/mmu.h>
#include <subdev/vfn.h>
+#include <rm/gpu.h>
+
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h>
+#include "nvrm/disp.h"
#include <linux/acpi.h>
@@ -78,9 +70,9 @@ r535_chan_fini(struct nvkm_disp_chan *chan)
}
static int
-r535_chan_push(struct nvkm_disp_chan *chan)
+r535_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
{
- struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp;
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
@@ -89,8 +81,8 @@ r535_chan_push(struct nvkm_disp_chan *chan)
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- if (chan->memory) {
- switch (nvkm_memory_target(chan->memory)) {
+ if (memory) {
+ switch (nvkm_memory_target(memory)) {
case NVKM_MEM_TARGET_NCOH:
ctrl->addressSpace = ADDR_SYSMEM;
ctrl->cacheSnoop = 0;
@@ -107,13 +99,13 @@ r535_chan_push(struct nvkm_disp_chan *chan)
return -EINVAL;
}
- ctrl->physicalAddr = nvkm_memory_addr(chan->memory);
- ctrl->limit = nvkm_memory_size(chan->memory) - 1;
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
}
- ctrl->hclass = chan->object.oclass;
- ctrl->channelInstance = chan->head;
- ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0;
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
+ ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0;
return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
}
@@ -121,10 +113,11 @@ r535_chan_push(struct nvkm_disp_chan *chan)
static int
r535_curs_init(struct nvkm_disp_chan *chan)
{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
int ret;
- ret = r535_chan_push(chan);
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, NULL);
if (ret)
return ret;
@@ -172,25 +165,34 @@ r535_dmac_fini(struct nvkm_disp_chan *chan)
}
static int
-r535_dmac_init(struct nvkm_disp_chan *chan)
+r535_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
{
NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
- int ret;
-
- ret = r535_chan_push(chan);
- if (ret)
- return ret;
- args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
- (chan->object.oclass << 16) | chan->head,
- chan->object.oclass, sizeof(*args), &chan->rm.object);
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
if (IS_ERR(args))
return PTR_ERR(args);
- args->channelInstance = chan->head;
- args->offset = chan->suspend_put;
+ args->channelInstance = inst;
+ args->offset = put_offset;
- return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r535_dmac_init(struct nvkm_disp_chan *chan)
+{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, chan->memory);
+ if (ret)
+ return ret;
+
+ return rmapi->disp->chan.dmac_alloc(chan->disp, chan->object.oclass, chan->head,
+ chan->suspend_put, &chan->rm.object);
}
static int
@@ -260,47 +262,47 @@ r535_core = {
};
static int
-r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
+r535_bl_ctrl(struct nvkm_disp *disp, unsigned int display_id, bool set, int *pval)
{
- struct nvkm_disp *disp = sor->disp;
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
- sizeof(*ctrl));
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl->displayId = BIT(sor->asy.outp->index);
- ctrl->brightness = lvl;
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
- return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
}
static int
-r535_sor_bl_get(struct nvkm_ior *sor)
+r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
{
struct nvkm_disp *disp = sor->disp;
- NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
- int ret, lvl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
- sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
- ctrl->displayId = BIT(sor->asy.outp->index);
+ return rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, true, &lvl);
+}
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
- }
+static int
+r535_sor_bl_get(struct nvkm_ior *sor)
+{
+ struct nvkm_disp *disp = sor->disp;
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
+ int lvl, ret = rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, false, &lvl);
- lvl = ctrl->brightness;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return lvl;
+ return (ret == 0) ? lvl : ret;
}
static const struct nvkm_ior_func_bl
@@ -646,7 +648,7 @@ r535_conn_new(struct nvkm_disp *disp, u32 id)
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
if (IS_ERR(ctrl))
- return (void *)ctrl;
+ return ERR_CAST(ctrl);
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
@@ -730,7 +732,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
}
static int
-r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
+r535_disp_get_active(struct nvkm_disp *disp, unsigned int head, u32 *displayid)
{
NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
int ret;
@@ -763,7 +765,9 @@ r535_outp_inherit(struct nvkm_outp *outp)
int ret;
list_for_each_entry(head, &disp->heads, head) {
- ret = r535_disp_head_displayid(disp, head->id, &displayid);
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
+
+ ret = rmapi->disp->get_active(disp, head->id, &displayid);
if (WARN_ON(ret))
return NULL;
@@ -858,10 +862,9 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
}
static int
-r535_outp_detect(struct nvkm_outp *outp)
+r535_disp_get_connect_state(struct nvkm_disp *disp, unsigned int display_id)
{
NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
- struct nvkm_disp *disp = outp->disp;
int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
@@ -870,23 +873,29 @@ r535_outp_detect(struct nvkm_outp *outp)
return PTR_ERR(ctrl);
ctrl->subDeviceInstance = 0;
- ctrl->displayMask = BIT(outp->index);
+ ctrl->displayMask = BIT(display_id);
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
- }
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
- if (ctrl->displayMask & BIT(outp->index)) {
+static int
+r535_outp_detect(struct nvkm_outp *outp)
+{
+ const struct nvkm_rm_api *rmapi = outp->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->get_connect_state(outp->disp, outp->index);
+ if (ret == 1) {
ret = r535_outp_dfp_get_info(outp);
if (ret == 0)
ret = 1;
- } else {
- ret = 0;
}
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
@@ -1029,15 +1038,11 @@ r535_dp_train(struct nvkm_outp *outp, bool retrain)
}
static int
-r535_dp_rates(struct nvkm_outp *outp)
+r535_dp_set_indexed_link_rates(struct nvkm_outp *outp)
{
NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
- if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
- !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
- return 0;
-
if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
return -EINVAL;
@@ -1054,6 +1059,18 @@ r535_dp_rates(struct nvkm_outp *outp)
}
static int
+r535_dp_rates(struct nvkm_outp *outp)
+{
+ struct nvkm_rm *rm = outp->disp->rm.objcom.client->gsp->rm;
+
+ if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
+ !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
+ return 0;
+
+ return rm->api->disp->dp.set_indexed_link_rates(outp);
+}
+
+static int
r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
{
struct nvkm_disp *disp = outp->disp;
@@ -1151,6 +1168,49 @@ r535_dp = {
};
static int
+r535_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
{
NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
@@ -1194,6 +1254,7 @@ r535_tmds = {
static int
r535_outp_new(struct nvkm_disp *disp, u32 id)
{
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
enum nvkm_ior_proto proto;
struct dcb_output dcbE = {};
@@ -1278,43 +1339,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
if (ret)
return ret;
} else {
- NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
bool mst, wm;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- ctrl->sorIndex = ~0;
-
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ ret = rmapi->disp->dp.get_caps(disp, &dcbE.dpconf.link_bw, &mst, &wm);
+ if (ret)
return ret;
- }
-
- switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
- dcbE.dpconf.link_bw = 0x06;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
- dcbE.dpconf.link_bw = 0x0a;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
- dcbE.dpconf.link_bw = 0x14;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
- dcbE.dpconf.link_bw = 0x1e;
- break;
- default:
- dcbE.dpconf.link_bw = 0x00;
- break;
- }
-
- mst = ctrl->bIsMultistreamSupported;
- wm = ctrl->bHasIncreasedWatermarkLimits;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
if (WARN_ON(!dcbE.dpconf.link_bw))
return -EINVAL;
@@ -1441,11 +1470,47 @@ r535_disp_init(struct nvkm_disp *disp)
}
static int
+r535_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
r535_disp_oneinit(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_gsp *gsp = device->gsp;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
+ unsigned long mask;
int ret, i;
/* RAMIN. */
@@ -1476,24 +1541,14 @@ r535_disp_oneinit(struct nvkm_disp *disp)
if (ret)
return ret;
- ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0,
+ ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, NVKM_RM_DISP, NV04_DISPLAY_COMMON, 0,
&disp->rm.objcom);
if (ret)
return ret;
- {
- NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
- NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
- sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- disp->wndw.mask = ctrl->windowPresentMask;
- disp->wndw.nr = fls(disp->wndw.mask);
- nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
- }
+ ret = rmapi->disp->get_static_info(disp);
+ if (ret)
+ return ret;
/* */
{
@@ -1622,25 +1677,14 @@ r535_disp_oneinit(struct nvkm_disp *disp)
return ret;
}
- /* */
- {
- NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
- unsigned long mask;
- int i;
-
- ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
- NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- mask = ctrl->displayMask;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ ret = rmapi->disp->get_supported(disp, &mask);
+ if (ret)
+ return ret;
- for_each_set_bit(i, &mask, 32) {
- ret = r535_outp_new(disp, i);
- if (ret)
- return ret;
- }
+ for_each_set_bit(i, &mask, 32) {
+ ret = r535_outp_new(disp, i);
+ if (ret)
+ return ret;
}
ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
@@ -1686,6 +1730,7 @@ int
r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_disp_func *rm;
int ret;
@@ -1701,20 +1746,26 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
rm->sor.new = r535_sor_new;
rm->ramht_size = hw->ramht_size;
- rm->root = hw->root;
+ rm->root.oclass = gpu->disp.class.root;
- for (int i = 0; hw->user[i].ctor; i++) {
- switch (hw->user[i].base.oclass & 0xff) {
- case 0x73: rm->user[i] = hw->user[i]; break;
- case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break;
- case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break;
- case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break;
- case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break;
- default:
- WARN_ON(1);
- continue;
- }
- }
+ rm->user[0].base.oclass = gpu->disp.class.caps;
+ rm->user[0].ctor = gv100_disp_caps_new;
+
+ rm->user[1].base.oclass = gpu->disp.class.core;
+ rm->user[1].ctor = nvkm_disp_core_new;
+ rm->user[1].chan = &r535_core;
+
+ rm->user[2].base.oclass = gpu->disp.class.wndw;
+ rm->user[2].ctor = nvkm_disp_wndw_new;
+ rm->user[2].chan = &r535_wndw;
+
+ rm->user[3].base.oclass = gpu->disp.class.wimm;
+ rm->user[3].ctor = nvkm_disp_wndw_new;
+ rm->user[3].chan = &r535_wimm;
+
+ rm->user[4].base.oclass = gpu->disp.class.curs;
+ rm->user[4].ctor = nvkm_disp_chan_new;
+ rm->user[4].chan = &r535_curs;
ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
if (ret)
@@ -1723,3 +1774,20 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
mutex_init(&(*pdisp)->super.mutex); //XXX
return ret;
}
+
+const struct nvkm_rm_api_disp
+r535_disp = {
+ .get_static_info = r535_disp_get_static_info,
+ .get_supported = r535_disp_get_supported,
+ .get_connect_state = r535_disp_get_connect_state,
+ .get_active = r535_disp_get_active,
+ .bl_ctrl = r535_bl_ctrl,
+ .dp = {
+ .get_caps = r535_dp_get_caps,
+ .set_indexed_link_rates = r535_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r535_disp_chan_set_pushbuf,
+ .dmac_alloc = r535_dmac_alloc,
+ }
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
index 5f3c9c02a4c0..150e22fde2ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
@@ -19,19 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-
+#include <subdev/instmem/priv.h>
#include <subdev/gsp.h>
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include "nvrm/fbsr.h"
+#include "nvrm/rpcfn.h"
struct fbsr_item {
const char *type;
@@ -54,9 +48,9 @@ struct fbsr {
u64 sys_offset;
};
-static int
-fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
- u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
+int
+r535_fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
+ u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
{
struct nvkm_gsp_client *client = device->object.client;
struct nvkm_gsp *gsp = client->gsp;
@@ -105,7 +99,7 @@ fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target
rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
}
- ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
+ ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
if (ret)
return ret;
@@ -123,8 +117,8 @@ fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
struct nvkm_gsp_object memlist;
int ret;
- ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
- item->addr, item->size, NULL, &memlist);
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
+ item->addr, item->size, NULL, &memlist);
if (ret)
return ret;
@@ -161,8 +155,8 @@ fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
struct nvkm_gsp_object memlist;
int ret;
- ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
- 0, fbsr->size, sgt, &memlist);
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
+ 0, fbsr->size, sgt, &memlist);
if (ret)
return ret;
@@ -206,22 +200,19 @@ fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
}
-static void
-r535_instmem_resume(struct nvkm_instmem *imem)
+void
+r535_fbsr_resume(struct nvkm_gsp *gsp)
{
/* RM has restored VRAM contents already, so just need to free the sysmem buffer. */
- if (imem->rm.fbsr_valid) {
- nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
- imem->rm.fbsr_valid = false;
- }
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.fbsr);
}
static int
-r535_instmem_suspend(struct nvkm_instmem *imem)
+r535_fbsr_suspend(struct nvkm_gsp *gsp)
{
- struct nvkm_subdev *subdev = &imem->subdev;
+ struct nvkm_subdev *subdev = &gsp->subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_instmem *imem = device->imem;
struct nvkm_instobj *iobj;
struct fbsr fbsr = {};
struct fbsr_item *item, *temp;
@@ -262,7 +253,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
fbsr.size += gsp->fb.bios.vga_workspace.size;
nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
- ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
+ ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &gsp->sr.fbsr);
if (ret)
goto done;
@@ -271,7 +262,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
if (ret)
goto done_sgt;
- ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
+ ret = fbsr_init(&fbsr, &gsp->sr.fbsr, items_size);
if (WARN_ON(ret))
goto done_sgt;
@@ -282,12 +273,10 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
goto done_sgt;
}
- imem->rm.fbsr_valid = true;
-
/* Cleanup everything except the sysmem backup, which will be removed after resume. */
done_sgt:
if (ret) /* ... unless we failed already. */
- nvkm_gsp_sg_free(device, &imem->rm.fbsr);
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
done:
list_for_each_entry_safe(item, temp, &fbsr.items, head) {
list_del(&item->head);
@@ -299,6 +288,12 @@ done:
return ret;
}
+const struct nvkm_rm_api_fbsr
+r535_fbsr = {
+ .suspend = r535_fbsr_suspend,
+ .resume = r535_fbsr_resume,
+};
+
static void *
r535_instmem_dtor(struct nvkm_instmem *imem)
{
@@ -319,11 +314,10 @@ r535_instmem_new(const struct nvkm_instmem_func *hw,
rm->dtor = r535_instmem_dtor;
rm->fini = hw->fini;
- rm->suspend = r535_instmem_suspend;
- rm->resume = r535_instmem_resume;
rm->memory_new = hw->memory_new;
rm->memory_wrap = hw->memory_wrap;
rm->zero = false;
+ rm->set_bar0_window_addr = hw->set_bar0_window_addr;
ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
if (ret)
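
The suspend path above gathers every VRAM range that must survive into a list, sums the sizes into fbsr.size, and allocates a single sysmem scatter list to back them all; resume then only frees that buffer, since RM restores the VRAM contents itself. A stand-alone sketch of the bookkeeping, with a plain linked list in place of the nvkm list and sg-table helpers (all names are illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	struct item {
		const char *type;
		unsigned long long addr, size;
		struct item *next;
	};

	static int
	track_vram(struct item **items, const char *type,
		   unsigned long long addr, unsigned long long size)
	{
		struct item *it = malloc(sizeof(*it));

		if (!it)
			return -1;
		*it = (struct item){ .type = type, .addr = addr, .size = size,
				     .next = *items };
		*items = it;
		return 0;
	}

	int main(void)
	{
		struct item *items = NULL, *it;
		unsigned long long total = 0;

		/* Two example regions standing in for instmem objects. */
		track_vram(&items, "inst", 0x1000, 0x12000);
		track_vram(&items, "vga-workspace", 0xff000, 0x1000);

		for (it = items; it; it = it->next)
			total += it->size;

		/* One sysmem backup of this size covers every region. */
		printf("fbsr: size: 0x%llx bytes\n", total);

		while ((it = items)) {
			items = it->next;
			free(it);
		}
		return 0;
	}
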
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
index 3454c7d29502..1ac5628c5140 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
@@ -19,11 +19,11 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-#include "cgrp.h"
-#include "chan.h"
-#include "chid.h"
-#include "runl.h"
+#include <engine/fifo/priv.h>
+#include <engine/fifo/cgrp.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/chid.h>
+#include <engine/fifo/runl.h>
#include <core/gpuobj.h>
#include <subdev/gsp.h>
@@ -31,24 +31,19 @@
#include <subdev/vfn.h>
#include <engine/gr.h>
+#include <rm/engine.h>
+
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
static u32
r535_chan_doorbell_handle(struct nvkm_chan *chan)
{
- return (chan->cgrp->runl->id << 16) | chan->id;
+ struct nvkm_gsp *gsp = chan->rm.object.client->gsp;
+
+ return gsp->rm->gpu->fifo.chan.doorbell_handle(chan);
}
static void
@@ -77,50 +72,29 @@ r535_chan_ramfc_clear(struct nvkm_chan *chan)
#define CHID_PER_USERD 8
static int
-r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+r535_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
{
- struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
- struct nvkm_engn *engn;
- struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
- const int userd_p = chan->id / CHID_PER_USERD;
- const int userd_i = chan->id % CHID_PER_USERD;
- u32 eT = ~0;
- int ret;
-
- if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
- ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
- if (ret)
- return ret;
- }
-
- nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
- eT = engn->id;
- break;
- }
-
- if (WARN_ON(eT == ~0))
- return -EINVAL;
- chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
- fifo->rm.mthdbuf_size,
- &chan->rm.mthdbuf.addr, GFP_KERNEL);
- if (!chan->rm.mthdbuf.ptr)
- return -ENOMEM;
-
- args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
- fifo->func->chan.user.oclass, sizeof(*args),
- &chan->rm.object);
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
if (WARN_ON(IS_ERR(args)))
return PTR_ERR(args);
- args->gpFifoOffset = offset;
- args->gpFifoEntries = length / 8;
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
- args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
if (!priv)
args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
else
@@ -143,25 +117,25 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
- args->hVASpace = chan->vmm->rm.object.handle;
- args->engineType = eT;
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
- args->instanceMem.base = chan->inst->addr;
- args->instanceMem.size = chan->inst->size;
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
args->instanceMem.addressSpace = 2;
args->instanceMem.cacheAttrib = 1;
- args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+ args->userdMem.base = userd_addr;
args->userdMem.size = fifo->func->chan.func->userd->size;
args->userdMem.addressSpace = 2;
args->userdMem.cacheAttrib = 1;
- args->ramfcMem.base = chan->inst->addr + 0;
+ args->ramfcMem.base = inst_addr;
args->ramfcMem.size = 0x200;
args->ramfcMem.addressSpace = 2;
args->ramfcMem.cacheAttrib = 1;
- args->mthdbufMem.base = chan->rm.mthdbuf.addr;
+ args->mthdbufMem.base = mthdbuf_addr;
args->mthdbufMem.size = fifo->rm.mthdbuf_size;
args->mthdbufMem.addressSpace = 1;
args->mthdbufMem.cacheAttrib = 0;
@@ -173,7 +147,44 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
- ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_engn *engn;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ const struct nvkm_rm_api *rmapi = device->gsp->rm->api;
+ u32 eT = ~0;
+ int ret;
+
+ if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
+ ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
+ if (ret)
+ return ret;
+ }
+
+ nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+ eT = engn->id;
+ break;
+ }
+
+ if (WARN_ON(eT == ~0))
+ return -EINVAL;
+
+ chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
+ fifo->rm.mthdbuf_size,
+ &chan->rm.mthdbuf.addr, GFP_KERNEL);
+ if (!chan->rm.mthdbuf.ptr)
+ return -ENOMEM;
+
+ ret = rmapi->fifo->chan.alloc(&chan->vmm->rm.device, NVKM_RM_CHAN(chan->id),
+ eT, chan->runq, priv, chan->id, chan->inst->addr,
+ nvkm_memory_addr(chan->userd.mem) + chan->userd.base,
+ chan->rm.mthdbuf.addr, chan->vmm, offset, length,
+ &chan->rm.object);
if (ret)
return ret;
@@ -215,123 +226,8 @@ r535_chan_ramfc = {
.priv = true,
};
-struct r535_chan_userd {
- struct nvkm_memory *mem;
- struct nvkm_memory *map;
- int chid;
- u32 used;
-
- struct list_head head;
-} *userd;
-
-static void
-r535_chan_id_put(struct nvkm_chan *chan)
-{
- struct nvkm_runl *runl = chan->cgrp->runl;
- struct nvkm_fifo *fifo = runl->fifo;
- struct r535_chan_userd *userd;
-
- mutex_lock(&fifo->userd.mutex);
- list_for_each_entry(userd, &fifo->userd.list, head) {
- if (userd->map == chan->userd.mem) {
- u32 chid = chan->userd.base / chan->func->userd->size;
-
- userd->used &= ~BIT(chid);
- if (!userd->used) {
- nvkm_memory_unref(&userd->map);
- nvkm_memory_unref(&userd->mem);
- nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
- list_del(&userd->head);
- kfree(userd);
- }
-
- break;
- }
- }
- mutex_unlock(&fifo->userd.mutex);
-
-}
-
-static int
-r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
-{
- const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
- struct nvkm_runl *runl = chan->cgrp->runl;
- struct nvkm_fifo *fifo = runl->fifo;
- struct r535_chan_userd *userd;
- u32 chid;
- int ret;
-
- if (ouserd + chan->func->userd->size >= userd_size ||
- (ouserd & (chan->func->userd->size - 1))) {
- RUNL_DEBUG(runl, "ouserd %llx", ouserd);
- return -EINVAL;
- }
-
- chid = div_u64(ouserd, chan->func->userd->size);
-
- list_for_each_entry(userd, &fifo->userd.list, head) {
- if (userd->mem == muserd) {
- if (userd->used & BIT(chid))
- return -EBUSY;
- break;
- }
- }
-
- if (&userd->head == &fifo->userd.list) {
- if (nvkm_memory_size(muserd) < userd_size) {
- RUNL_DEBUG(runl, "userd too small");
- return -EINVAL;
- }
-
- userd = kzalloc(sizeof(*userd), GFP_KERNEL);
- if (!userd)
- return -ENOMEM;
-
- userd->chid = nvkm_chid_get(runl->chid, chan);
- if (userd->chid < 0) {
- ret = userd->chid;
- kfree(userd);
- return ret;
- }
-
- userd->mem = nvkm_memory_ref(muserd);
-
- ret = nvkm_memory_kmap(userd->mem, &userd->map);
- if (ret) {
- nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
- kfree(userd);
- return ret;
- }
-
-
- list_add(&userd->head, &fifo->userd.list);
- }
-
- userd->used |= BIT(chid);
-
- chan->userd.mem = nvkm_memory_ref(userd->map);
- chan->userd.base = ouserd;
-
- return (userd->chid * CHID_PER_USERD) + chid;
-}
-
-static int
-r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
-{
- struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
- int ret;
-
- mutex_lock(&fifo->userd.mutex);
- ret = r535_chan_id_get_locked(chan, muserd, ouserd);
- mutex_unlock(&fifo->userd.mutex);
- return ret;
-}
-
static const struct nvkm_chan_func
r535_chan = {
- .id_get = r535_chan_id_get,
- .id_put = r535_chan_id_put,
.inst = &gf100_chan_inst,
.userd = &gv100_chan_userd,
.ramfc = &r535_chan_ramfc,
@@ -340,10 +236,6 @@ r535_chan = {
.doorbell_handle = r535_chan_doorbell_handle,
};
-static const struct nvkm_cgrp_func
-r535_cgrp = {
-};
-
static int
r535_engn_nonstall(struct nvkm_engn *engn)
{
@@ -356,7 +248,7 @@ r535_engn_nonstall(struct nvkm_engn *engn)
}
static const struct nvkm_engn_func
-r535_ce = {
+r535_engn_ce = {
.nonstall = r535_engn_nonstall,
};
@@ -376,7 +268,7 @@ r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *c
}
static const struct nvkm_engn_func
-r535_gr = {
+r535_engn_gr = {
.nonstall = r535_engn_nonstall,
.ctor2 = r535_gr_ctor,
};
@@ -449,57 +341,86 @@ r535_runl = {
.allow = r535_runl_allow,
};
-static int
-r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
+void
+r535_fifo_rc_chid(struct nvkm_fifo *fifo, int chid)
{
- switch (type) {
- case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
- case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
- case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
- case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
- case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
- case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
- case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
- case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
- default:
- break;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
+ if (!chan) {
+ nvkm_error(&fifo->engine.subdev, "rc: chid %d not found!\n", chid);
+ return;
}
- WARN_ON(1);
- return -EINVAL;
+ nvkm_chan_error(chan, false);
+ nvkm_chan_put(&chan, flags);
}
static int
-r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
+r535_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc: engn:%08x chid:%d type:%d scope:%d part:%d\n",
+ msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
+ msg->partitionAttributionId);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r535_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
+
switch (rm) {
- case RM_ENGINE_TYPE_GR0:
- *ptype = NVKM_ENGINE_GR;
- return 0;
- case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
- *ptype = NVKM_ENGINE_CE;
- return rm - RM_ENGINE_TYPE_COPY0;
- case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
- *ptype = NVKM_ENGINE_NVDEC;
- return rm - RM_ENGINE_TYPE_NVDEC0;
- case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
- *ptype = NVKM_ENGINE_NVENC;
- return rm - RM_ENGINE_TYPE_NVENC0;
- case RM_ENGINE_TYPE_SW:
- *ptype = NVKM_ENGINE_SW;
- return 0;
- case RM_ENGINE_TYPE_SEC2:
- *ptype = NVKM_ENGINE_SEC2;
- return 0;
- case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
- *ptype = NVKM_ENGINE_NVJPG;
- return rm - RM_ENGINE_TYPE_NVJPEG0;
- case RM_ENGINE_TYPE_OFA:
- *ptype = NVKM_ENGINE_OFA;
- return 0;
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA, OFA, 0);
default:
return -EINVAL;
}
+#undef RM_ENGINE_TYPE
}
static int
@@ -536,16 +457,19 @@ static int
r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
- struct nvkm_gsp *gsp = subdev->device->gsp;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
struct nvkm_runl *runl;
struct nvkm_engn *engn;
- u32 cgids = 2048;
u32 chids = 2048;
+ u32 first = rm->api->fifo->rsvd_chids;
+ u32 count = chids - first;
int ret;
NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
- if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
- (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
+ if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->cgid)) ||
+ (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->chid)))
return ret;
ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
@@ -576,25 +500,43 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
if (!runl)
continue;
- inst = r535_fifo_engn_type(rmid, &type);
+ inst = rm->api->fifo->xlat_rm_engine_type(rmid, &type, &nv2080);
if (inst < 0) {
nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
nvkm_runl_del(runl);
continue;
}
- nv2080 = r535_fifo_2080_type(type, inst);
- if (nv2080 < 0) {
+ /* Skip SW engine - there's currently no support for NV SW classes. */
+ if (type == NVKM_ENGINE_SW)
+ continue;
+
+ /* Skip lone GRCEs (ones not paired with GR on a runlist), as they
+ * don't appear to function as async copy engines.
+ */
+ if (type == NVKM_ENGINE_CE &&
+ rm->gpu->ce.grce_mask &&
+ (rm->gpu->ce.grce_mask(device) & BIT(inst)) &&
+ !nvkm_runl_find_engn(engn, runl, engn->engine->subdev.type == NVKM_ENGINE_GR)) {
+ RUNL_DEBUG(runl, "skip LCE %d - GRCE without GR", inst);
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ ret = nvkm_rm_engine_new(gsp->rm, type, inst);
+ if (ret) {
nvkm_runl_del(runl);
continue;
}
+ engn = NULL;
+
switch (type) {
case NVKM_ENGINE_CE:
- engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_ce, type, inst);
break;
case NVKM_ENGINE_GR:
- engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_gr, type, inst);
break;
case NVKM_ENGINE_NVDEC:
case NVKM_ENGINE_NVENC:
@@ -633,7 +575,7 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
}
- return r535_fifo_ectx_size(fifo);
+ return rm->api->fifo->ectx_size(fifo);
}
static void
@@ -646,6 +588,7 @@ int
r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_fifo_func *rm;
if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
@@ -654,12 +597,20 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
rm->dtor = r535_fifo_dtor;
rm->runl_ctor = r535_fifo_runl_ctor;
rm->runl = &r535_runl;
- rm->cgrp = hw->cgrp;
- rm->cgrp.func = &r535_cgrp;
- rm->chan = hw->chan;
+ rm->chan.user.oclass = gpu->fifo.chan.class;
rm->chan.func = &r535_chan;
rm->nonstall = &ga100_fifo_nonstall;
rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
return nvkm_fifo_new_(rm, device, type, inst, pfifo);
}
+
+const struct nvkm_rm_api_fifo
+r535_fifo = {
+ .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type,
+ .ectx_size = r535_fifo_ectx_size,
+ .rc_triggered = r535_fifo_rc_triggered,
+ .chan = {
+ .alloc = r535_chan_alloc,
+ },
+};
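
r535_fifo_xlat_rm_engine_type() above folds what used to be two lookup functions into one switch by letting a macro expand to the case label, both output assignments and the return value. The same trick in self-contained form, with toy enums (all names here are made up):

	#include <stdio.h>

	enum rm_type   { RM_COPY0 = 1, RM_COPY1, RM_GR0 };
	enum nvkm_type { NVKM_CE, NVKM_GR };

	static int
	xlat(enum rm_type rm, enum nvkm_type *ptype)
	{
	#define ENGINE(RM, NVKM, INST)        \
		RM_##RM:                      \
			*ptype = NVKM_##NVKM; \
			return INST

		switch (rm) {
		case ENGINE(COPY0, CE, 0);
		case ENGINE(COPY1, CE, 1);
		case ENGINE(  GR0, GR, 0);
		default:
			return -1;
		}
	#undef ENGINE
	}

	int main(void)
	{
		enum nvkm_type t;
		int inst = xlat(RM_COPY1, &t);

		printf("type=%d inst=%d\n", t, inst); /* type=0 inst=1 */
		return 0;
	}

The trailing semicolon after each case invocation becomes an empty statement, which is why the macro body can safely end in a return.
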
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
new file mode 100644
index 000000000000..ddb57d5e73d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/gr.h>
+
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+#include <engine/fifo/priv.h>
+#include <engine/gr/priv.h>
+
+#include <nvif/if900d.h>
+
+#include <nvhw/drf.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/vmm.h"
+
+#define r535_gr(p) container_of((p), struct r535_gr, base)
+
+static void *
+r535_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
+ struct r535_gr *gr = grc->gr;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(grc->vmm, &grc->vma[i]);
+ nvkm_memory_unref(&grc->mem[i]);
+ }
+
+ nvkm_vmm_unref(&grc->vmm);
+ return grc;
+}
+
+static const struct nvkm_object_func
+r535_gr_chan = {
+ .dtor = r535_gr_chan_dtor,
+};
+
+int
+r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
+ struct nvkm_memory **pmem, struct nvkm_vma **pvma,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ ctrl->engineType = 1;
+ ctrl->hChanClient = vmm->rm.client.object.handle;
+ ctrl->hObject = chan->handle;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
+ &ctrl->promoteEntry[ctrl->entryCount];
+ const bool alloc = golden || !gr->ctxbuf[i].global;
+ int ret;
+
+ entry->bufferId = gr->ctxbuf[i].bufferId;
+ entry->bInitialize = gr->ctxbuf[i].init && alloc;
+
+ if (alloc) {
+ ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
+ NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
+ gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
+ gr->ctxbuf[i].init, &pmem[i]);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
+ entry->bNonmapped = 1;
+ } else {
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
+ continue;
+
+ pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
+ }
+
+ if (!entry->bNonmapped) {
+ struct gf100_vmm_map_v0 args = {
+ .priv = 1,
+ .ro = gr->ctxbuf[i].ro,
+ };
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
+ nvkm_memory_size(pmem[i]), &pvma[i]);
+ mutex_unlock(&vmm->mutex.vmm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
+ if (ret)
+ return ret;
+
+ entry->gpuVirtAddr = pvma[i]->addr;
+ }
+
+ if (entry->bInitialize) {
+ entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
+ entry->size = gr->ctxbuf[i].size;
+ entry->physAttr = 4;
+ }
+
+ nvkm_debug(subdev,
+ "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
+ entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
+ entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
+
+ ctrl->entryCount++;
+ }
+
+ return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
+}
+
+int
+r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr *gr = r535_gr(base);
+ struct r535_gr_chan *grc;
+ int ret;
+
+ if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
+ grc->gr = gr;
+ grc->vmm = nvkm_vmm_ref(chan->vmm);
+ grc->chan = chan;
+ *pobject = &grc->object;
+
+ ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+u64
+r535_gr_units(struct nvkm_gr *gr)
+{
+ struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
+
+ return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
+}
+
+void
+r535_gr_get_ctxbuf_info(struct r535_gr *gr, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *info)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ static const struct {
+ u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
+ u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
+ bool global;
+ bool init;
+ bool ro;
+ } map[] = {
+#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
+ .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
+ .global = (G), .init = (I), .ro = (R) }
+#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
+ /* global init ro */
+ _A( GRAPHICS, MAIN, false, true, false),
+ _B( PATCH, false, true, false),
+ _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
+ _B( PAGEPOOL, true, false, false),
+ _B( ATTRIBUTE_CB, true, false, false),
+ _B( RTV_CB_GLOBAL, true, false, false),
+ _B( FECS_EVENT, true, true, false),
+ _B( PRIV_ACCESS_MAP, true, true, true),
+#undef _B
+#undef _A
+ };
+ u32 size = info->size;
+ u8 align, page;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(map); id++) {
+ if (map[id].id0 == i)
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
+ size, (id < ARRAY_SIZE(map)) ? "*" : "");
+ if (id >= ARRAY_SIZE(map))
+ return;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
+ size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
+
+ if (size >= 1 << 21) page = 21;
+ else if (size >= 1 << 16) page = 16;
+ else page = 12;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
+ align = order_base_2(size);
+ else
+ align = page;
+
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
+ gr->ctxbuf[gr->ctxbuf_nr].size = size;
+ gr->ctxbuf[gr->ctxbuf_nr].page = page;
+ gr->ctxbuf[gr->ctxbuf_nr].align = align;
+ gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
+ gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
+ gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
+ gr->ctxbuf_nr++;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId =
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
+ gr->ctxbuf_nr++;
+ }
+}
+
+static int
+r535_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+int
+r535_gr_oneinit(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ struct {
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } golden = {};
+ struct nvkm_gsp_object threed;
+ int ret;
+
+ /* Allocate a channel to use for golden context init. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(golden.vmm, NVKM_RM_VASPACE, false);
+ if (ret)
+ goto done;
+
+ ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0),
+ 1, 0, true, rm->api->fifo->rsvd_chids,
+ nvkm_memory_addr(golden.inst),
+ nvkm_memory_addr(golden.inst) + 0x1000,
+ nvkm_memory_addr(golden.inst) + 0x2000,
+ golden.vmm, 0, 0x1000, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Fetch context buffer info from RM and allocate each of them here to use
+ * during golden context init (or later as a global context buffer).
+ *
+ * Also build the information that'll be used to create channel contexts.
+ */
+ ret = rm->api->gr->get_ctxbufs_info(gr);
+ if (ret)
+ goto done;
+
+ /* Promote golden context to RM. */
+ ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Allocate 3D class on channel to trigger golden context init in RM. */
+ ret = nvkm_gsp_rm_alloc(&golden.chan, NVKM_RM_THREED, rm->gpu->gr.class.threed, 0, &threed);
+ if (ret)
+ goto done;
+
+ /* There's no need to keep the golden channel around, as RM caches the context. */
+ nvkm_gsp_rm_free(&threed);
+done:
+ nvkm_gsp_rm_free(&golden.chan);
+ for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
+ nvkm_vmm_put(golden.vmm, &golden.vma[i]);
+ nvkm_vmm_unref(&golden.vmm);
+ nvkm_memory_unref(&golden.inst);
+ return ret;
+}
+
+void *
+r535_gr_dtor(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = r535_gr(base);
+
+ while (gr->ctxbuf_nr)
+ nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
+
+ kfree(gr->base.func);
+ return gr;
+}
+
+const struct nvkm_rm_api_gr
+r535_gr = {
+ .get_ctxbufs_info = r535_gr_get_ctxbufs_info,
+};
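
r535_gr_get_ctxbuf_info() above derives the page size from the buffer size (2MiB pages from 2MiB up, 64KiB pages from 64KiB up, otherwise 4KiB) and aligns only the attribute CB to its own size via order_base_2(). A tiny sketch of that selection with the nouveau types stripped away:

	#include <stdio.h>

	static unsigned
	page_shift(unsigned long long size)
	{
		if (size >= 1ULL << 21)
			return 21;	/* 2MiB pages */
		if (size >= 1ULL << 16)
			return 16;	/* 64KiB pages */
		return 12;		/* 4KiB pages */
	}

	int main(void)
	{
		const unsigned long long sizes[] = { 0x1000, 0x20000, 0x400000 };

		for (int i = 0; i < 3; i++)
			printf("size 0x%llx -> page shift %u\n",
			       sizes[i], page_shift(sizes[i]));
		return 0;
	}
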
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
index db2602e88006..588cb4ab85cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
@@ -19,9 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <rm/rpc.h>
+
#include "priv.h"
#include <core/pci.h>
+#include <subdev/pci/priv.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
@@ -30,29 +33,11 @@
#include <nvfw/fw.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
-#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+#include "nvrm/event.h"
+#include "nvrm/fifo.h"
#include <linux/acpi.h>
#include <linux/ctype.h>
@@ -60,990 +45,6 @@
extern struct dentry *nouveau_debugfs_root;
-#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
-#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
-
-/**
- * DOC: GSP message queue element
- *
- * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
- *
- * The GSP command queue and status queue are message queues for the
- * communication between software and GSP. The software submits the GSP
- * RPC via the GSP command queue, and GSP writes the status of the
- * submitted RPC to the status queue.
- *
- * A GSP message queue element consists of three parts:
- *
- * - message element header (struct r535_gsp_msg), which mostly maintains
- * the metadata for queuing the element.
- *
- * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
- * of the RPC. E.g., the RPC function number.
- *
- * - The payload, which holds the RPC message body, e.g. the params of a
- * specific RPC function. Some RPC functions, e.g. rm_alloc and
- * rm_control, also carry their own headers in the payload.
- *
- * The memory layout of a GSP message element can be illustrated below::
- *
- * +------------------------+
- * | Message Element Header |
- * | (r535_gsp_msg) |
- * | |
- * | (r535_gsp_msg.data) |
- * | | |
- * |----------V-------------|
- * | GSP RPC Header |
- * | (nvfw_gsp_rpc) |
- * | |
- * | (nvfw_gsp_rpc.data) |
- * | | |
- * |----------V-------------|
- * | Payload |
- * | |
- * | header(optional) |
- * | params |
- * +------------------------+
- *
- * The max size of a message queue element is 16 pages (including the
- * headers). When a GSP message to be sent is larger than 16 pages, the
- * message should be split into multiple elements and sent accordingly.
- *
- * Among the split elements, the first carries the expected function
- * number, while the rest of the elements are sent with the
- * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
- *
- * GSP consumes the elements from the cmdq and always writes the result
- * back to the msgq. The result is also formed as split elements.
- *
- * Terminology:
- *
- * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
- * payload)
- * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
- * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
- * - gsp_rpc_len: size of (GSP RPC header + payload)
- * - params_size: size of params in the payload
- * - payload_size: size of (header if exists + params) in the payload
- */
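
Given the 16-page element limit described above, the element count for an RPC follows directly from the payload size. A stand-alone sketch of that arithmetic, assuming the element header (48 bytes) and RPC header (32 bytes) folded into one 0x50-byte constant:

	#include <stdio.h>

	#define GSP_PAGE_SIZE    0x1000
	#define GSP_MSG_MAX_SIZE (GSP_PAGE_SIZE * 16)

	int main(void)
	{
		const unsigned hdr = 0x50;	/* element + RPC headers, simplified */
		const unsigned max_chunk = GSP_MSG_MAX_SIZE - hdr;
		unsigned payload = 0x15000;	/* 84KiB of params */
		unsigned elems = 1;
		unsigned left = payload > max_chunk ? payload - max_chunk : 0;

		while (left) {
			left -= left > max_chunk ? max_chunk : left;
			elems++;
		}

		printf("payload 0x%x -> %u queue element(s)\n", payload, elems);
		return 0;
	}
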
-
-struct r535_gsp_msg {
- u8 auth_tag_buffer[16];
- u8 aad_buffer[16];
- u32 checksum;
- u32 sequence;
- u32 elem_count;
- u32 pad;
- u8 data[];
-};
-
-struct nvfw_gsp_rpc {
- u32 header_version;
- u32 signature;
- u32 length;
- u32 function;
- u32 rpc_result;
- u32 rpc_result_private;
- u32 sequence;
- union {
- u32 spare;
- u32 cpuRmGfid;
- };
- u8 data[];
-};
-
-#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
-
-#define to_gsp_hdr(p, header) \
- container_of((void *)p, typeof(*header), data)
-
-#define to_payload_hdr(p, header) \
- container_of((void *)p, typeof(*header), params)
-
-static int
-r535_rpc_status_to_errno(uint32_t rpc_status)
-{
- switch (rpc_status) {
- case 0x55: /* NV_ERR_NOT_READY */
- case 0x66: /* NV_ERR_TIMEOUT_RETRY */
- return -EBUSY;
- case 0x51: /* NV_ERR_NO_MEMORY */
- return -ENOMEM;
- default:
- return -EINVAL;
- }
-}
-
-static int
-r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
-{
- u32 size, rptr = *gsp->msgq.rptr;
- int used;
-
- size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
- GSP_PAGE_SIZE);
- if (WARN_ON(!size || size >= gsp->msgq.cnt))
- return -EINVAL;
-
- do {
- u32 wptr = *gsp->msgq.wptr;
-
- used = wptr + gsp->msgq.cnt - rptr;
- if (used >= gsp->msgq.cnt)
- used -= gsp->msgq.cnt;
- if (used >= size)
- break;
-
- usleep_range(1, 2);
- } while (--(*ptime));
-
- if (WARN_ON(!*ptime))
- return -ETIMEDOUT;
-
- return used;
-}
-
-static struct r535_gsp_msg *
-r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
-{
- u32 rptr = *gsp->msgq.rptr;
-
- /* Skip the first page, which is the message queue info */
- return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
- rptr * GSP_PAGE_SIZE);
-}
-
-/**
- * DOC: Receive a GSP message queue element
- *
- * Receiving a GSP message queue element from the message queue consists of
- * the following steps:
- *
- * - Peek the element from the queue: r535_gsp_msgq_peek().
- * Peek the first page of the element to determine the total size of the
- * message before allocating the proper memory.
- *
- * - Allocate memory for the message.
- * Once the total size of the message is determined from the GSP message
- * queue element, the caller of r535_gsp_msgq_recv() allocates the
- * required memory.
- *
- * - Receive the message: r535_gsp_msgq_recv().
- * Copy the message into the allocated memory. Advance the read pointer.
- * If the message is a large GSP message, r535_gsp_msgq_recv() calls
- * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
- * until the complete message is received.
- * r535_gsp_msgq_recv() assembles the payloads of the continuation parts
- * into the buffer returned for the large GSP message.
- *
- * - Free the allocated memory: r535_gsp_msg_done().
- * The user is responsible for freeing the memory allocated for the GSP
- * message pages after they have been processed.
- */
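
The peek step exists so the receiver can size its allocation before copying anything: read only the fixed header, learn the total length, then receive the full element. The same shape in miniature, against a flat buffer instead of the shared-memory ring (types are stand-ins):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct hdr { unsigned length; unsigned function; };

	static void *
	recv_msg(const unsigned char *queue)
	{
		struct hdr peek;
		void *buf;

		/* Peek: copy just the header to learn the element's full length. */
		memcpy(&peek, queue, sizeof(peek));

		buf = malloc(peek.length);
		if (!buf)
			return NULL;

		/* Receive: now copy the whole element into the right-sized buffer. */
		memcpy(buf, queue, peek.length);
		return buf;
	}

	int main(void)
	{
		unsigned char queue[64] = { 0 };
		struct hdr h = { .length = 24, .function = 7 };
		void *msg;

		memcpy(queue, &h, sizeof(h));
		msg = recv_msg(queue);
		printf("received %u-byte element\n", h.length);
		free(msg);
		return 0;
	}
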
-static void *
-r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- int ret;
-
- ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- return mqe->data;
-}
-
-struct r535_gsp_msg_info {
- int *retries;
- u32 gsp_rpc_len;
- void *gsp_rpc_buf;
- bool continuation;
-};
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
-
-static void *
-r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
- struct r535_gsp_msg_info *info)
-{
- u8 *buf = info->gsp_rpc_buf;
- u32 rptr = *gsp->msgq.rptr;
- struct r535_gsp_msg *mqe;
- u32 size, expected, len;
- int ret;
-
- expected = info->gsp_rpc_len;
-
- ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- if (info->continuation) {
- struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
- nvkm_error(&gsp->subdev,
- "Not a continuation of a large RPC\n");
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- return ERR_PTR(-EIO);
- }
- }
-
- size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
-
- len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
- len = min_t(u32, expected, len);
-
- if (info->continuation)
- memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
- len - sizeof(struct nvfw_gsp_rpc));
- else
- memcpy(buf, mqe->data, len);
-
- expected -= len;
-
- if (expected) {
- mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
- memcpy(buf + len, mqe, expected);
- }
-
- rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
-
- mb();
- (*gsp->msgq.rptr) = rptr;
- return buf;
-}
-
-static void *
-r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
- struct nvfw_gsp_rpc *rpc;
- struct r535_gsp_msg_info info = {0};
- u32 expected = gsp_rpc_len;
- void *buf;
-
- mqe = r535_gsp_msgq_get_entry(gsp);
- rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (WARN_ON(rpc->length > max_rpc_size))
- return NULL;
-
- buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- info.gsp_rpc_buf = buf;
- info.retries = retries;
- info.gsp_rpc_len = rpc->length;
-
- buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR(buf)) {
- kvfree(info.gsp_rpc_buf);
- info.gsp_rpc_buf = NULL;
- return buf;
- }
-
- if (expected <= max_rpc_size)
- return buf;
-
- info.gsp_rpc_buf += info.gsp_rpc_len;
- expected -= info.gsp_rpc_len;
-
- while (expected) {
- u32 size;
-
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
- if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
- return rpc;
- }
-
- info.gsp_rpc_len = rpc->length;
- info.continuation = true;
-
- rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
- return rpc;
- }
-
- size = info.gsp_rpc_len - sizeof(*rpc);
- expected -= size;
- info.gsp_rpc_buf += size;
- }
-
- rpc = buf;
- rpc->length = gsp_rpc_len;
- return buf;
-}
-
-static int
-r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
-{
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- struct r535_gsp_msg *cqe;
- u32 gsp_rpc_len = msg->checksum;
- u64 *ptr = (void *)msg;
- u64 *end;
- u64 csum = 0;
- int free, time = 1000000;
- u32 wptr, size, step, len;
- u32 off = 0;
-
- len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
-
- end = (u64 *)((char *)ptr + len);
- msg->pad = 0;
- msg->checksum = 0;
- msg->sequence = gsp->cmdq.seq++;
- msg->elem_count = DIV_ROUND_UP(len, 0x1000);
-
- while (ptr < end)
- csum ^= *ptr++;
-
- msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
-
- wptr = *gsp->cmdq.wptr;
- do {
- do {
- free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
- if (free >= gsp->cmdq.cnt)
- free -= gsp->cmdq.cnt;
- if (free >= 1)
- break;
-
- usleep_range(1, 2);
- } while(--time);
-
- if (WARN_ON(!time)) {
- kvfree(msg);
- return -ETIMEDOUT;
- }
-
- cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
- step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
- size = min_t(u32, len, step * GSP_PAGE_SIZE);
-
- memcpy(cqe, (u8 *)msg + off, size);
-
- wptr += DIV_ROUND_UP(size, 0x1000);
- if (wptr == gsp->cmdq.cnt)
- wptr = 0;
-
- off += size;
- len -= size;
- } while (len);
-
- nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
- wmb();
- (*gsp->cmdq.wptr) = wptr;
- mb();
-
- nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
-
- kvfree(msg);
- return 0;
-}
-
-static void *
-r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
-{
- struct r535_gsp_msg *msg;
- u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
-
- size = ALIGN(size, GSP_MSG_MIN_SIZE);
- msg = kvzalloc(size, GFP_KERNEL);
- if (!msg)
- return ERR_PTR(-ENOMEM);
-
- msg->checksum = gsp_rpc_len;
- return msg->data;
-}
-
-static void
-r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
-{
- kvfree(msg);
-}
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
-{
- if (gsp->subdev.debug >= lvl) {
- nvkm_printk__(&gsp->subdev, lvl, info,
- "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
- msg->function, msg->length, msg->length - sizeof(*msg),
- msg->rpc_result, msg->rpc_result_private);
- print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
- msg->data, msg->length - sizeof(*msg), true);
- }
-}
-
-static struct nvfw_gsp_rpc *
-r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvfw_gsp_rpc *rpc;
- int retries = 4000000, i;
-
-retry:
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->rpc_result) {
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EINVAL);
- }
-
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
-
- if (fn && rpc->function == fn) {
- if (gsp_rpc_len) {
- if (rpc->length < gsp_rpc_len) {
- nvkm_error(subdev, "rpc len %d < %d\n",
- rpc->length, gsp_rpc_len);
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EIO);
- }
-
- return rpc;
- }
-
- r535_gsp_msg_done(gsp, rpc);
- return NULL;
- }
-
- for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
- struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
-
- if (ntfy->fn == rpc->function) {
- if (ntfy->func)
- ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
- rpc->length - sizeof(*rpc));
- break;
- }
- }
-
- if (i == gsp->msgq.ntfy_nr)
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
-
- r535_gsp_msg_done(gsp, rpc);
- if (fn)
- goto retry;
-
- if (*gsp->msgq.rptr != *gsp->msgq.wptr)
- goto retry;
-
- return NULL;
-}
-
-static int
-r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
-{
- int ret = 0;
-
- mutex_lock(&gsp->msgq.mutex);
- if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
- ret = -ENOSPC;
- } else {
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
- gsp->msgq.ntfy_nr++;
- }
- mutex_unlock(&gsp->msgq.mutex);
- return ret;
-}
-
-static int
-r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
-{
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- repv = r535_gsp_msg_recv(gsp, fn, 0);
- mutex_unlock(&gsp->cmdq.mutex);
- if (IS_ERR(repv))
- return PTR_ERR(repv);
-
- return 0;
-}
-
-static void *
-r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct nvfw_gsp_rpc *msg;
- u32 fn = rpc->function;
- void *repv = NULL;
- int ret;
-
- if (gsp->subdev.debug >= NV_DBG_TRACE) {
- nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
- rpc->length, rpc->length - sizeof(*rpc));
- print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
- rpc->data, rpc->length - sizeof(*rpc), true);
- }
-
- ret = r535_gsp_cmdq_push(gsp, rpc);
- if (ret)
- return ERR_PTR(ret);
-
- if (wait) {
- msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
- if (!IS_ERR_OR_NULL(msg))
- repv = msg->data;
- else
- repv = msg;
- }
-
- return repv;
-}
-
-static void
-r535_gsp_event_dtor(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
-
- mutex_lock(&gsp->client_id.mutex);
- if (event->func) {
- list_del(&event->head);
- event->func = NULL;
- }
- mutex_unlock(&gsp->client_id.mutex);
-
- nvkm_gsp_rm_free(&event->object);
- event->device = NULL;
-}
-
-static int
-r535_gsp_device_event_get(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
- NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- ctrl->event = event->id;
- ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
- return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
-}
-
-static int
-r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
- nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
- NV0005_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
- NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
- &event->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hParentClient = client->object.handle;
- args->hSrcResource = 0;
- args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
- args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
- args->data = NULL;
-
- ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
- if (ret)
- return ret;
-
- event->device = device;
- event->id = id;
-
- ret = r535_gsp_device_event_get(event);
- if (ret) {
- nvkm_gsp_event_dtor(event);
- return ret;
- }
-
- mutex_lock(&gsp->client_id.mutex);
- event->func = func;
- list_add(&event->head, &client->events);
- mutex_unlock(&gsp->client_id.mutex);
- return 0;
-}
-
-static void
-r535_gsp_device_dtor(struct nvkm_gsp_device *device)
-{
- nvkm_gsp_rm_free(&device->subdevice);
- nvkm_gsp_rm_free(&device->object);
-}
-
-static int
-r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
-{
- NV2080_ALLOC_PARAMETERS *args;
-
- return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
- &device->subdevice);
-}
-
-static int
-r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
-{
- NV0080_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
- &device->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hClientShare = client->object.handle;
-
- ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
- if (ret)
- return ret;
-
- ret = r535_gsp_subdevice_ctor(device);
- if (ret)
- nvkm_gsp_rm_free(&device->object);
-
- return ret;
-}
-
-static void
-r535_gsp_client_dtor(struct nvkm_gsp_client *client)
-{
- struct nvkm_gsp *gsp = client->gsp;
-
- nvkm_gsp_rm_free(&client->object);
-
- mutex_lock(&gsp->client_id.mutex);
- idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
- mutex_unlock(&gsp->client_id.mutex);
-
- client->gsp = NULL;
-}
-
-static int
-r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
-{
- NV0000_ALLOC_PARAMETERS *args;
- int ret;
-
- mutex_lock(&gsp->client_id.mutex);
- ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
- mutex_unlock(&gsp->client_id.mutex);
- if (ret < 0)
- return ret;
-
- client->gsp = gsp;
- client->object.client = client;
- INIT_LIST_HEAD(&client->events);
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
- &client->object);
- if (IS_ERR(args)) {
- r535_gsp_client_dtor(client);
- return ret;
- }
-
- args->hClient = client->object.handle;
- args->processID = ~0;
-
- ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
- if (ret) {
- r535_gsp_client_dtor(client);
- return ret;
- }
-
- return 0;
-}
-
-static int
-r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_free_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
- client->object.handle, object->handle);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
- if (WARN_ON(IS_ERR_OR_NULL(rpc)))
- return -EIO;
-
- rpc->params.hRoot = client->object.handle;
- rpc->params.hObjectParent = 0;
- rpc->params.hObjectOld = object->handle;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
-}
-
-static void
-r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- void *ret = NULL;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc));
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->status) {
- ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
- if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
- nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- }
-
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
- u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_alloc_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
- client->object.handle, object->parent->handle,
- object->handle);
-
- nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
- params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hParent = object->parent->handle;
- rpc->hObject = object->handle;
- rpc->hClass = oclass;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
-
-static void
-r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- if (!params)
- return;
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static int
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- int ret = 0;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
- if (IS_ERR_OR_NULL(rpc)) {
- *params = NULL;
- return PTR_ERR(rpc);
- }
-
- if (rpc->status) {
- ret = r535_rpc_status_to_errno(rpc->status);
- if (ret != -EAGAIN && ret != -EBUSY)
- nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
- object->client->object.handle, object->handle, rpc->cmd, rpc->status);
- }
-
- if (repc)
- *params = rpc->params;
- else
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_control_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
- client->object.handle, object->handle, cmd, params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hObject = object->handle;
- rpc->cmd = cmd;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
-
-static void
-r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
-{
- struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
-
- r535_gsp_msg_done(gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
-{
- struct nvfw_gsp_rpc *rpc;
-
- rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
- sizeof(u64)));
- if (IS_ERR(rpc))
- return ERR_CAST(rpc);
-
- rpc->header_version = 0x03000000;
- rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
- rpc->function = fn;
- rpc->rpc_result = 0xffffffff;
- rpc->rpc_result_private = 0xffffffff;
- rpc->length = sizeof(*rpc) + payload_size;
- return rpc->data;
-}
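The signature written just above packs four ASCII bytes into one word; on a little-endian machine the value 0x43505256 reads as "VRPC" when the header is dumped byte by byte. A standalone sanity check (illustrative userspace code, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t sig = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';

	/* little-endian byte order in memory: 'V' 'R' 'P' 'C' */
	assert(sig == 0x43505256);
	return 0;
}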
-
-static void *
-r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
- const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
- u32 payload_size = rpc->length - sizeof(*rpc);
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- if (payload_size > max_payload_size) {
- const u32 fn = rpc->function;
- u32 remain_payload_size = payload_size;
-
- /* Adjust length, and send initial RPC. */
- rpc->length = sizeof(*rpc) + max_payload_size;
- msg->checksum = rpc->length;
-
- repv = r535_gsp_rpc_send(gsp, payload, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += max_payload_size;
- remain_payload_size -= max_payload_size;
-
- /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
- while (remain_payload_size) {
- u32 size = min(remain_payload_size,
- max_payload_size);
- void *next;
-
- next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
- if (IS_ERR(next)) {
- repv = next;
- goto done;
- }
-
- memcpy(next, payload, size);
-
- repv = r535_gsp_rpc_send(gsp, next, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += size;
- remain_payload_size -= size;
- }
-
- /* Wait for reply. */
- rpc = r535_gsp_msg_recv(gsp, fn, payload_size +
- sizeof(*rpc));
- if (!IS_ERR_OR_NULL(rpc)) {
- if (wait) {
- repv = rpc->data;
- } else {
- nvkm_gsp_rpc_done(gsp, rpc);
- repv = NULL;
- }
- } else {
- repv = wait ? rpc : NULL;
- }
- } else {
- repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len);
- }
-
-done:
- mutex_unlock(&gsp->cmdq.mutex);
- return repv;
-}
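When the payload exceeds max_payload_size, the path above emits one initial RPC followed by CONTINUATION_RECORD RPCs until the remainder is drained. The resulting message count, as a small sketch (the helper name is an assumption, not nouveau API):

/* Illustrative only: how many GSP messages the chunked send above
 * produces for a given payload. */
static u32 gsp_rpc_msg_count(u32 payload_size, u32 max_payload_size)
{
	if (payload_size <= max_payload_size)
		return 1;

	/* one initial RPC plus one CONTINUATION_RECORD per extra chunk */
	return 1 + DIV_ROUND_UP(payload_size - max_payload_size,
				max_payload_size);
}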
-
-const struct nvkm_gsp_rm
-r535_gsp_rm = {
- .rpc_get = r535_gsp_rpc_get,
- .rpc_push = r535_gsp_rpc_push,
- .rpc_done = r535_gsp_rpc_done,
-
- .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
- .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
- .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,
-
- .rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
- .rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
- .rm_alloc_done = r535_gsp_rpc_rm_alloc_done,
-
- .rm_free = r535_gsp_rpc_rm_free,
-
- .client_ctor = r535_gsp_client_ctor,
- .client_dtor = r535_gsp_client_dtor,
-
- .device_ctor = r535_gsp_device_ctor,
- .device_dtor = r535_gsp_device_dtor,
-
- .event_ctor = r535_gsp_device_event_ctor,
- .event_dtor = r535_gsp_event_dtor,
-};
-
static void
r535_gsp_msgq_work(struct work_struct *work)
{
@@ -1086,10 +87,52 @@ r535_gsp_intr(struct nvkm_inth *inth)
return IRQ_HANDLED;
}
+static bool
+r535_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_MSENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = 0;
+ return true;
+ default:
+ return false;
+ }
+}
+
static int
r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
{
NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret = 0;
ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
@@ -1112,42 +155,8 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
- switch (ctrl->table[i].engineIdx) {
- case MC_ENGINE_IDX_GSP:
- type = NVKM_SUBDEV_GSP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_DISP:
- type = NVKM_ENGINE_DISP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
- type = NVKM_ENGINE_CE;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
- break;
- case MC_ENGINE_IDX_GR0:
- type = NVKM_ENGINE_GR;
- inst = 0;
- break;
- case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
- type = NVKM_ENGINE_NVDEC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
- break;
- case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
- type = NVKM_ENGINE_NVENC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
- break;
- case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
- type = NVKM_ENGINE_NVJPG;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
- break;
- case MC_ENGINE_IDX_OFA0:
- type = NVKM_ENGINE_OFA;
- inst = 0;
- break;
- default:
+ if (!rmapi->gsp->xlat_mc_engine_idx(ctrl->table[i].engineIdx, &type, &inst))
continue;
- }
if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
ret = -ENOSPC;
@@ -1165,35 +174,14 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
return ret;
}
-static int
-r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
+void
+r535_gsp_get_static_info_fb(struct nvkm_gsp *gsp,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *info)
{
- GspStaticConfigInfo *rpc;
int last_usable = -1;
- rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
- if (IS_ERR(rpc))
- return PTR_ERR(rpc);
-
- gsp->internal.client.object.client = &gsp->internal.client;
- gsp->internal.client.object.parent = NULL;
- gsp->internal.client.object.handle = rpc->hInternalClient;
- gsp->internal.client.gsp = gsp;
-
- gsp->internal.device.object.client = &gsp->internal.client;
- gsp->internal.device.object.parent = &gsp->internal.client.object;
- gsp->internal.device.object.handle = rpc->hInternalDevice;
-
- gsp->internal.device.subdevice.client = &gsp->internal.client;
- gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
- gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
-
- gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
- gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
-
- for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
- NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
- &rpc->fbRegionInfoParams.fbRegion[i];
+ for (int i = 0; i < info->numFBRegions; i++) {
+ const NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = &info->fbRegion[i];
nvkm_debug(&gsp->subdev, "fb region %d: "
"%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
@@ -1215,10 +203,38 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
}
if (last_usable >= 0) {
- u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
+ u32 rsvd_base = info->fbRegion[last_usable].limit + 1;
gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
}
+}
+
+static int
+r535_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
@@ -1231,7 +247,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
return 0;
}
-static void
+void
nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
{
if (mem->data) {
@@ -1260,7 +276,7 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
* so we take a device reference to ensure its lifetime. The reference is
* dropped in the destructor.
*/
-static int
+int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
@@ -1277,9 +293,10 @@ static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret;
- ret = r535_gsp_rpc_get_gsp_static_info(gsp);
+ ret = rmapi->gsp->get_static_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -1327,7 +344,7 @@ r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
}
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
enum registry_type {
@@ -1684,7 +701,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
build_registry(gsp, rpc);
- return nvkm_gsp_rpc_wr(gsp, rpc, false);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
fail:
clean_registry(gsp);
@@ -1692,7 +709,7 @@ fail:
}
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-static void
+void
r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
{
const guid_t NVOP_DSM_GUID =
@@ -1702,7 +719,6 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
union acpi_object argv4 = {
.buffer.type = ACPI_TYPE_BUFFER,
.buffer.length = 4,
- .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
}, *obj;
caps->status = 0xffff;
@@ -1710,23 +726,28 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a)))
return;
+ argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL);
+ if (!argv4.buffer.pointer)
+ return;
+
obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4);
if (!obj)
- return;
+ goto done;
if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
WARN_ON(obj->buffer.length != 4))
- return;
+ goto done;
caps->status = 0;
caps->optimusCaps = *(u32 *)obj->buffer.pointer;
+done:
ACPI_FREE(obj);
kfree(argv4.buffer.pointer);
}
-static void
+void
r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
{
const guid_t JT_DSM_GUID =
@@ -1737,24 +758,28 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
union acpi_object argv4 = {
.buffer.type = ACPI_TYPE_BUFFER,
.buffer.length = sizeof(caps),
- .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
}, *obj;
jt->status = 0xffff;
+ argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL);
+ if (!argv4.buffer.pointer)
+ return;
+
obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4);
if (!obj)
- return;
+ goto done;
if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
WARN_ON(obj->buffer.length != 4))
- return;
+ goto done;
jt->status = 0;
jt->jtCaps = *(u32 *)obj->buffer.pointer;
jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
jt->bSBIOSCaps = 0;
+done:
ACPI_FREE(obj);
kfree(argv4.buffer.pointer);
@@ -1818,7 +843,7 @@ r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux
}
}
-static void
+void
r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
{
acpi_status status;
@@ -1871,7 +896,7 @@ r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
}
static int
-r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
+r535_gsp_set_system_info(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
@@ -1884,16 +909,16 @@ r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
if (IS_ERR(info))
return PTR_ERR(info);
- info->gpuPhysAddr = device->func->resource_addr(device, 0);
- info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
- info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
info->maxUserVa = TASK_SIZE;
- info->pciConfigMirrorBase = 0x088000;
- info->pciConfigMirrorSize = 0x001000;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
r535_gsp_acpi_info(gsp, &info->acpiMethodData);
- return nvkm_gsp_rpc_wr(gsp, info, false);
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
}
static int
@@ -1911,33 +936,6 @@ r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
-{
- rpc_rc_triggered_v17_02 *msg = repv;
- struct nvkm_gsp *gsp = priv;
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_chan *chan;
- unsigned long flags;
-
- if (WARN_ON(repc < sizeof(*msg)))
- return -EINVAL;
-
- nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
- msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
- msg->partitionAttributionId);
-
- chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
- if (!chan) {
- nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
- return 0;
- }
-
- nvkm_chan_error(chan, false);
- nvkm_chan_put(&chan, flags);
- return 0;
-}
-
-static int
r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
{
struct nvkm_gsp *gsp = priv;
@@ -2130,97 +1128,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_device *device = subdev->device;
- u32 wpr2_hi;
- int ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (!wpr2_hi) {
- nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
- return 0;
- }
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (WARN_ON(ret))
- return ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (WARN_ON(wpr2_hi))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- int ret;
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (ret)
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
-
- if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
-{
- GspFwWprMeta *meta;
- int ret;
-
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
- if (ret)
- return ret;
-
- meta = gsp->wpr_meta.data;
-
- meta->magic = GSP_FW_WPR_META_MAGIC;
- meta->revision = GSP_FW_WPR_META_REVISION;
-
- meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
- meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
-
- meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
- meta->sizeOfBootloader = gsp->boot.fw.size;
- meta->bootloaderCodeOffset = gsp->boot.code_offset;
- meta->bootloaderDataOffset = gsp->boot.data_offset;
- meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
-
- meta->sysmemAddrOfSignature = gsp->sig.addr;
- meta->sizeOfSignature = gsp->sig.size;
-
- meta->gspFwRsvdStart = gsp->fb.heap.addr;
- meta->nonWprHeapOffset = gsp->fb.heap.addr;
- meta->nonWprHeapSize = gsp->fb.heap.size;
- meta->gspFwWprStart = gsp->fb.wpr2.addr;
- meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
- meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
- meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
- meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
- meta->frtsOffset = gsp->fb.wpr2.frts.addr;
- meta->frtsSize = gsp->fb.wpr2.frts.size;
- meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
- meta->fbSize = gsp->fb.size;
- meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
- meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
- meta->bootCount = 0;
- meta->partitionRpcAddr = 0;
- meta->partitionRpcRequestOffset = 0;
- meta->partitionRpcReplyOffset = 0;
- meta->verified = 0;
- return 0;
-}
-
-static int
r535_gsp_shared_init(struct nvkm_gsp *gsp)
{
struct {
@@ -2271,23 +1178,11 @@ r535_gsp_shared_init(struct nvkm_gsp *gsp)
return 0;
}
-static int
-r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+static void
+r535_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
{
- GSP_ARGUMENTS_CACHED *args;
- int ret;
-
- if (!resume) {
- ret = r535_gsp_shared_init(gsp);
- if (ret)
- return ret;
+ GSP_ARGUMENTS_CACHED *args = gsp->rmargs.data;
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
- if (ret)
- return ret;
- }
-
- args = gsp->rmargs.data;
args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
args->messageQueueInitArguments.cmdQueueOffset =
@@ -2304,7 +1199,24 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
args->srInitArguments.flags = 0;
args->srInitArguments.bInPMTransition = 1;
}
+}
+
+static int
+r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+{
+ int ret;
+
+ if (!resume) {
+ ret = r535_gsp_shared_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
+ if (ret)
+ return ret;
+ }
+ gsp->rm->api->gsp->set_rmargs(gsp, resume);
return 0;
}
@@ -2797,18 +1709,22 @@ lvl1_fail:
return ret;
}
+static u32
+r535_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ return meta->gspFwWprEnd - meta->gspFwWprStart;
+}
+
int
r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
{
- u32 mbox0 = 0xff, mbox1 = 0xff;
+ struct nvkm_rm *rm = gsp->rm;
int ret;
- if (!gsp->running)
- return 0;
-
if (suspend) {
- GspFwWprMeta *meta = gsp->wpr_meta.data;
- u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
+ u32 len = rm->api->gsp->sr_data_size(gsp);
GspFwSRMeta *sr;
ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
@@ -2829,8 +1745,20 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
sr->sizeOfSuspendResumeData = len;
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ ret = rm->api->fbsr->suspend(gsp);
+ if (ret) {
+ nvkm_gsp_mem_dtor(&gsp->sr.meta);
+ nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
+ return ret;
+ }
+
+ /*
+ * TODO: Debug the GSP firmware / RPC handling to find out why,
+ * without this delay, Turing (but none of the other
+ * architectures) ends up resetting all channels after resume.
+ */
+ msleep(50);
}
ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
@@ -2838,18 +1766,10 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
return ret;
nvkm_msec(gsp->subdev.device, 2000,
- if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
+ if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000)
break;
);
- nvkm_falcon_reset(&gsp->falcon);
-
- ret = nvkm_gsp_fwsec_sb(gsp);
- WARN_ON(ret);
-
- ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
- WARN_ON(ret);
-
gsp->running = false;
return 0;
}
@@ -2857,23 +1777,12 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
int
r535_gsp_init(struct nvkm_gsp *gsp)
{
- u32 mbox0, mbox1;
int ret;
- if (!gsp->sr.meta.data) {
- mbox0 = lower_32_bits(gsp->wpr_meta.addr);
- mbox1 = upper_32_bits(gsp->wpr_meta.addr);
- } else {
- r535_gsp_rmargs_init(gsp, true);
-
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
- }
+ nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
- /* Execute booter to handle (eventually...) booting GSP-RM. */
- ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
- if (WARN_ON(ret))
- goto done;
+ if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+ return -EIO;
ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
if (ret)
@@ -2883,6 +1792,8 @@ r535_gsp_init(struct nvkm_gsp *gsp)
done:
if (gsp->sr.meta.data) {
+ gsp->rm->api->fbsr->resume(gsp);
+
nvkm_gsp_mem_dtor(&gsp->sr.meta);
nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
@@ -2944,19 +1855,6 @@ r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u
return -ENOENT;
}
-static void
-r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
-{
- nvkm_firmware_put(gsp->fws.bl);
- gsp->fws.bl = NULL;
- nvkm_firmware_put(gsp->fws.booter.unload);
- gsp->fws.booter.unload = NULL;
- nvkm_firmware_put(gsp->fws.booter.load);
- gsp->fws.booter.load = NULL;
- nvkm_firmware_put(gsp->fws.rm);
- gsp->fws.rm = NULL;
-}
-
#ifdef CONFIG_DEBUG_FS
struct r535_gsp_log {
@@ -3190,10 +2088,16 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_falcon_fw_dtor(&gsp->booter.unload);
nvkm_falcon_fw_dtor(&gsp->booter.load);
+ nvkm_gsp_mem_dtor(&gsp->fmc.args);
+ kfree(gsp->fmc.sig);
+ kfree(gsp->fmc.pkey);
+ kfree(gsp->fmc.hash);
+ nvkm_gsp_mem_dtor(&gsp->fmc.fw);
+
mutex_destroy(&gsp->msgq.mutex);
mutex_destroy(&gsp->cmdq.mutex);
- r535_gsp_dtor_fws(gsp);
+ nvkm_gsp_dtor_fws(gsp);
nvkm_gsp_mem_dtor(&gsp->rmargs);
nvkm_gsp_mem_dtor(&gsp->wpr_meta);
@@ -3206,10 +2110,17 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_gsp_mem_dtor(&gsp->logrm);
}
+static void
+r535_gsp_drop_send_user_shared_data(struct nvkm_gsp *gsp)
+{
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+}
+
int
r535_gsp_oneinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
const u8 *data;
u64 size;
int ret;
@@ -3217,16 +2128,6 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
mutex_init(&gsp->cmdq.mutex);
mutex_init(&gsp->msgq.mutex);
- ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
- &device->sec2->falcon, &gsp->booter.load);
- if (ret)
- return ret;
-
- ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
- &device->sec2->falcon, &gsp->booter.unload);
- if (ret)
- return ret;
-
/* Load GSP firmware from ELF image into DMA-accessible memory. */
ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
if (ret)
@@ -3255,65 +2156,29 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
r535_gsp_msg_run_cpu_sequencer, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
- r535_gsp_msg_rc_triggered, gsp);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, rmapi->fifo->rc_triggered, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
r535_gsp_msg_mmu_fault_queued, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+ if (rmapi->gsp->drop_send_user_shared_data)
+ rmapi->gsp->drop_send_user_shared_data(gsp);
+ if (rmapi->gsp->drop_post_nocat_record)
+ rmapi->gsp->drop_post_nocat_record(gsp);
+
ret = r535_gsp_rm_boot_ctor(gsp);
if (ret)
return ret;
/* Release FW images - we've copied them to DMA buffers now. */
- r535_gsp_dtor_fws(gsp);
-
- /* Calculate FB layout. */
- gsp->fb.wpr2.frts.size = 0x100000;
- gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
-
- gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
- gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
-
- gsp->fb.wpr2.elf.size = gsp->fw.len;
- gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
-
- {
- u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
-
- gsp->fb.wpr2.heap.size =
- gsp->func->wpr_heap.os_carveout_size +
- gsp->func->wpr_heap.base_size +
- ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
- ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
-
- gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
- }
-
- gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
- gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
-
- gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
- gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
-
- gsp->fb.heap.size = 0x100000;
- gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
-
- ret = nvkm_gsp_fwsec_frts(gsp);
- if (WARN_ON(ret))
- return ret;
+ nvkm_gsp_dtor_fws(gsp);
ret = r535_gsp_libos_init(gsp);
if (WARN_ON(ret))
return ret;
- ret = r535_gsp_wpr_meta_init(gsp);
- if (WARN_ON(ret))
- return ret;
-
- ret = r535_gsp_rpc_set_system_info(gsp);
+ ret = rmapi->gsp->set_system_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -3321,76 +2186,17 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
if (WARN_ON(ret))
return ret;
- /* Reset GSP into RISC-V mode. */
- ret = gsp->func->reset(gsp);
- if (WARN_ON(ret))
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
- nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
-
mutex_init(&gsp->client_id.mutex);
idr_init(&gsp->client_id.idr);
return 0;
}
-static int
-r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
- const struct firmware **pfw)
-{
- char fwname[64];
-
- snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
- return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
-}
-
-int
-r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- int ret;
- bool enable_gsp = fwif->enable;
-
-#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
- enable_gsp = true;
-#endif
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
- return -EINVAL;
-
- if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
- (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
- r535_gsp_dtor_fws(gsp);
- return ret;
- }
-
- return 0;
-}
-
-#define NVKM_GSP_FIRMWARE(chip) \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")
-
-NVKM_GSP_FIRMWARE(tu102);
-NVKM_GSP_FIRMWARE(tu104);
-NVKM_GSP_FIRMWARE(tu106);
-
-NVKM_GSP_FIRMWARE(tu116);
-NVKM_GSP_FIRMWARE(tu117);
-
-NVKM_GSP_FIRMWARE(ga100);
-
-NVKM_GSP_FIRMWARE(ga102);
-NVKM_GSP_FIRMWARE(ga103);
-NVKM_GSP_FIRMWARE(ga104);
-NVKM_GSP_FIRMWARE(ga106);
-NVKM_GSP_FIRMWARE(ga107);
-
-NVKM_GSP_FIRMWARE(ad102);
-NVKM_GSP_FIRMWARE(ad103);
-NVKM_GSP_FIRMWARE(ad104);
-NVKM_GSP_FIRMWARE(ad106);
-NVKM_GSP_FIRMWARE(ad107);
+const struct nvkm_rm_api_gsp
+r535_gsp = {
+ .set_rmargs = r535_gsp_set_rmargs,
+ .set_system_info = r535_gsp_set_system_info,
+ .get_static_info = r535_gsp_get_static_info,
+ .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx,
+ .drop_send_user_shared_data = r535_gsp_drop_send_user_shared_data,
+ .sr_data_size = r535_gsp_sr_data_size,
+};
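Gathering these hooks into nvkm_rm_api_gsp is what lets another firmware branch reuse the r535 implementations wholesale and override only the entries that changed. A hypothetical sketch (the table name and the overridden hook are illustrative, not part of this patch, and the shared hooks would need non-static linkage in practice):

/* Hypothetical: a newer firmware branch keeping most r535 behaviour. */
const struct nvkm_rm_api_gsp
rNNN_gsp = {
	.set_rmargs = r535_gsp_set_rmargs,
	.set_system_info = r535_gsp_set_system_info,
	.get_static_info = rNNN_gsp_get_static_info,	/* version-specific */
	.xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx,
	.sr_data_size = r535_gsp_sr_data_size,
};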
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
new file mode 100644
index 000000000000..a8c42ec0367b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/nvdec.h"
+
+static int
+r535_nvdec_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvdec)
+{
+ NV_BSP_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvdec);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(nvdec, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_nvdec = {
+ .alloc = r535_nvdec_alloc,
+};
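The nvdec, nvenc and nvjpg files all follow the same shape: size an alloc RPC for the engine's parameter struct, fill in the engine instance, push. A sketch of how the hook might be invoked (the channel object, handle and class value are assumptions for illustration; the real dispatch goes through an nvkm_rm_api_engine table lookup):

struct nvkm_gsp_object nvdec;
int ret;

ret = r535_nvdec.alloc(&chan->rm.object, handle,
		       0xc4b0 /* e.g. a Turing-era NVDEC class */, 0, &nvdec);
if (ret)
	return ret;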
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
new file mode 100644
index 000000000000..acb3ce8bb9de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/nvenc.h"
+
+static int
+r535_nvenc_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvenc)
+{
+ NV_MSENC_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvenc);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(nvenc, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_nvenc = {
+ .alloc = r535_nvenc_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
new file mode 100644
index 000000000000..fbc4080ad8d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/nvjpg.h"
+
+static int
+r535_nvjpg_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvjpg)
+{
+ NV_NVJPG_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvjpg);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(nvjpg, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_nvjpg = {
+ .alloc = r535_nvjpg_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
new file mode 100644
index 000000000000..cbc7e611fbda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ALLOC_H__
+#define __NVRM_ALLOC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_alloc_v03_00
+{
+ NvHandle hClient;
+ NvHandle hParent;
+ NvHandle hObject;
+ NvU32 hClass;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 reserved[4];
+ NvU8 params[];
+} rpc_gsp_rm_alloc_v03_00;
+
+typedef struct NVOS00_PARAMETERS_v03_00
+{
+ NvHandle hRoot;
+ NvHandle hObjectParent;
+ NvHandle hObjectOld;
+ NvV32 status;
+} NVOS00_PARAMETERS_v03_00;
+
+typedef struct rpc_free_v03_00
+{
+ NVOS00_PARAMETERS_v03_00 params;
+} rpc_free_v03_00;
+#endif
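Because params[] is a flexible array member, the wire size of one alloc RPC is the fixed header plus paramsSize bytes, which is exactly how r535_gsp_rpc_rm_alloc_get() sizes its request. A minimal sketch:

#include <stddef.h>

/* Wire size of an alloc RPC carrying params_size payload bytes;
 * params[] begins immediately after the fixed header fields. */
static size_t rm_alloc_rpc_size(NvU32 params_size)
{
	return offsetof(rpc_gsp_rm_alloc_v03_00, params) + params_size;
}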
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
new file mode 100644
index 000000000000..60b0b08491ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_BAR_H__
+#define __NVRM_BAR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef enum
+{
+ NV_RPC_UPDATE_PDE_BAR_1,
+ NV_RPC_UPDATE_PDE_BAR_2,
+ NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
+typedef struct UpdateBarPde_v15_00
+{
+ NV_RPC_UPDATE_PDE_BAR_TYPE barType;
+ NvU64 entryValue NV_ALIGN_BYTES(8);
+ NvU64 entryLevelShift NV_ALIGN_BYTES(8);
+} UpdateBarPde_v15_00;
+
+typedef struct rpc_update_bar_pde_v15_00
+{
+ UpdateBarPde_v15_00 info;
+} rpc_update_bar_pde_v15_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
new file mode 100644
index 000000000000..90b0325203d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CE_H__
+#define __NVRM_CE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct NVC0B5_ALLOCATION_PARAMETERS {
+ NvU32 version;
+ NvU32 engineType;
+} NVC0B5_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
new file mode 100644
index 000000000000..df0e63c0cb6b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
new file mode 100644
index 000000000000..77f10acd82c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CTRL_H__
+#define __NVRM_CTRL_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_control_v03_00
+{
+ NvHandle hClient;
+ NvHandle hObject;
+ NvU32 cmd;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 params[];
+} rpc_gsp_rm_control_v03_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
new file mode 100644
index 000000000000..3933b9ad61ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DEVICE_H__
+#define __NVRM_DEVICE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0080_ALLOC_PARAMETERS {
+ NvU32 deviceId;
+ NvHandle hClientShare;
+ NvHandle hTargetClient;
+ NvHandle hTargetDevice;
+ NvV32 flags;
+ NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
+ NvV32 vaMode;
+} NV0080_ALLOC_PARAMETERS;
+
+#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV2080_ALLOC_PARAMETERS {
+ NvU32 subDeviceId;
+} NV2080_ALLOC_PARAMETERS;
+#endif
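These are the parameter structs the r535 client/device constructors feed through the alloc path. A sketch of allocating an NV20_SUBDEVICE_0 child under an existing device object, assuming the get/wr helpers used elsewhere in this series and an illustrative handle value:

NV2080_ALLOC_PARAMETERS *args;

args = nvkm_gsp_rm_alloc_get(&device->object, handle, NV20_SUBDEVICE_0,
			     sizeof(*args), &device->subdevice);
if (IS_ERR(args))
	return PTR_ERR(args);

args->subDeviceId = 0;
return nvkm_gsp_rm_alloc_wr(&device->subdevice, args);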
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
new file mode 100644
index 000000000000..7b7539639540
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
@@ -0,0 +1,741 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
+ NvU32 instMemAddrSpace;
+ NvU32 instMemCpuCacheAttr;
+} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
+
+#define NV_MEMORY_WRITECOMBINED 2
+
+#define NV04_DISPLAY_COMMON (0x00000073)
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvBool bPrimaryVga;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
+ NvU32 status;
+ NvU16 backLightDataSize;
+ NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
+} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+ NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 index;
+ NvU32 type;
+ NvU32 protocol;
+ NvU32 ditherType;
+ NvU32 ditherAlgo;
+ NvU32 location;
+ NvU32 rootPortId;
+ NvU32 dcbIndex;
+ NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+ NvBool bIsLitByVbios;
+ NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupported;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
+#define NV2080_NOTIFIERS_HOTPLUG (1)
+
+typedef struct {
+ NvU32 plugDisplayMask;
+ NvU32 unplugDisplayMask;
+} Nv2080HotplugNotification;
+
+#define NV2080_NOTIFIERS_DP_IRQ (7)
+
+typedef struct Nv2080DpIrqNotificationRec {
+ NvU32 displayId;
+} Nv2080DpIrqNotification;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 flags2;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+ NvU32 displayMask;
+ NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 sorExcludeMask;
+ NvU32 slaveDisplayId;
+ NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+ NvBool bIs2Head1Or;
+ NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NvU8 reservedSorMask;
+ NvU32 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numELDSize;
+ NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+ NvU32 maxFreqSupported;
+ NvU32 ctrl;
+ NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
+
+#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 bufferSize;
+ NvU32 flags;
+ NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 transmitControl;
+ NvU32 packetSize;
+ NvU32 targetHead;
+ NvBool bUsePsrHeadforSdp;
+ NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool bAddrOnly;
+ NvU32 cmd;
+ NvU32 addr;
+ NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+ NvU32 size;
+ NvU32 replyType;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
+
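+/*
+ * Illustrative sketch: a native-AUX DPCD read request through
+ * NV0073_CTRL_CMD_DP_AUXCH_CTRL.  NVDEF() is nouveau's DRF helper from
+ * <nvhw/drf.h>; the "length minus one" size encoding is an assumption
+ * modelled on common DP AUX interfaces, not confirmed by this header.
+ */
+static inline void
+nvrm_example_aux_read(NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl,
+                      NvU32 displayId, NvU32 dpcdAddr, NvU32 size)
+{
+        ctrl->displayId = displayId;
+        ctrl->bAddrOnly = (size == 0);
+        ctrl->addr = dpcdAddr;
+        ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+        ctrl->cmd = NVDEF(NV0073_CTRL, DP_AUXCH_CMD, TYPE, AUX) |
+                    NVDEF(NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, READ);
+        /* On completion the reply bytes land in ctrl->data[] with replyType set. */
+}
+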
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED 3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET 22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
+
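+/*
+ * Illustrative sketch: composing the cmd/data words of
+ * NV0073_CTRL_DP_CTRL_PARAMS for a 4-lane HBR2 single-stream link request.
+ * NVDEF() (<nvhw/drf.h>) shifts the named values into the ranges above.
+ */
+static inline void
+nvrm_example_dp_link_config(NV0073_CTRL_DP_CTRL_PARAMS *ctrl, NvU32 displayId)
+{
+        ctrl->displayId = displayId;
+        ctrl->cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
+                    NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
+                    NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, SINGLE_STREAM);
+        ctrl->data = NVDEF(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, 4) |
+                     NVDEF(NV0073_CTRL, DP_DATA, SET_LINK_BW, 5_40GBPS) |
+                     NVDEF(NV0073_CTRL, DP_DATA, TARGET, SINK);
+}
+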
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_MAX_LANES 8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numLanes;
+ NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 preferredDisplayId;
+
+ NvBool force;
+ NvBool useBFM;
+
+ NvU32 displayIdAssigned;
+ NvU32 allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+ NvBool sendACT; // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvU32 actualPclkHz; // deprecated - use MvidWarParams
+ NvU32 linkClkFreqHz; // deprecated - use MvidWarParams
+ NvBool bEnableAudioOverRightPanel;
+ struct {
+ NvU32 activeCnt;
+ NvU32 activeFrac;
+ NvU32 activePolarity;
+ NvBool mvidWarEnabled;
+ struct {
+ NvU32 actualPclkHz;
+ NvU32 linkClkFreqHz;
+ } MvidWarParams;
+ } Legacy;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1) // System memory (PCI)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // All PIO channels have two instances (one per head).
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
new file mode 100644
index 000000000000..b26dfc8f8087
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE9
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_MSENC 38
+#define MC_ENGINE_IDX_MSENC1 39
+#define MC_ENGINE_IDX_MSENC2 40
+#define MC_ENGINE_IDX_C2C 41
+#define MC_ENGINE_IDX_LTC 42
+#define MC_ENGINE_IDX_FBHUB 43
+#define MC_ENGINE_IDX_HDACODEC 44
+#define MC_ENGINE_IDX_GMMU 45
+#define MC_ENGINE_IDX_SEC2 46
+#define MC_ENGINE_IDX_FSP 47
+#define MC_ENGINE_IDX_NVLINK 48
+#define MC_ENGINE_IDX_GSP 49
+#define MC_ENGINE_IDX_NVJPG 50
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 51
+#define MC_ENGINE_IDX_NVJPEG2 52
+#define MC_ENGINE_IDX_NVJPEG3 53
+#define MC_ENGINE_IDX_NVJPEG4 54
+#define MC_ENGINE_IDX_NVJPEG5 55
+#define MC_ENGINE_IDX_NVJPEG6 56
+#define MC_ENGINE_IDX_NVJPEG7 57
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 58
+#define MC_ENGINE_IDX_ACCESS_CNTR 59
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 60
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 61
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_INFO_FAULT 63
+#define MC_ENGINE_IDX_BSP 64
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 65
+#define MC_ENGINE_IDX_NVDEC2 66
+#define MC_ENGINE_IDX_NVDEC3 67
+#define MC_ENGINE_IDX_NVDEC4 68
+#define MC_ENGINE_IDX_NVDEC5 69
+#define MC_ENGINE_IDX_NVDEC6 70
+#define MC_ENGINE_IDX_NVDEC7 71
+#define MC_ENGINE_IDX_CPU_DOORBELL 72
+#define MC_ENGINE_IDX_PRIV_DOORBELL 73
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 74
+#define MC_ENGINE_IDX_BLG 75
+#define MC_ENGINE_IDX_PERFMON 76
+#define MC_ENGINE_IDX_BUF_RESET 77
+#define MC_ENGINE_IDX_XBAR 78
+#define MC_ENGINE_IDX_ZPW 79
+#define MC_ENGINE_IDX_OFA0 80
+#define MC_ENGINE_IDX_TEGRA 81
+#define MC_ENGINE_IDX_GR 82
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 83
+#define MC_ENGINE_IDX_GR2 84
+#define MC_ENGINE_IDX_GR3 85
+#define MC_ENGINE_IDX_GR4 86
+#define MC_ENGINE_IDX_GR5 87
+#define MC_ENGINE_IDX_GR6 88
+#define MC_ENGINE_IDX_GR7 89
+#define MC_ENGINE_IDX_ESCHED 90
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 154
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 155
+#define MC_ENGINE_IDX_GR2_FECS_LOG 156
+#define MC_ENGINE_IDX_GR3_FECS_LOG 157
+#define MC_ENGINE_IDX_GR4_FECS_LOG 158
+#define MC_ENGINE_IDX_GR5_FECS_LOG 159
+#define MC_ENGINE_IDX_GR6_FECS_LOG 160
+#define MC_ENGINE_IDX_GR7_FECS_LOG 161
+#define MC_ENGINE_IDX_TMR_SWRL 162
+#define MC_ENGINE_IDX_DISP_GSP 163
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 164
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 165
+#define MC_ENGINE_IDX_PXUC 166
+#define MC_ENGINE_IDX_MAX 167 // This must be kept as the max bit if we need to add more engines
+#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF
+#define MC_ENGINE_IDX_GRn(x) (MC_ENGINE_IDX_GR0 + (x))
+#define MC_ENGINE_IDX_GRn_FECS_LOG(x) (MC_ENGINE_IDX_GR0_FECS_LOG + (x))
+#define MC_ENGINE_IDX_CE(x) (MC_ENGINE_IDX_CE0 + (x))
+#define MC_ENGINE_IDX_MSENCn(x) (MC_ENGINE_IDX_MSENC + (x))
+#define MC_ENGINE_IDX_NVDECn(x) (MC_ENGINE_IDX_NVDEC + (x))
+#define MC_ENGINE_IDX_NVJPEGn(x) (MC_ENGINE_IDX_NVJPEG + (x))
+#define MC_ENGINE_IDX_ESCHEDn(x) (MC_ENGINE_IDX_ESCHED + (x))
+
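+/*
+ * Sanity sketch: the parameterised helpers above are plain offsets into the
+ * index space, so e.g. MC_ENGINE_IDX_CE(9) must land on MC_ENGINE_IDX_CE9
+ * and every defined index stays below MC_ENGINE_IDX_MAX.
+ */
+_Static_assert(MC_ENGINE_IDX_CE(9) == MC_ENGINE_IDX_CE9, "CE index offset");
+_Static_assert(MC_ENGINE_IDX_GRn(7) == MC_ENGINE_IDX_GR7, "GR index offset");
+_Static_assert(MC_ENGINE_IDX_PXUC < MC_ENGINE_IDX_MAX, "index range");
+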
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ RM_ENGINE_TYPE_VP = (0x00000028),
+ RM_ENGINE_TYPE_ME = (0x00000029),
+ RM_ENGINE_TYPE_PPP = (0x0000002a),
+ RM_ENGINE_TYPE_MPEG = (0x0000002b),
+ RM_ENGINE_TYPE_SW = (0x0000002c),
+ RM_ENGINE_TYPE_TSEC = (0x0000002d),
+ RM_ENGINE_TYPE_VIC = (0x0000002e),
+ RM_ENGINE_TYPE_MP = (0x0000002f),
+ RM_ENGINE_TYPE_SEC2 = (0x00000030),
+ RM_ENGINE_TYPE_HOST = (0x00000031),
+ RM_ENGINE_TYPE_DPU = (0x00000032),
+ RM_ENGINE_TYPE_PMU = (0x00000033),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000034),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
+ RM_ENGINE_TYPE_OFA = (0x0000003d),
+ RM_ENGINE_TYPE_LAST = (0x0000003e),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_LAST (0x0000003e)
+#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff)
+#define NV2080_ENGINE_TYPE_COPY_SIZE 10
+#define NV2080_ENGINE_TYPE_NVENC_SIZE 3
+#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 8
+#define NV2080_ENGINE_TYPE_NVDEC_SIZE 8
+#define NV2080_ENGINE_TYPE_GR_SIZE 8
+#define NV2080_ENGINE_TYPE_COPY(i) (NV2080_ENGINE_TYPE_COPY0+(i))
+#define NV2080_ENGINE_TYPE_IS_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) <= NV2080_ENGINE_TYPE_COPY9))
+#define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0)
+#define NV2080_ENGINE_TYPE_NVENC(i) (NV2080_ENGINE_TYPE_NVENC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVENC(i) (((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) < NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVENC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVENC0)
+#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0)
+#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE)))
+#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0)
+#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i))
+#define NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE)))
+#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0)
+#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST)))
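+
+/*
+ * Illustrative sketch: the IS_xxx()/xxx_IDX() helpers above classify an
+ * engine type and recover its per-class instance, e.g. COPY3 maps back to
+ * copy engine 3.
+ */
+static inline int
+nv2080_example_ce_index(NvU32 type)
+{
+        /* Returns the CE instance for a COPYn engine type, -1 otherwise. */
+        if (!NV2080_ENGINE_TYPE_IS_COPY(type))
+                return -1;
+        return (int)NV2080_ENGINE_TYPE_COPY_IDX(type);
+}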
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
new file mode 100644
index 000000000000..057f7220c225
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_EVENT_H__
+#define __NVRM_EVENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
+
+typedef struct NV0005_ALLOC_PARAMETERS {
+ NvHandle hParentClient;
+ NvHandle hSrcResource;
+
+ NvV32 hClass;
+ NvV32 notifyIndex;
+ NV_DECLARE_ALIGNED(NvP64 data, 8);
+} NV0005_ALLOC_PARAMETERS;
+
+#define NV01_EVENT_CLIENT_RM (0x04000000)
+
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+ NvU32 event;
+ NvU32 action;
+ NvBool bNotifyState;
+ NvU32 info32;
+ NvU16 info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
+
+typedef struct rpc_post_event_v17_00
+{
+ NvHandle hClient;
+ NvHandle hEvent;
+ NvU32 notifyIndex;
+ NvU32 data;
+ NvU16 info16;
+ NvU32 status;
+ NvU32 eventDataSize;
+ NvBool bNotifyList;
+ NvU8 eventData[];
+} rpc_post_event_v17_00;
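+
+/*
+ * Illustrative sketch of how the pieces above combine when registering for a
+ * GSP event: the NV0005 allocation names the callback class and event index
+ * (OR'ing in NV01_EVENT_CLIENT_RM so delivery stays RM-side), and the NV2080
+ * control arms repeating notification.  Handle values are placeholders.
+ */
+static inline void
+nvrm_example_event_setup(NV0005_ALLOC_PARAMETERS *alloc,
+                         NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl,
+                         NvHandle hClient, NvHandle hSubdevice, NvU32 eventId)
+{
+        alloc->hParentClient = hClient;
+        alloc->hSrcResource = hSubdevice;
+        alloc->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
+        alloc->notifyIndex = NV01_EVENT_CLIENT_RM | eventId;
+        ctrl->event = eventId;
+        ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
+}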
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
new file mode 100644
index 000000000000..28786ef013a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_MEMORY_LIST_FBMEM (0x00000082)
+
+#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
+
+#define NVOS02_FLAGS_PHYSICALITY 7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
+#define NVOS02_FLAGS_LOCATION 11:8
+#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
+#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
+#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
+#define NVOS02_FLAGS_COHERENCY 15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
+#define NVOS02_FLAGS_ALLOC 17:16
+#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
+#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
+#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
+#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
+#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
+#define NVOS02_FLAGS_MAPPING 31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
+
+struct pte_desc
+{
+ NvU32 idr:2;
+ NvU32 reserved1:14;
+ NvU32 length:16;
+ union {
+ NvU64 pte; // PTE when IDR == 0
+ NvU64 pde; // PDE when IDR > 0
+ } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR == 0; PDE when IDR > 0
+};
+
+typedef struct rpc_alloc_memory_v13_01
+{
+ NvHandle hClient;
+ NvHandle hDevice;
+ NvHandle hMemory;
+ NvU32 hClass;
+ NvU32 flags;
+ NvU32 pteAdjust;
+ NvU32 format;
+ NvU64 length NV_ALIGN_BYTES(8);
+ NvU32 pageCount;
+ struct pte_desc pteDesc;
+} rpc_alloc_memory_v13_01;
+
+#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvU32 fbsrType;
+ NvU32 numRegions;
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
+ NvBool bEnteringGcoffState;
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
+ NvU32 fbsrType;
+ NvHandle hClient;
+ NvHandle hVidMem;
+ NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
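+
+/*
+ * Illustrative sketch: FBSR_INIT names a sysmem save buffer once, then each
+ * SEND_REGION_INFO describes one vidmem region to save over DMA
+ * (FBSR_TYPE_DMA).  Handles, offsets and sizes are placeholders.
+ */
+static inline void
+nvrm_example_fbsr_region(NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl,
+                         NvHandle hClient, NvHandle hVidMem, NvU64 size)
+{
+        ctrl->fbsrType = FBSR_TYPE_DMA;
+        ctrl->hClient = hClient;
+        ctrl->hVidMem = hVidMem;
+        ctrl->vidOffset = 0;
+        ctrl->sysOffset = 0;
+        ctrl->size = size;
+}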
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
new file mode 100644
index 000000000000..325fdd8b6090
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+ NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+ NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 numPbdmas;
+ char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+ NvU32 baseIndex;
+ NvU32 numEntries;
+ NvBool bMore;
+ // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+ NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
+
+typedef enum
+{
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+
+ // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
+ ENGINE_INFO_TYPE_ENG_DESC = 0,
+
+ // HW engine ID
+ ENGINE_INFO_TYPE_FIFO_TAG,
+
+ // RM_ENGINE_TYPE_*
+ ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
+
+ //
+ // runlist id (meaning varies by GPU)
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST,
+
+ // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
+ ENGINE_INFO_TYPE_MMU_FAULT_ID,
+
+ // ROBUST_CHANNEL_*
+ ENGINE_INFO_TYPE_RC_MASK,
+
+ // Reset Bit Position. On Ampere, only valid if not _INVALID
+ ENGINE_INFO_TYPE_RESET,
+
+ // Interrupt Bit Position
+ ENGINE_INFO_TYPE_INTR,
+
+ // log2(MC_ENGINE_*)
+ ENGINE_INFO_TYPE_MC,
+
+ // The DEV_TYPE_ENUM for this engine
+ ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
+
+ // The particular instance of this engine type
+ ENGINE_INFO_TYPE_INSTANCE_ID,
+
+ //
+ // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
+
+ //
+ // If this entry is a host-driven engine.
+ // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
+ //
+ ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
+
+ //
+ // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
+
+ //
+ // The base address for this engine's NV_CHRAM registers. Valid only on
+ // Ampere+
+ //
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
+
+ // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
+ ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+ // Used for iterating the engine info table by the index passed.
+ ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+
+ // Size of FIFO_ENGINE_LIST.engineData
+ ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
+
+ // Input-only parameter for kfifoEngineInfoXlate.
+ ENGINE_INFO_TYPE_PBDMA_ID
+
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+} ENGINE_INFO_TYPE;
+
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+ NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+#define NV_MAX_SUBDEVICES 8
+
+typedef struct NV_MEMORY_DESC_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 addressSpace;
+ NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+ // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+ // Channel identifier that is unique for the duration of a RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // Nonce used for CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
+#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
+#define NVOS04_FLAGS_VPR 2:2
+#define NVOS04_FLAGS_VPR_FALSE 0x00000000
+#define NVOS04_FLAGS_VPR_TRUE 0x00000001
+#define NVOS04_FLAGS_CC_SECURE 2:2
+#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
+#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
+#define NVOS04_FLAGS_MAP_CHANNEL 30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
+
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+
+#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_BIND_PARAMS {
+ NvU32 engineType;
+} NVA06F_CTRL_BIND_PARAMS;
+
+#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
+ NvBool bEnable;
+ NvBool bSkipSubmit;
+} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
+
+#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
+ NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 physAttr;
+ NvU16 bufferId;
+ NvU8 bInitialize;
+ NvU8 bNonmapped;
+} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
+
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+ NvU32 engineType;
+ NvHandle hClient;
+ NvU32 ChID;
+ NvHandle hChanClient;
+ NvHandle hObject;
+ NvHandle hVirtMemory;
+ NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 entryCount;
+ // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+ NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+} rpc_rc_triggered_v17_02;
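+
+/*
+ * Illustrative sketch: the device-info table is paged MAX_ENTRIES at a time;
+ * the control sets bMore while entries remain, so callers advance baseIndex
+ * until it clears.  rm_ctrl() is a placeholder for the driver's GSP control
+ * plumbing, not a real API.
+ */
+static inline int
+nvrm_example_device_info_walk(NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl,
+                              int (*rm_ctrl)(void *priv, void *params),
+                              void *priv)
+{
+        ctrl->baseIndex = 0;
+        do {
+                int ret = rm_ctrl(priv, ctrl);
+
+                if (ret)
+                        return ret;
+                /* ... consume ctrl->entries[0 .. ctrl->numEntries - 1] ... */
+                ctrl->baseIndex += NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES;
+        } while (ctrl->bMore);
+        return 0;
+}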
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
index 6acb3f73242d..82c5ec727bb4 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
@@ -1,30 +1,31 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
@@ -54,4 +55,19 @@
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019)
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
+
+#include "fifo.h"
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
new file mode 100644
index 000000000000..b6683a5bf870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[4];
+ char chipSKUMod[2];
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+typedef enum
+{
+ COMPUTE_BRANDING_TYPE_NONE,
+ COMPUTE_BRANDING_TYPE_TESLA,
+} COMPUTE_BRANDING_TYPE;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+typedef struct GspSMInfo_t
+{
+ NvU32 version;
+ NvU32 regBankCount;
+ NvU32 regBankRegCount;
+ NvU32 maxWarpsPerSM;
+ NvU32 maxThreadsPerWarp;
+ NvU32 geomGsObufEntries;
+ NvU32 geomXbufEntries;
+ NvU32 maxSPPerSM;
+ NvU32 rtCoreCount;
+} GspSMInfo;
+
+typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
+} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+ COMPUTE_BRANDING_TYPE computeBranding;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ GspSMInfo SM_info;
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU32 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU32 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+ NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+} GspStaticConfigInfo;
+
+typedef struct rpc_unloading_guest_driver_v1F_07
+{
+ NvBool bInPMTransition;
+ NvBool bGc6Entering;
+ NvU32 newLevel;
+} rpc_unloading_guest_driver_v1F_07;
+
+typedef struct PACKED_REGISTRY_ENTRY
+{
+ NvU32 nameOffset;
+ NvU8 type;
+ NvU32 data;
+ NvU32 length;
+} PACKED_REGISTRY_ENTRY;
+
+typedef struct PACKED_REGISTRY_TABLE
+{
+ NvU32 size;
+ NvU32 numEntries;
+ PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
+} PACKED_REGISTRY_TABLE;
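
PACKED_REGISTRY_TABLE ends in a flexible array bounded by __counted_by(numEntries). As a minimal sketch (helper name invented here, not part of the patch, and assuming nameOffset indexes into a string pool appended after the entries), the allocation size works out to:

/* Illustrative only: bytes needed for a registry table with n entries
 * followed by pool_bytes of packed name strings. */
static inline size_t packed_registry_size(NvU32 n, size_t pool_bytes)
{
	return sizeof(PACKED_REGISTRY_TABLE) +
	       n * sizeof(PACKED_REGISTRY_ENTRY) + pool_bytes;
}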
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bMnocAvailable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+} rpc_os_error_log_v17_00;
+
+typedef struct rpc_run_cpu_sequencer_v17_00
+{
+ NvU32 bufferSizeDWord;
+ NvU32 cmdIndex;
+ NvU32 regSaveArea[8];
+ NvU32 commandBuffer[];
+} rpc_run_cpu_sequencer_v17_00;
+
+typedef enum GSP_SEQ_BUF_OPCODE
+{
+ GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
+ GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+ GSP_SEQ_BUF_OPCODE_REG_POLL,
+ GSP_SEQ_BUF_OPCODE_DELAY_US,
+ GSP_SEQ_BUF_OPCODE_REG_STORE,
+ GSP_SEQ_BUF_OPCODE_CORE_RESET,
+ GSP_SEQ_BUF_OPCODE_CORE_START,
+ GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+ GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+} GSP_SEQ_BUF_OPCODE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+ NvU32 timeout;
+ NvU32 error;
+} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
+
+typedef struct
+{
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 index;
+} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
+
+typedef struct GSP_SEQUENCER_BUFFER_CMD
+{
+ GSP_SEQ_BUF_OPCODE opCode;
+ union
+ {
+ GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
+ GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
+ GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
+ GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
+ GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
+ } payload;
+} GSP_SEQUENCER_BUFFER_CMD;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to setup heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[7];
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
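
A minimal sketch of how a consumer might sanity-check the handoff structure against these constants (illustrative only, not taken from this patch):

/* Illustrative only: validate the Booter/BL handoff metadata. */
static inline bool gsp_fw_wpr_meta_valid(const GspFwWprMeta *meta)
{
	return meta->magic == GSP_FW_WPR_META_MAGIC &&
	       meta->revision == GSP_FW_WPR_META_REVISION;
}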
+
+typedef struct
+{
+ NvU32 version; // queue version
+ NvU32 size; // bytes, page aligned
+ NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
+ NvU32 msgCount; // number of entries in queue
+ NvU32 writePtr; // message id of next slot
+ NvU32 flags; // if set it means "i want to swap RX"
+ NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
+ NvU32 entryOff; // Offset of entries from start of backing store.
+} msgqTxHeader;
+
+typedef struct
+{
+ NvU32 readPtr; // message id of last message read
+} msgqRxHeader;
+
+typedef struct {
+ RmPhysAddr sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+ NvLength locklessCmdQueueOffset;
+ NvLength locklessStatQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef NvU64 LibosAddress;
+
+typedef struct
+{
+ LibosAddress id8; // Id tag.
+ LibosAddress pa; // Physical address.
+ LibosAddress size; // Size of memory area.
+ NvU8 kind; // See LibosMemoryRegionKind above.
+ NvU8 loc; // See LibosMemoryRegionLoc above.
+} LibosMemoryRegionInitArgument;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_NONE,
+ LIBOS_MEMORY_REGION_CONTIGUOUS,
+ LIBOS_MEMORY_REGION_RADIX3
+} LibosMemoryRegionKind;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_LOC_NONE,
+ LIBOS_MEMORY_REGION_LOC_SYSMEM,
+ LIBOS_MEMORY_REGION_LOC_FB
+} LibosMemoryRegionLoc;
+
+typedef struct
+{
+ //
+ // Magic
+ // Use for verification by Booter
+ //
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ //
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+ //
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ // ---- Members for crypto ops across S/R ---------------------------
+
+ //
+ // HMAC over the entire GspFwSRMeta structure (including padding)
+ // with the hmac field itself zeroed.
+ //
+ NvU8 hmac[32];
+
+ // Hash over GspFwWprMeta structure
+ NvU8 wprMetaHash[32];
+
+ // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
+ NvU8 heapFreeListHash[32];
+
+ // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
+ NvU8 dataHash[32];
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[24];
+
+} GspFwSRMeta;
+
+#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
+
+#define GSP_FW_SR_META_REVISION 2
+
+#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
+ ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
+ 0)
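
Since only the register-style opcodes carry payload words, a parser can step through a packed sequencer buffer one opcode word plus its payload at a time. A hedged sketch (function name invented for illustration):

/* Illustrative walk over a CPU-sequencer buffer of 'dwords' NvU32 words. */
static void gsp_sequencer_walk(const NvU32 *buf, NvU32 dwords)
{
	NvU32 i = 0;

	while (i < dwords) {
		const GSP_SEQUENCER_BUFFER_CMD *cmd = (const void *)&buf[i];

		/* One opcode word, then the opcode-specific payload. */
		i += 1 + GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);
	}
}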
+
+typedef struct {
+ //
+ // Version 1
+ // Version 2
+ // Version 3 = for Partition boot
+ // Version 4 = for eb riscv boot
+ // Version 5 = Support signing entire RISC-V image as "code" in code section for Hopper and later.

+ //
+ NvU32 version; // structure version
+ NvU32 bootloaderOffset;
+ NvU32 bootloaderSize;
+ NvU32 bootloaderParamOffset;
+ NvU32 bootloaderParamSize;
+ NvU32 riscvElfOffset;
+ NvU32 riscvElfSize;
+ NvU32 appVersion; // Changelist number associated with the image
+ //
+ // Manifest contains information about Monitor and it is
+ // input to BR
+ //
+ NvU32 manifestOffset;
+ NvU32 manifestSize;
+ //
+ // Monitor Data offset within RISCV image and size
+ //
+ NvU32 monitorDataOffset;
+ NvU32 monitorDataSize;
+ //
+ // Monitor Code offset within RISCV image and size
+ //
+ NvU32 monitorCodeOffset;
+ NvU32 monitorCodeSize;
+ NvU32 bIsMonitorEnabled;
+ //
+ // Swbrom Code offset within RISCV image and size
+ //
+ NvU32 swbromCodeOffset;
+ NvU32 swbromCodeSize;
+ //
+ // Swbrom Data offset within RISCV image and size
+ //
+ NvU32 swbromDataOffset;
+ NvU32 swbromDataSize;
+ //
+ // Total size of FB carveout (image and reserved space).
+ //
+ NvU32 fbReservedSize;
+ //
+ // Indicates whether the entire RISC-V image is signed as "code" in code section.
+ //
+ NvU32 bSignedAsCode;
+} RM_RISCV_UCODE_DESC;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+ NvU16 engineIdx;
+ NvU32 pmcIntrMask;
+ NvU32 vectorStall;
+ NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
+ NvU8 subtreeStart;
+ NvU8 subtreeEnd;
+} NV2080_INTR_CATEGORY_SUBTREE_MAP;
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
+
+typedef enum NV2080_INTR_CATEGORY {
+ NV2080_INTR_CATEGORY_DEFAULT = 0,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
+ NV2080_INTR_CATEGORY_RUNLIST = 3,
+ NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
+ NV2080_INTR_CATEGORY_UVM_OWNED = 5,
+ NV2080_INTR_CATEGORY_UVM_SHARED = 6,
+ NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
+} NV2080_INTR_CATEGORY;
+
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+ NvU32 tableLen;
+ NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+ NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
+
+#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
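
For scale: (48 << 10) bytes is 48 KiB per channel, so this parameter reserves 48 KiB * 2048 = 96 MiB of heap for client allocations.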
+
+typedef union rpc_message_rpc_union_field_v03_00
+{
+ NvU32 spare;
+ NvU32 cpuRmGfid;
+} rpc_message_rpc_union_field_v03_00;
+
+typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v;
+
+typedef struct rpc_message_header_v03_00
+{
+ NvU32 header_version;
+ NvU32 signature;
+ NvU32 length;
+ NvU32 function;
+ NvU32 rpc_result;
+ NvU32 rpc_result_private;
+ NvU32 sequence;
+ rpc_message_rpc_union_field_v u;
+ rpc_generic_union rpc_message_data[];
+} rpc_message_header_v03_00;
+
+typedef rpc_message_header_v03_00 rpc_message_header_v;
+
+typedef struct GSP_MSG_QUEUE_ELEMENT
+{
+ NvU8 authTagBuffer[16]; // Authentication tag buffer.
+ NvU8 aadBuffer[16]; // AAD buffer.
+ NvU32 checkSum; // Set to value needed to make checksum always zero.
+ NvU32 seqNum; // Sequence number maintained by the message queue.
+ NvU32 elemCount; // Number of message queue elements this message has.
+ NV_DECLARE_ALIGNED(rpc_message_header_v rpc, 8);
+} GSP_MSG_QUEUE_ELEMENT;
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3 (20 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB (84u)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
new file mode 100644
index 000000000000..642c13aec325
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#ifndef E
+# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT = 0x1000) // 0x1000
+ E(GSP_INIT_DONE) // 0x1001
+ E(GSP_RUN_CPU_SEQUENCER) // 0x1002
+ E(POST_EVENT) // 0x1003
+ E(RC_TRIGGERED) // 0x1004
+ E(MMU_FAULT_QUEUED) // 0x1005
+ E(OS_ERROR_LOG) // 0x1006
+ E(RG_LINE_INTR) // 0x1007
+ E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
+ E(SIM_READ) // 0x1009
+ E(SIM_WRITE) // 0x100a
+ E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
+ E(UCODE_LIBOS_PRINT) // 0x100c
+ E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
+ E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
+ E(VGPU_CONFIG) // 0x1010
+ E(DISPLAY_MODESET) // 0x1011
+ E(EXTDEV_INTR_SERVICE) // 0x1012
+ E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
+ E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
+ E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
+ E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
+ E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
+ E(TIMED_SEMAPHORE_RELEASE) // 0x1018
+ E(NVLINK_IS_GPU_DEGRADED) // 0x1019
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
+ E(GSP_SEND_USER_SHARED_DATA) // 0x101b
+ E(NVLINK_FAULT_UP) // 0x101c
+ E(GSP_LOCKDOWN_NOTICE) // 0x101d
+ E(MIG_CI_CONFIG_UPDATE) // 0x101e
+ E(NUM_EVENTS) // END
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
new file mode 100644
index 000000000000..3a04e702677f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVDEC_H__
+#define __NVRM_NVDEC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances;
+ NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
new file mode 100644
index 000000000000..203c1d5304d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVENC_H__
+#define __NVRM_NVENC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
+ NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
new file mode 100644
index 000000000000..71fc53889ec7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVJPG_H__
+#define __NVRM_NVJPG_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
+ NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
new file mode 100644
index 000000000000..49d81c7673d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
index 73c57f235f6a..2a037acc6b1e 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
@@ -1,5 +1,10 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
-#define __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
@@ -209,54 +214,12 @@ enum {
X(RM, CTRL_SET_HS_CREDITS) // 198
X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199
X(RM, INVALIDATE_TLB) // 200
+ X(RM, RESERVED_201) // 201
+ X(RM, ECC_NOTIFIER_WRITE_ACK) // 202
X(RM, NUM_FUNCTIONS) //END
#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
};
# undef X
# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
#endif
-
-#ifndef E
-# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
-# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-enum {
-#endif
- E(FIRST_EVENT = 0x1000) // 0x1000
- E(GSP_INIT_DONE) // 0x1001
- E(GSP_RUN_CPU_SEQUENCER) // 0x1002
- E(POST_EVENT) // 0x1003
- E(RC_TRIGGERED) // 0x1004
- E(MMU_FAULT_QUEUED) // 0x1005
- E(OS_ERROR_LOG) // 0x1006
- E(RG_LINE_INTR) // 0x1007
- E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
- E(SIM_READ) // 0x1009
- E(SIM_WRITE) // 0x100a
- E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
- E(UCODE_LIBOS_PRINT) // 0x100c
- E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
- E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
- E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
- E(VGPU_CONFIG) // 0x1010
- E(DISPLAY_MODESET) // 0x1011
- E(EXTDEV_INTR_SERVICE) // 0x1012
- E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
- E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
- E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
- E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
- E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
- E(TIMED_SEMAPHORE_RELEASE) // 0x1018
- E(NVLINK_IS_GPU_DEGRADED) // 0x1019
- E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
- E(GSP_SEND_USER_SHARED_DATA) // 0x101b
- E(NVLINK_FAULT_UP) // 0x101c
- E(GSP_LOCKDOWN_NOTICE) // 0x101d
- E(MIG_CI_CONFIG_UPDATE) // 0x101e
- E(NUM_EVENTS) // END
-#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-};
-# undef E
-# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-#endif
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
new file mode 100644
index 000000000000..f6ec04efd119
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_VMM_H__
+#define __NVRM_VMM_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define FERMI_VASPACE_A (0x000090f1)
+
+typedef struct
+{
+ NvU32 index;
+ NvV32 flags;
+ NvU64 vaSize NV_ALIGN_BYTES(8);
+ NvU64 vaStartInternal NV_ALIGN_BYTES(8);
+ NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
+ NvU32 bigPageSize;
+ NvU64 vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //<! Create new VASpace, by default
+
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED BIT(3)
+
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_START 0x100000000ULL // 4GB
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE 0x20000000ULL // 512MB
+
+#define GMMU_FMT_MAX_LEVELS 6U
+
+#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
+typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
+ /*!
+ * [in] GPU sub-device handle - this API only supports unicast.
+ * Pass 0 to use subDeviceId instead.
+ */
+ NvHandle hSubDevice;
+
+ /*!
+ * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+ */
+ NvU32 subDeviceId;
+
+ /*!
+ * [in] Page size (VA coverage) of the level to reserve.
+ * This need not be a leaf (page table) page size - it can be
+ * the coverage of an arbitrary level (including root page directory).
+ */
+ NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+ /*!
+ * [in] First GPU virtual address of the range to reserve.
+ * This must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+ /*!
+ * [in] Last GPU virtual address of the range to reserve.
+ * This (+1) must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+
+ /*!
+ * [in] Number of PDE levels to copy.
+ */
+ NvU32 numLevelsToCopy;
+
+ /*!
+ * [in] Per-level information.
+ */
+ struct {
+ /*!
+ * Physical address of this page level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+
+ /*!
+ * Size in bytes allocated for this level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+ /*!
+ * Aperture in which this page level instance resides.
+ */
+ NvU32 aperture;
+
+ /*!
+ * Page shift corresponding to the level
+ */
+ NvU8 pageShift;
+ } levels[GMMU_FMT_MAX_LEVELS];
+} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
+
+#define NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (0x801813U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+ NvU32 numEntries;
+ NvU32 flags;
+ NvHandle hVASpace;
+ NvU32 chId;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+ NvU32 pasid;
+} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS;
+
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE 1:0
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH (0x00000002U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES 2:2
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS 3:3
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY 4:4
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE 5:5
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_TRUE (0x00000001U)
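
The high:low tokens above are the RM convention for naming a bit range within the flags word. A hedged sketch of how such a range expands to a mask (generic helper for illustration, not the DRF macros this driver actually uses):

/* Illustrative only: mask covering bits high..low of a 32-bit word. */
#define FIELD_MASK(high, low) ((((1U << ((high) - (low) + 1)) - 1)) << (low))
/* e.g. FIELD_MASK(1, 0) == 0x3 selects the APERTURE field. */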
+
+#define NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY (0x801814U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS {
+ NvHandle hVASpace;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
index 932934227b9c..2156808cba4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
@@ -19,26 +19,26 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/ofa.h"
-#include <nvif/class.h>
+static int
+r535_ofa_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
-static const struct nvkm_engine_func
-ga100_nvdec = {
- .sclass = {
- { -1, -1, NVC6B0_VIDEO_DECODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvdec **pnvdec)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec);
+ args->size = sizeof(*args);
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
}
+
+const struct nvkm_rm_api_engine
+r535_ofa = {
+ .alloc = r535_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
new file mode 100644
index 000000000000..a4190676e1ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_api
+r535_api = {
+ .gsp = &r535_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r535_client,
+ .device = &r535_device,
+ .fbsr = &r535_fbsr,
+ .disp = &r535_disp,
+ .fifo = &r535_fifo,
+ .ce = &r535_ce,
+ .gr = &r535_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r535_ofa,
+};
+
+const struct nvkm_rm_impl
+r535_rm_tu102 = {
+ .wpr = &r535_wpr_libos2,
+ .api = &r535_api,
+};
+
+const struct nvkm_rm_impl
+r535_rm_ga102 = {
+ .wpr = &r535_wpr_libos3,
+ .api = &r535_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
new file mode 100644
index 000000000000..9d06ff722fea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/rpcfn.h"
+
+#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
+/**
+ * DOC: GSP message queue element
+ *
+ * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
+ *
+ * The GSP command queue and status queue are message queues for
+ * communication between software and GSP. The software submits a GSP
+ * RPC via the GSP command queue, and GSP writes the status of the
+ * submitted RPC to the status queue.
+ *
+ * A GSP message queue element consists of three parts:
+ *
+ * - message element header (struct r535_gsp_msg), which mostly maintains
+ * the metadata for queuing the element.
+ *
+ * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
+ * of the RPC. E.g., the RPC function number.
+ *
+ * - The payload, where the RPC message stays. E.g. the params of a
+ * specific RPC function. Some RPC functions also have their headers
+ * in the payload. E.g. rm_alloc, rm_control.
+ *
+ * The memory layout of a GSP message element can be illustrated below::
+ *
+ * +------------------------+
+ * | Message Element Header |
+ * | (r535_gsp_msg) |
+ * | |
+ * | (r535_gsp_msg.data) |
+ * | | |
+ * |----------V-------------|
+ * | GSP RPC Header |
+ * | (nvfw_gsp_rpc) |
+ * | |
+ * | (nvfw_gsp_rpc.data) |
+ * | | |
+ * |----------V-------------|
+ * | Payload |
+ * | |
+ * | header(optional) |
+ * | params |
+ * +------------------------+
+ *
+ * The max size of a message queue element is 16 pages (including the
+ * headers). When a GSP message to be sent is larger than 16 pages, the
+ * message should be split into multiple elements and sent accordingly.
+ *
+ * Among the split elements, the first element carries the expected
+ * function number, while the remaining elements are sent with the
+ * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
+ *
+ * GSP consumes the elements from the cmdq and always writes the result
+ * back to the msgq. The result is also formed as split elements.
+ *
+ * Terminology:
+ *
+ * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
+ * payload)
+ * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
+ * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
+ * - gsp_rpc_len: size of (GSP RPC header + payload)
+ * - params_size: size of params in the payload
+ * - payload_size: size of (header if exists + params) in the payload
+ */
+
+struct r535_gsp_msg {
+ u8 auth_tag_buffer[16];
+ u8 aad_buffer[16];
+ u32 checksum;
+ u32 sequence;
+ u32 elem_count;
+ u32 pad;
+ u8 data[];
+};
+
+struct nvfw_gsp_rpc {
+ u32 header_version;
+ u32 signature;
+ u32 length;
+ u32 function;
+ u32 rpc_result;
+ u32 rpc_result_private;
+ u32 sequence;
+ union {
+ u32 spare;
+ u32 cpuRmGfid;
+ };
+ u8 data[];
+};
+
+#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+
+#define to_gsp_hdr(p, header) \
+ container_of((void *)p, typeof(*header), data)
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
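
From the header sizes just defined, the largest payload a single 16-page queue element can carry follows directly. A minimal sketch mirroring the arithmetic used later in r535_gsp_rpc_push() (helper name invented for illustration):

/* Illustrative only: usable payload bytes in a single 16-page element. */
static inline u32 gsp_max_payload_size(void)
{
	return GSP_MSG_MAX_SIZE - sizeof(struct r535_gsp_msg) -
	       sizeof(struct nvfw_gsp_rpc);
}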
+
+int
+r535_rpc_status_to_errno(uint32_t rpc_status)
+{
+ switch (rpc_status) {
+ case 0x55: /* NV_ERR_NOT_READY */
+ case 0x66: /* NV_ERR_TIMEOUT_RETRY */
+ return -EBUSY;
+ case 0x51: /* NV_ERR_NO_MEMORY */
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
+{
+ u32 size, rptr = *gsp->msgq.rptr;
+ int used;
+
+ size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
+ GSP_PAGE_SIZE);
+ if (WARN_ON(!size || size >= gsp->msgq.cnt))
+ return -EINVAL;
+
+ do {
+ u32 wptr = *gsp->msgq.wptr;
+
+ used = wptr + gsp->msgq.cnt - rptr;
+ if (used >= gsp->msgq.cnt)
+ used -= gsp->msgq.cnt;
+ if (used >= size)
+ break;
+
+ usleep_range(1, 2);
+ } while (--(*ptime));
+
+ if (WARN_ON(!*ptime))
+ return -ETIMEDOUT;
+
+ return used;
+}
+
+static struct r535_gsp_msg *
+r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
+{
+ u32 rptr = *gsp->msgq.rptr;
+
+ /* Skip the first page, which is the message queue info */
+ return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
+ rptr * GSP_PAGE_SIZE);
+}
+
+/**
+ * DOC: Receive a GSP message queue element
+ *
+ * Receiving a GSP message queue element from the message queue consists of
+ * the following steps:
+ *
+ * - Peek the element from the queue: r535_gsp_msgq_peek().
+ * Peek the first page of the element to determine the total size of the
+ * message before allocating the proper memory.
+ *
+ * - Allocate memory for the message.
+ * Once the total size of the message is determined from the GSP message
+ * queue element, the caller of r535_gsp_msgq_recv() allocates the
+ * required memory.
+ *
+ * - Receive the message: r535_gsp_msgq_recv().
+ * Copy the message into the allocated memory. Advance the read pointer.
+ * If the message is a large GSP message, r535_gsp_msgq_recv() calls
+ * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
+ * until the complete message is received.
+ * r535_gsp_msgq_recv() assembles the payloads of continuation parts
+ * into the returned large GSP message.
+ *
+ * - Free the allocated memory: r535_gsp_msg_done().
+ * The user is responsible for freeing the memory allocated for the GSP
+ * message pages after they have been processed.
+ */
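
Condensed, the caller-side shape of those steps looks like this (a sketch only; the real sequence with full error handling lives in r535_gsp_msg_recv() below):

/* Illustrative only: peek for a header, then receive the full message. */
static void *example_msgq_recv(struct nvkm_gsp *gsp, u32 len, int *retries)
{
	void *rpc = r535_gsp_msgq_peek(gsp, sizeof(struct nvfw_gsp_rpc), retries);

	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	/* Caller later frees the returned buffer via r535_gsp_msg_done(). */
	return r535_gsp_msgq_recv(gsp, len, retries);
}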
+static void *
+r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ int ret;
+
+ ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ return mqe->data;
+}
+
+struct r535_gsp_msg_info {
+ int *retries;
+ u32 gsp_rpc_len;
+ void *gsp_rpc_buf;
+ bool continuation;
+};
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
+
+static void *
+r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
+ struct r535_gsp_msg_info *info)
+{
+ u8 *buf = info->gsp_rpc_buf;
+ u32 rptr = *gsp->msgq.rptr;
+ struct r535_gsp_msg *mqe;
+ u32 size, expected, len;
+ int ret;
+
+ expected = info->gsp_rpc_len;
+
+ ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ if (info->continuation) {
+ struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
+ nvkm_error(&gsp->subdev,
+ "Not a continuation of a large RPC\n");
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ return ERR_PTR(-EIO);
+ }
+ }
+
+ size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+
+ len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
+ len = min_t(u32, expected, len);
+
+ if (info->continuation)
+ memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
+ len - sizeof(struct nvfw_gsp_rpc));
+ else
+ memcpy(buf, mqe->data, len);
+
+ expected -= len;
+
+ if (expected) {
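+ /* The message wrapped past the end of the queue; the remainder
+  * continues at the first entry page, just after the queue-info page.
+  */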
+ mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
+ memcpy(buf + len, mqe, expected);
+ }
+
+ rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
+
+ mb();
+ (*gsp->msgq.rptr) = rptr;
+ return buf;
+}
+
+static void *
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
+ struct nvfw_gsp_rpc *rpc;
+ struct r535_gsp_msg_info info = {0};
+ u32 expected = gsp_rpc_len;
+ void *buf;
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+ rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (WARN_ON(rpc->length > max_rpc_size))
+ return NULL;
+
+ buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ info.gsp_rpc_buf = buf;
+ info.retries = retries;
+ info.gsp_rpc_len = rpc->length;
+
+ buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR(buf)) {
+ kvfree(info.gsp_rpc_buf);
+ info.gsp_rpc_buf = NULL;
+ return buf;
+ }
+
+ if (expected <= max_rpc_size)
+ return buf;
+
+ info.gsp_rpc_buf += info.gsp_rpc_len;
+ expected -= info.gsp_rpc_len;
+
+ while (expected) {
+ u32 size;
+
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kfree(buf);
+ return rpc;
+ }
+
+ info.gsp_rpc_len = rpc->length;
+ info.continuation = true;
+
+ rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kfree(buf);
+ return rpc;
+ }
+
+ size = info.gsp_rpc_len - sizeof(*rpc);
+ expected -= size;
+ info.gsp_rpc_buf += size;
+ }
+
+ rpc = buf;
+ rpc->length = gsp_rpc_len;
+ return buf;
+}
+
+static int
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
+{
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ struct r535_gsp_msg *cqe;
+ u32 gsp_rpc_len = msg->checksum;
+ u64 *ptr = (void *)msg;
+ u64 *end;
+ u64 csum = 0;
+ int free, time = 1000000;
+ u32 wptr, size, step, len;
+ u32 off = 0;
+
+ len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
+
+ end = (u64 *)((char *)ptr + len);
+ msg->pad = 0;
+ msg->checksum = 0;
+ msg->sequence = gsp->cmdq.seq++;
+ msg->elem_count = DIV_ROUND_UP(len, 0x1000);
+
+ while (ptr < end)
+ csum ^= *ptr++;
+
+ msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+
+ wptr = *gsp->cmdq.wptr;
+ do {
+ do {
+ free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
+ if (free >= gsp->cmdq.cnt)
+ free -= gsp->cmdq.cnt;
+ if (free >= 1)
+ break;
+
+ usleep_range(1, 2);
+ } while(--time);
+
+ if (WARN_ON(!time)) {
+ kvfree(msg);
+ return -ETIMEDOUT;
+ }
+
+ cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
+ step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
+ size = min_t(u32, len, step * GSP_PAGE_SIZE);
+
+ memcpy(cqe, (u8 *)msg + off, size);
+
+ wptr += DIV_ROUND_UP(size, 0x1000);
+ if (wptr == gsp->cmdq.cnt)
+ wptr = 0;
+
+ off += size;
+ len -= size;
+ } while (len);
+
+ nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
+ wmb();
+ (*gsp->cmdq.wptr) = wptr;
+ mb();
+
+ nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
+
+ kvfree(msg);
+ return 0;
+}
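
The checksum written above XOR-folds every 64-bit word of the padded message (with the checksum field zeroed) down to 32 bits. The same fold in isolation, as a hedged sketch (helper name invented for illustration):

/* Illustrative only: the XOR-fold used for msg->checksum above. */
static u32 gsp_msg_checksum(const u64 *words, size_t count)
{
	u64 csum = 0;

	while (count--)
		csum ^= *words++;

	return upper_32_bits(csum) ^ lower_32_bits(csum);
}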
+
+static void *
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
+{
+ struct r535_gsp_msg *msg;
+ u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
+
+ size = ALIGN(size, GSP_MSG_MIN_SIZE);
+ msg = kvzalloc(size, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ msg->checksum = gsp_rpc_len;
+ return msg->data;
+}
+
+static void
+r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
+{
+ kvfree(msg);
+}
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
+{
+ if (gsp->subdev.debug >= lvl) {
+ nvkm_printk__(&gsp->subdev, lvl, info,
+ "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
+ msg->function, msg->length, msg->length - sizeof(*msg),
+ msg->rpc_result, msg->rpc_result_private);
+ print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg->data, msg->length - sizeof(*msg), true);
+ }
+}
+
+struct nvfw_gsp_rpc *
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvfw_gsp_rpc *rpc;
+ int retries = 4000000, i;
+
+retry:
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->rpc_result) {
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EINVAL);
+ }
+
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
+
+ if (fn && rpc->function == fn) {
+ if (gsp_rpc_len) {
+ if (rpc->length < gsp_rpc_len) {
+ nvkm_error(subdev, "rpc len %d < %d\n",
+ rpc->length, gsp_rpc_len);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EIO);
+ }
+
+ return rpc;
+ }
+
+ r535_gsp_msg_done(gsp, rpc);
+ return NULL;
+ }
+
+ for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
+ struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
+
+ if (ntfy->fn == rpc->function) {
+ if (ntfy->func)
+ ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
+ rpc->length - sizeof(*rpc));
+ break;
+ }
+ }
+
+ if (i == gsp->msgq.ntfy_nr)
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
+
+ r535_gsp_msg_done(gsp, rpc);
+ if (fn)
+ goto retry;
+
+ if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+ goto retry;
+
+ return NULL;
+}
+
+int
+r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
+{
+ int ret = 0;
+
+ mutex_lock(&gsp->msgq.mutex);
+ if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
+ ret = -ENOSPC;
+ } else {
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
+ gsp->msgq.ntfy_nr++;
+ }
+ mutex_unlock(&gsp->msgq.mutex);
+ return ret;
+}
+
+int
+r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
+{
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ mutex_unlock(&gsp->cmdq.mutex);
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static void *
+r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn,
+ enum nvkm_gsp_rpc_reply_policy policy,
+ u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *reply;
+ void *repv = NULL;
+
+ switch (policy) {
+ case NVKM_GSP_RPC_REPLY_NOWAIT:
+ break;
+ case NVKM_GSP_RPC_REPLY_RECV:
+ reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
+ if (!IS_ERR_OR_NULL(reply))
+ repv = reply->data;
+ else
+ repv = reply;
+ break;
+ case NVKM_GSP_RPC_REPLY_POLL:
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ break;
+ }
+
+ return repv;
+}
+
+static void *
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ u32 fn = rpc->function;
+ int ret;
+
+ if (gsp->subdev.debug >= NV_DBG_TRACE) {
+ nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
+ rpc->length, rpc->length - sizeof(*rpc));
+ print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
+ rpc->data, rpc->length - sizeof(*rpc), true);
+ }
+
+ ret = r535_gsp_cmdq_push(gsp, rpc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len);
+}
+
+static void
+r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+ struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
+
+ r535_gsp_msg_done(gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
+{
+ struct nvfw_gsp_rpc *rpc;
+
+ rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
+ sizeof(u64)));
+ if (IS_ERR(rpc))
+ return ERR_CAST(rpc);
+
+ rpc->header_version = 0x03000000;
+ rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
+ rpc->function = fn;
+ rpc->rpc_result = 0xffffffff;
+ rpc->rpc_result_private = 0xffffffff;
+ rpc->length = sizeof(*rpc) + payload_size;
+ return rpc->data;
+}
+
+static void *
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
+ const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
+ u32 payload_size = rpc->length - sizeof(*rpc);
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ if (payload_size > max_payload_size) {
+ const u32 fn = rpc->function;
+ u32 remain_payload_size = payload_size;
+ void *next;
+
+ /* Send initial RPC. */
+ next = r535_gsp_rpc_get(gsp, fn, max_payload_size);
+ if (IS_ERR(next)) {
+ repv = next;
+ goto done;
+ }
+
+ memcpy(next, payload, max_payload_size);
+
+ repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += max_payload_size;
+ remain_payload_size -= max_payload_size;
+
+ /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
+ while (remain_payload_size) {
+ u32 size = min(remain_payload_size,
+ max_payload_size);
+
+ next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
+ if (IS_ERR(next)) {
+ repv = next;
+ goto done;
+ }
+
+ memcpy(next, payload, size);
+
+ repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += size;
+ remain_payload_size -= size;
+ }
+
+ /* Wait for reply. */
+ repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size +
+ sizeof(*rpc));
+ if (!IS_ERR(repv))
+ kvfree(msg);
+ } else {
+ repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len);
+ }
+
+done:
+ mutex_unlock(&gsp->cmdq.mutex);
+ return repv;
+}
+
+const struct nvkm_rm_api_rpc
+r535_rpc = {
+ .get = r535_gsp_rpc_get,
+ .push = r535_gsp_rpc_push,
+ .done = r535_gsp_rpc_done,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
index d3e95453f25d..f25ea610cd99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
@@ -19,15 +19,38 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "vmm.h"
+#include <subdev/mmu/vmm.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvhw/drf.h>
+#include "nvrm/vmm.h"
-static int
-r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+void
+r535_mmu_vaspace_del(struct nvkm_vmm *vmm)
+{
+ if (vmm->rm.external) {
+ NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (!IS_ERR(ctrl)) {
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl));
+ }
+
+ vmm->rm.external = false;
+ }
+
+ nvkm_gsp_rm_free(&vmm->rm.object);
+ nvkm_gsp_device_dtor(&vmm->rm.device);
+ nvkm_gsp_client_dtor(&vmm->rm.client);
+
+ nvkm_vmm_put(vmm, &vmm->rm.rsvd);
+}
+
+int
+r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external)
{
NV_VASPACE_ALLOCATION_PARAMETERS *args;
int ret;
@@ -37,58 +60,103 @@ r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
if (ret)
return ret;
- args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
+ args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, handle, FERMI_VASPACE_A,
sizeof(*args), &vmm->rm.object);
if (IS_ERR(args))
return PTR_ERR(args);
args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+ if (external)
+ args->flags = NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED;
ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
if (ret)
return ret;
- {
+ if (!external) {
NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+ u8 page_shift = 29; /* 512MiB */
+ const u64 page_size = BIT_ULL(page_shift);
+ const struct nvkm_vmm_page *page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vmm_pt *pd = vmm->pd;
+
+ for (page = vmm->func->page; page->shift; page++) {
+ if (page->shift == page_shift)
+ break;
+ }
+
+ if (WARN_ON(!page->shift))
+ return -EINVAL;
mutex_lock(&vmm->mutex.vmm);
- ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
+ ret = nvkm_vmm_get_locked(vmm, true, false, false, page_shift, 32, page_size,
&vmm->rm.rsvd);
mutex_unlock(&vmm->mutex.vmm);
if (ret)
return ret;
+ /* Some parts of RM expect the server-reserved area to be in a specific location. */
+ if (WARN_ON(vmm->rm.rsvd->addr != SPLIT_VAS_SERVER_RM_MANAGED_VA_START ||
+ vmm->rm.rsvd->size != SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE))
+ return -EINVAL;
+
ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl->pageSize = 0x20000000;
+ ctrl->pageSize = page_size;
ctrl->virtAddrLo = vmm->rm.rsvd->addr;
ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
- ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
- ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
- ctrl->levels[0].size = 0x20;
- ctrl->levels[0].aperture = 1;
- ctrl->levels[0].pageShift = 0x2f;
- ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
- ctrl->levels[1].size = 0x1000;
- ctrl->levels[1].aperture = 1;
- ctrl->levels[1].pageShift = 0x26;
- if (vmm->pd->pde[0]->pde[0]) {
- ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
- ctrl->levels[2].size = 0x1000;
- ctrl->levels[2].aperture = 1;
- ctrl->levels[2].pageShift = 0x1d;
+
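+	/* Count how many page-table levels cover the 512MiB page size,
+	 * then walk them top-down, handing RM the physical address and
+	 * size of each level so it can mirror the reserved PDEs.
+	 */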
+ for (desc = page->desc; desc->bits; desc++) {
+ ctrl->numLevelsToCopy++;
+ page_shift += desc->bits;
+ }
+ desc--;
+
+ for (int i = 0; i < ctrl->numLevelsToCopy; i++, desc--) {
+ page_shift -= desc->bits;
+
+ ctrl->levels[i].physAddress = pd->pt[0]->addr;
+ ctrl->levels[i].size = BIT_ULL(desc->bits) * desc->size;
+ ctrl->levels[i].aperture = 1;
+ ctrl->levels[i].pageShift = page_shift;
+
+ pd = pd->pde[0];
}
ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
+ } else {
+ NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->physAddress = vmm->pd->pt[0]->addr;
+ ctrl->numEntries = 1 << vmm->func->page[0].desc->bits;
+ ctrl->flags = NVDEF(NV0080_CTRL_DMA_SET_PAGE_DIRECTORY, FLAGS, APERTURE, VIDMEM);
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl);
+ if (ret == 0)
+ vmm->rm.external = true;
}
return ret;
}

+static int
+r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+{
+ return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE, true);
+}
+
static void
r535_mmu_dtor(struct nvkm_mmu *mmu)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
new file mode 100644
index 000000000000..5db0e7009e1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/gsp/rm/r570/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/disp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
new file mode 100644
index 000000000000..87e6240662ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/client.h"
+
+static int
+r570_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClient = client->object.handle;
+ args->processID = ~0;
+
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
+}
+
+const struct nvkm_rm_api_client
+r570_client = {
+ .ctor = r570_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
new file mode 100644
index 000000000000..a96e31c2d80b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <engine/disp.h>
+#include <engine/disp/outp.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/disp.h"
+
+static int
+r570_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
+{
+ NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->channelInstance = inst;
+ args->offset = put_offset;
+ args->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r570_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
+{
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+ NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ if (memory) {
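+		/* Translate the NVKM memory target into RM's address-space
+		 * and physical-aperture encoding.
+		 */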
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_NCOH:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 0;
+ ctrl->pbTargetAperture = PHYS_PCI;
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 1;
+ ctrl->pbTargetAperture = PHYS_PCI_COHERENT;
+ break;
+ case NVKM_MEM_TARGET_VRAM:
+ ctrl->addressSpace = ADDR_FBMEM;
+ ctrl->pbTargetAperture = PHYS_NVM;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
+ }
+
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
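+	/* 0x..7a classes are cursor channels, which have no pushbuffer. */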
+ ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0;
+ ctrl->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static int
+r570_dp_set_indexed_link_rates(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
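+	/* DPCD indexed link-rate table entries are in 200kHz units. */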
+ for (int i = 0; i < outp->dp.rates; i++)
+ ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r570_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
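+	/* Convert RM's max link rate to the DPCD LINK_BW_SET encoding
+	 * (0x06 = 1.62Gbps, 0x0a = 2.7Gbps, 0x14 = 5.4Gbps, 0x1e = 8.1Gbps).
+	 */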
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval)
+{
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
+ NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
+ ctrl->brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid)
+{
+ NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ *displayid = ctrl->displayId;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id)
+{
+ NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayMask = BIT(display_id);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static int
+r570_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->engine.subdev.device->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+const struct nvkm_rm_api_disp
+r570_disp = {
+ .get_static_info = r570_disp_get_static_info,
+ .get_supported = r570_disp_get_supported,
+ .get_connect_state = r570_disp_get_connect_state,
+ .get_active = r570_disp_get_active,
+ .bl_ctrl = r570_bl_ctrl,
+ .dp = {
+ .get_caps = r570_dp_get_caps,
+ .set_indexed_link_rates = r570_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r570_disp_chan_set_pushbuf,
+ .dmac_alloc = r570_dmac_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
new file mode 100644
index 000000000000..2945d5b4e570
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/instmem/priv.h>
+#include <subdev/bar.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+
+#include "nvrm/fbsr.h"
+#include "nvrm/fifo.h"
+
+static int
+r570_fbsr_suspend_channels(struct nvkm_gsp *gsp, bool suspend)
+{
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bDisableActiveChannels = suspend;
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static void
+r570_fbsr_resume(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ struct nvkm_vmm *vmm;
+
+ /* Restore BAR2 page tables via BAR0 window, and re-enable BAR2. */
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ device->bar->bar2 = true;
+
+ vmm = nvkm_bar_bar2_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Restore remaining BAR2 allocations (including BAR1 page tables) via BAR2. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ vmm = nvkm_bar_bar1_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Resume channel scheduling. */
+ r570_fbsr_suspend_channels(device->gsp, false);
+
+ /* Finish cleaning up. */
+ r535_fbsr_resume(gsp);
+}
+
+static int
+r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size)
+{
+ NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+ struct nvkm_gsp_object memlist;
+ int ret;
+
+ ret = r535_fbsr_memlist(&gsp->internal.device, 0xcaf00003, NVKM_MEM_TARGET_HOST,
+ 0, size, sgt, &memlist);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->hClient = gsp->internal.client.object.handle;
+ ctrl->hSysMem = memlist.handle;
+ ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr;
+ ctrl->bEnteringGcoffState = 1;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+ if (ret)
+ return ret;
+
+ nvkm_gsp_rm_free(&memlist);
+ return 0;
+}
+
+static int
+r570_fbsr_suspend(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ u64 size;
+ int ret;
+
+ /* Stop channel scheduling. */
+ r570_fbsr_suspend_channels(gsp, true);
+
+ /* Save BAR2 allocations to system memory. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->preserve) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+ }
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable BAR2 access. */
+ device->bar->bar2 = false;
+
+ /* Allocate system memory to hold RM's VRAM allocations across suspend. */
+ size = gsp->fb.heap.size;
+ size += gsp->fb.rsvd_size;
+ size += gsp->fb.bios.vga_workspace.size;
+ nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", size);
+
+ ret = nvkm_gsp_sg(device, size, &gsp->sr.fbsr);
+ if (ret)
+ return ret;
+
+ /* Initialise FBSR on RM. */
+ ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size);
+ if (ret) {
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct nvkm_rm_api_fbsr
+r570_fbsr = {
+ .suspend = r570_fbsr_suspend,
+ .resume = r570_fbsr_resume,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
new file mode 100644
index 000000000000..79132805cfcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo/priv.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/runl.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
+
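+/* Number of channels whose USERD is packed into a single page. */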
+#define CHID_PER_USERD 8
+
+static int
+r570_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
+ NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
+
+ args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
+ if (!priv)
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+ else
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+ args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+ args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
+
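+	/* addressSpace: 1 = ADDR_SYSMEM, 2 = ADDR_FBMEM; cacheAttrib:
+	 * 0 = uncached, 1 = cached.
+	 */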
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
+ args->instanceMem.addressSpace = 2;
+ args->instanceMem.cacheAttrib = 1;
+
+ args->userdMem.base = userd_addr;
+ args->userdMem.size = fifo->func->chan.func->userd->size;
+ args->userdMem.addressSpace = 2;
+ args->userdMem.cacheAttrib = 1;
+
+ args->ramfcMem.base = inst_addr;
+ args->ramfcMem.size = 0x200;
+ args->ramfcMem.addressSpace = 2;
+ args->ramfcMem.cacheAttrib = 1;
+
+ args->mthdbufMem.base = mthdbuf_addr;
+ args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+ args->mthdbufMem.addressSpace = 1;
+ args->mthdbufMem.cacheAttrib = 0;
+
+ if (!priv)
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+ else
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r570_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc engn:%08x chid:%d gfid:%d level:%d type:%d scope:%d part:%d "
+ "fault_addr:%08x%08x fault_type:%08x\n",
+ msg->nv2080EngineType, msg->chid, msg->gfid, msg->exceptLevel, msg->exceptType,
+ msg->scope, msg->partitionAttributionId,
+ msg->mmuFaultAddrHi, msg->mmuFaultAddrLo, msg->mmuFaultType);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r570_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+ NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO,
+ sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+ engn->rm.size =
+ ctrl->constructedFalconsTable[i].ctxBufferSize;
+ break;
+ }
+ }
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
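+	/* Expands inside each case label below: translates an RM engine
+	 * type to the NVKM engine type, reports the matching NV2080 engine
+	 * type, and returns the instance number.
+	 */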
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
+
+ switch (rm) {
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( COPY10, CE, 10);
+ case RM_ENGINE_TYPE( COPY11, CE, 11);
+ case RM_ENGINE_TYPE( COPY12, CE, 12);
+ case RM_ENGINE_TYPE( COPY13, CE, 13);
+ case RM_ENGINE_TYPE( COPY14, CE, 14);
+ case RM_ENGINE_TYPE( COPY15, CE, 15);
+ case RM_ENGINE_TYPE( COPY16, CE, 16);
+ case RM_ENGINE_TYPE( COPY17, CE, 17);
+ case RM_ENGINE_TYPE( COPY18, CE, 18);
+ case RM_ENGINE_TYPE( COPY19, CE, 19);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE( NVENC3, NVENC, 3);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA0, OFA, 0);
+ case RM_ENGINE_TYPE( OFA1, OFA, 1);
+ default:
+ return -EINVAL;
+ }
+#undef RM_ENGINE_TYPE
+}
+
+const struct nvkm_rm_api_fifo
+r570_fifo = {
+ .xlat_rm_engine_type = r570_fifo_xlat_rm_engine_type,
+ .ectx_size = r570_fifo_ectx_size,
+ .rsvd_chids = 1,
+ .rc_triggered = r570_fifo_rc_triggered,
+ .chan = {
+ .alloc = r570_chan_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
new file mode 100644
index 000000000000..b6cced9b8aa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/gr.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+#include <engine/fifo/chid.h>
+#include <engine/gr/priv.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/engine.h"
+
+int
+r570_gr_tpc_mask(struct nvkm_gsp *gsp, int gpc, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->gpcId = gpc;
+
+ ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pmask = ctrl->tpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+int
+r570_gr_gpc_mask(struct nvkm_gsp *gsp, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->gpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_gr_scrubber_ctrl(struct r535_gr *gr, bool teardown)
+{
+ NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gr->scrubber.vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bTeardown = teardown;
+
+ return nvkm_gsp_rm_ctrl_wr(&gr->scrubber.vmm->rm.device.subdevice, ctrl);
+}
+
+static void
+r570_gr_scrubber_fini(struct r535_gr *gr)
+{
+ /* Teardown scrubber channel on RM. */
+ if (gr->scrubber.enabled) {
+ WARN_ON(r570_gr_scrubber_ctrl(gr, true));
+ gr->scrubber.enabled = false;
+ }
+
+ /* Free scrubber channel. */
+ nvkm_gsp_rm_free(&gr->scrubber.threed);
+ nvkm_gsp_rm_free(&gr->scrubber.chan);
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(gr->scrubber.vmm, &gr->scrubber.ctxbuf.vma[i]);
+ nvkm_memory_unref(&gr->scrubber.ctxbuf.mem[i]);
+ }
+
+ nvkm_vmm_unref(&gr->scrubber.vmm);
+ nvkm_memory_unref(&gr->scrubber.inst);
+}
+
+static int
+r570_gr_scrubber_init(struct r535_gr *gr)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ int ret;
+
+ /* Scrubber channel only required on TU10x. */
+ switch (device->chipset) {
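+	/* 0x162/0x164/0x166 are TU102/TU104/TU106. */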
+ case 0x162:
+ case 0x164:
+ case 0x166:
+ break;
+ default:
+ return 0;
+ }
+
+ if (gr->scrubber.chid < 0) {
+ gr->scrubber.chid = nvkm_chid_get(device->fifo->chid, NULL);
+ if (gr->scrubber.chid < 0)
+ return gr->scrubber.chid;
+ }
+
+ /* Allocate scrubber channel. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x2000 + device->fifo->rm.mthdbuf_size, 0, true,
+ &gr->scrubber.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grScrubberVmm",
+ &gr->scrubber.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS, false);
+ if (ret)
+ goto done;
+
+ ret = rm->api->fifo->chan.alloc(&gr->scrubber.vmm->rm.device, KGRAPHICS_SCRUBBER_HANDLE_CHANNEL,
+ NV2080_ENGINE_TYPE_GR0, 0, false, gr->scrubber.chid,
+ nvkm_memory_addr(gr->scrubber.inst),
+ nvkm_memory_addr(gr->scrubber.inst) + 0x1000,
+ nvkm_memory_addr(gr->scrubber.inst) + 0x2000,
+ gr->scrubber.vmm, 0, 0x1000, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = r535_gr_promote_ctx(gr, false, gr->scrubber.vmm, gr->scrubber.ctxbuf.mem,
+ gr->scrubber.ctxbuf.vma, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_rm_alloc(&gr->scrubber.chan, KGRAPHICS_SCRUBBER_HANDLE_3DOBJ,
+ rm->gpu->gr.class.threed, 0, &gr->scrubber.threed);
+ if (ret)
+ goto done;
+
+ /* Initialise scrubber channel on RM. */
+ ret = r570_gr_scrubber_ctrl(gr, false);
+ if (ret)
+ goto done;
+
+ gr->scrubber.enabled = true;
+
+done:
+ if (ret)
+ r570_gr_scrubber_fini(gr);
+
+ return ret;
+}
+
+static int
+r570_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+const struct nvkm_rm_api_gr
+r570_gr = {
+ .get_ctxbufs_info = r570_gr_get_ctxbufs_info,
+ .scrubber.init = r570_gr_scrubber_init,
+ .scrubber.fini = r570_gr_scrubber_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
new file mode 100644
index 000000000000..9d2fa4e66d59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+#include <rm/rpc.h>
+
+#include <asm-generic/video.h>
+
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+
+#include <core/pci.h>
+#include <subdev/pci/priv.h>
+
+static u32
+r570_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
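+	/* Everything between the end of the non-WPR heap and the end of
+	 * FRTS must be preserved across suspend.
+	 */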
+ return (meta->frtsOffset + meta->frtsSize) -
+ (meta->nonWprHeapOffset + meta->nonWprHeapSize);
+}
+
+static void
+r570_gsp_drop_post_nocat_record(struct nvkm_gsp *gsp)
+{
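+	/* Registering a NULL handler consumes these chatty GSP events
+	 * without logging them unless debug output is enabled.
+	 */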
+ if (gsp->subdev.debug < NV_DBG_DEBUG) {
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, NULL, NULL);
+ }
+}
+
+static bool
+r570_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE19:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_NVENC ... MC_ENGINE_IDX_NVENC3:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0 ... MC_ENGINE_IDX_OFA1:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_OFA0;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int
+r570_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+ u32 gpc_mask;
+ u32 tpc_mask;
+ int ret;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+ INIT_LIST_HEAD(&gsp->internal.client.events);
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
+
+ if (gsp->rm->wpr->offset_set_by_acr) {
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ meta->nonWprHeapOffset = rpc->fwWprLayoutOffset.nonWprHeapOffset;
+ meta->frtsOffset = rpc->fwWprLayoutOffset.frtsOffset;
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ ret = r570_gr_gpc_mask(gsp, &gpc_mask);
+ if (ret)
+ return ret;
+
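+	/* Count enabled GPCs and the TPCs within them. */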
+ for (int gpc = 0; gpc < 32; gpc++) {
+ if (gpc_mask & BIT(gpc)) {
+ ret = r570_gr_tpc_mask(gsp, gpc, &tpc_mask);
+ if (ret)
+ return ret;
+
+ gsp->gr.tpcs += hweight32(tpc_mask);
+ gsp->gr.gpcs++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+r570_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
+{
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
+
+ if (!handle)
+ return;
+
+ acpi->bValid = 1;
+
+ r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
+ r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
+ r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
+#endif
+}
+
+static int
+r570_gsp_set_system_info(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct pci_dev *pdev = container_of(device, struct nvkm_device_pci, device)->pdev;
+ GspSystemInfo *info;
+
+ if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
+ return -ENOSYS;
+
+ info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
+ info->nvDomainBusDeviceFunc = pci_dev_id(pdev);
+ info->maxUserVa = TASK_SIZE;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
+ info->PCIDeviceID = (pdev->device << 16) | pdev->vendor;
+ info->PCISubDeviceID = (pdev->subsystem_device << 16) | pdev->subsystem_vendor;
+ info->PCIRevisionID = pdev->revision;
+ r570_gsp_acpi_info(gsp, &info->acpiMethodData);
+ info->bIsPrimary = video_is_primary_device(device->dev);
+ info->bPreserveVideoMemoryAllocations = false;
+
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
+}
+
+static void
+r570_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
+{
+ GSP_ARGUMENTS_CACHED *args;
+
+ args = gsp->rmargs.data;
+ args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
+ args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
+ args->messageQueueInitArguments.cmdQueueOffset =
+ (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
+ args->messageQueueInitArguments.statQueueOffset =
+ (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
+
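+	/* On resume, tell GSP-RM it is returning from the "level 3"
+	 * (suspended) power state.
+	 */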
+ if (!resume) {
+ args->srInitArguments.oldLevel = 0;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 0;
+ } else {
+ args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 1;
+ }
+
+ args->bDmemStack = 1;
+}
+
+const struct nvkm_rm_api_gsp
+r570_gsp = {
+ .set_rmargs = r570_gsp_set_rmargs,
+ .set_system_info = r570_gsp_set_system_info,
+ .get_static_info = r570_gsp_get_static_info,
+ .xlat_mc_engine_idx = r570_gsp_xlat_mc_engine_idx,
+ .drop_post_nocat_record = r570_gsp_drop_post_nocat_record,
+ .sr_data_size = r570_gsp_sr_data_size,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
new file mode 100644
index 000000000000..e8714e0abc37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+ NV_DECLARE_ALIGNED(NvP64 pOsPidInfo, 8);
+} NV0000_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
new file mode 100644
index 000000000000..06e972835d77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+ NvU32 embeddedDisplayPortMask;
+ NvBool bExternalMuxSupported;
+ NvBool bInternalMuxSupported;
+ NvU32 numDispChannels;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730107U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupportedByGpu;
+ NvU32 minPClkForCompressed;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NvBool bUseRgFlushSequence;
+ NvBool bSupportDPDownSpread;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 UHBRSupportedByDfp;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR 13:13
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS 0:0
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS 1:1
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS 2:2
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x73010cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+ NvU8 brightnessType;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU16 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+ NvBool sendACT; // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvBool bEnableAudioOverRightPanel;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+ NvU32 pbTargetAperture;
+ NvU32 channelPBSize;
+ NvU32 subDeviceId;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef enum
+{
+ PB_SIZE_4KB = 0,
+ PB_SIZE_8KB,
+ PB_SIZE_16KB,
+ PB_SIZE_32KB,
+ PB_SIZE_64KB
+} ChannelPBSize;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+ ChannelPBSize channelPBSize; // Size of Push Buffer requested by client (allowed values in enum)
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+ NvU32 subDeviceId; // One-hot encoded subDeviceId (i.e. SDM) that will be used to address the channel in the pushbuffer stream (via SSDM method)
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100 1
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT1000 2
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_NITS 3
+
+typedef enum
+{
+ IOVA,
+ PHYS_NVM,
+ PHYS_PCI,
+ PHYS_PCI_COHERENT
+} PBTARGETAPERTURE;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
new file mode 100644
index 000000000000..7997050a4f29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE10 25
+#define MC_ENGINE_IDX_CE11 26
+#define MC_ENGINE_IDX_CE12 27
+#define MC_ENGINE_IDX_CE13 28
+#define MC_ENGINE_IDX_CE14 29
+#define MC_ENGINE_IDX_CE15 30
+#define MC_ENGINE_IDX_CE16 31
+#define MC_ENGINE_IDX_CE17 32
+#define MC_ENGINE_IDX_CE18 33
+#define MC_ENGINE_IDX_CE19 34
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE19
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_NVENC 38
+#define MC_ENGINE_IDX_NVENC1 39
+#define MC_ENGINE_IDX_NVENC2 40
+#define MC_ENGINE_IDX_NVENC3 41
+#define MC_ENGINE_IDX_C2C 42
+#define MC_ENGINE_IDX_LTC 43
+#define MC_ENGINE_IDX_FBHUB 44
+#define MC_ENGINE_IDX_HDACODEC 45
+#define MC_ENGINE_IDX_GMMU 46
+#define MC_ENGINE_IDX_SEC2 47
+#define MC_ENGINE_IDX_FSP 48
+#define MC_ENGINE_IDX_NVLINK 49
+#define MC_ENGINE_IDX_GSP 50
+#define MC_ENGINE_IDX_NVJPG 51
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 52
+#define MC_ENGINE_IDX_NVJPEG2 53
+#define MC_ENGINE_IDX_NVJPEG3 54
+#define MC_ENGINE_IDX_NVJPEG4 55
+#define MC_ENGINE_IDX_NVJPEG5 56
+#define MC_ENGINE_IDX_NVJPEG6 57
+#define MC_ENGINE_IDX_NVJPEG7 58
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 59
+#define MC_ENGINE_IDX_ACCESS_CNTR 60
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 61
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 63
+#define MC_ENGINE_IDX_INFO_FAULT 64
+#define MC_ENGINE_IDX_BSP 65
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 66
+#define MC_ENGINE_IDX_NVDEC2 67
+#define MC_ENGINE_IDX_NVDEC3 68
+#define MC_ENGINE_IDX_NVDEC4 69
+#define MC_ENGINE_IDX_NVDEC5 70
+#define MC_ENGINE_IDX_NVDEC6 71
+#define MC_ENGINE_IDX_NVDEC7 72
+#define MC_ENGINE_IDX_CPU_DOORBELL 73
+#define MC_ENGINE_IDX_PRIV_DOORBELL 74
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 75
+#define MC_ENGINE_IDX_BLG 76
+#define MC_ENGINE_IDX_PERFMON 77
+#define MC_ENGINE_IDX_BUF_RESET 78
+#define MC_ENGINE_IDX_XBAR 79
+#define MC_ENGINE_IDX_ZPW 80
+#define MC_ENGINE_IDX_OFA0 81
+#define MC_ENGINE_IDX_OFA1 82
+#define MC_ENGINE_IDX_TEGRA 83
+#define MC_ENGINE_IDX_GR 84
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 85
+#define MC_ENGINE_IDX_GR2 86
+#define MC_ENGINE_IDX_GR3 87
+#define MC_ENGINE_IDX_GR4 88
+#define MC_ENGINE_IDX_GR5 89
+#define MC_ENGINE_IDX_GR6 90
+#define MC_ENGINE_IDX_GR7 91
+#define MC_ENGINE_IDX_ESCHED 92
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 156
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 157
+#define MC_ENGINE_IDX_GR2_FECS_LOG 158
+#define MC_ENGINE_IDX_GR3_FECS_LOG 159
+#define MC_ENGINE_IDX_GR4_FECS_LOG 160
+#define MC_ENGINE_IDX_GR5_FECS_LOG 161
+#define MC_ENGINE_IDX_GR6_FECS_LOG 162
+#define MC_ENGINE_IDX_GR7_FECS_LOG 163
+#define MC_ENGINE_IDX_TMR_SWRL 164
+#define MC_ENGINE_IDX_DISP_GSP 165
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 166
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 167
+#define MC_ENGINE_IDX_PXUC 168
+#define MC_ENGINE_IDX_SYSLTC 169
+#define MC_ENGINE_IDX_LRCC 170
+#define MC_ENGINE_IDX_GSPLITE 171
+#define MC_ENGINE_IDX_GSPLITE0 MC_ENGINE_IDX_GSPLITE
+#define MC_ENGINE_IDX_GSPLITE1 172
+#define MC_ENGINE_IDX_GSPLITE2 173
+#define MC_ENGINE_IDX_GSPLITE3 174
+#define MC_ENGINE_IDX_GSPLITE_MAX MC_ENGINE_IDX_GSPLITE3
+#define MC_ENGINE_IDX_DPAUX 175
+#define MC_ENGINE_IDX_DISP_LOW 176
+#define MC_ENGINE_IDX_MAX 177
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_COPY10 = (0x00000013),
+ RM_ENGINE_TYPE_COPY11 = (0x00000014),
+ RM_ENGINE_TYPE_COPY12 = (0x00000015),
+ RM_ENGINE_TYPE_COPY13 = (0x00000016),
+ RM_ENGINE_TYPE_COPY14 = (0x00000017),
+ RM_ENGINE_TYPE_COPY15 = (0x00000018),
+ RM_ENGINE_TYPE_COPY16 = (0x00000019),
+ RM_ENGINE_TYPE_COPY17 = (0x0000001a),
+ RM_ENGINE_TYPE_COPY18 = (0x0000001b),
+ RM_ENGINE_TYPE_COPY19 = (0x0000001c),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ // Bug 4175886 - Use this new value for all chips once GB20X is released
+ RM_ENGINE_TYPE_NVENC3 = (0x00000028),
+ RM_ENGINE_TYPE_VP = (0x00000029),
+ RM_ENGINE_TYPE_ME = (0x0000002a),
+ RM_ENGINE_TYPE_PPP = (0x0000002b),
+ RM_ENGINE_TYPE_MPEG = (0x0000002c),
+ RM_ENGINE_TYPE_SW = (0x0000002d),
+ RM_ENGINE_TYPE_TSEC = (0x0000002e),
+ RM_ENGINE_TYPE_VIC = (0x0000002f),
+ RM_ENGINE_TYPE_MP = (0x00000030),
+ RM_ENGINE_TYPE_SEC2 = (0x00000031),
+ RM_ENGINE_TYPE_HOST = (0x00000032),
+ RM_ENGINE_TYPE_DPU = (0x00000033),
+ RM_ENGINE_TYPE_PMU = (0x00000034),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003c),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003d),
+ RM_ENGINE_TYPE_OFA0 = (0x0000003e),
+ RM_ENGINE_TYPE_OFA1 = (0x0000003f),
+ RM_ENGINE_TYPE_RESERVED40 = (0x00000040),
+ RM_ENGINE_TYPE_RESERVED41 = (0x00000041),
+ RM_ENGINE_TYPE_RESERVED42 = (0x00000042),
+ RM_ENGINE_TYPE_RESERVED43 = (0x00000043),
+ RM_ENGINE_TYPE_RESERVED44 = (0x00000044),
+ RM_ENGINE_TYPE_RESERVED45 = (0x00000045),
+ RM_ENGINE_TYPE_RESERVED46 = (0x00000046),
+ RM_ENGINE_TYPE_RESERVED47 = (0x00000047),
+ RM_ENGINE_TYPE_RESERVED48 = (0x00000048),
+ RM_ENGINE_TYPE_RESERVED49 = (0x00000049),
+ RM_ENGINE_TYPE_RESERVED4a = (0x0000004a),
+ RM_ENGINE_TYPE_RESERVED4b = (0x0000004b),
+ RM_ENGINE_TYPE_RESERVED4c = (0x0000004c),
+ RM_ENGINE_TYPE_RESERVED4d = (0x0000004d),
+ RM_ENGINE_TYPE_RESERVED4e = (0x0000004e),
+ RM_ENGINE_TYPE_RESERVED4f = (0x0000004f),
+ RM_ENGINE_TYPE_RESERVED50 = (0x00000050),
+ RM_ENGINE_TYPE_RESERVED51 = (0x00000051),
+ RM_ENGINE_TYPE_RESERVED52 = (0x00000052),
+ RM_ENGINE_TYPE_RESERVED53 = (0x00000053),
+ RM_ENGINE_TYPE_LAST = (0x00000054),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_OFA0 NV2080_ENGINE_TYPE_OFA
+#define NV2080_ENGINE_TYPE_COPY10 (0x00000034)
+#define NV2080_ENGINE_TYPE_COPY11 (0x00000035)
+#define NV2080_ENGINE_TYPE_COPY12 (0x00000036)
+#define NV2080_ENGINE_TYPE_COPY13 (0x00000037)
+#define NV2080_ENGINE_TYPE_COPY14 (0x00000038)
+#define NV2080_ENGINE_TYPE_COPY15 (0x00000039)
+#define NV2080_ENGINE_TYPE_COPY16 (0x0000003a)
+#define NV2080_ENGINE_TYPE_COPY17 (0x0000003b)
+#define NV2080_ENGINE_TYPE_COPY18 (0x0000003c)
+#define NV2080_ENGINE_TYPE_COPY19 (0x0000003d)
+#define NV2080_ENGINE_TYPE_OFA1 (0x0000003e)
+#define NV2080_ENGINE_TYPE_NVENC3 (0x0000003f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0 (0x00000040)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY1 (0x00000041)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY2 (0x00000042)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY3 (0x00000043)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY4 (0x00000044)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY5 (0x00000045)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY6 (0x00000046)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY7 (0x00000047)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY8 (0x00000048)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY9 (0x00000049)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY10 (0x0000004a)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY11 (0x0000004b)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY12 (0x0000004c)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY13 (0x0000004d)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY14 (0x0000004e)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY15 (0x0000004f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY16 (0x00000050)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY17 (0x00000051)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY18 (0x00000052)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19 (0x00000053)
+#define NV2080_ENGINE_TYPE_LAST (0x00000054)
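+
+/*
+ * Minimal sketch (a hypothetical helper, not from the RM headers): the
+ * NV2080 copy-engine values are not contiguous (COPY0..COPY9 start at 0x09,
+ * while COPY10..COPY19 were appended later at 0x34), so converting from
+ * RM_ENGINE_TYPE takes two ranges.
+ */
+static inline NvU32
+nv2080_engine_type_from_rm_copy(RM_ENGINE_TYPE rm)
+{
+	if (rm >= RM_ENGINE_TYPE_COPY0 && rm <= RM_ENGINE_TYPE_COPY9)
+		return NV2080_ENGINE_TYPE_COPY0 + (rm - RM_ENGINE_TYPE_COPY0);
+	if (rm >= RM_ENGINE_TYPE_COPY10 && rm <= RM_ENGINE_TYPE_COPY19)
+		return NV2080_ENGINE_TYPE_COPY10 + (rm - RM_ENGINE_TYPE_COPY10);
+	return NV2080_ENGINE_TYPE_NULL;
+}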
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
new file mode 100644
index 000000000000..8af432375f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NvBool bEnteringGcoffState;
+ NV_DECLARE_ALIGNED(NvU64 sysmemAddrOfSuspendResumeData, 8);
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
index 7157c7757698..2b002ca64e0f 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
@@ -1,31 +1,14 @@
-#ifndef __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
-#define __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV_MAX_SUBDEVICES 8
typedef struct NV_MEMORY_DESC_PARAMS {
NV_DECLARE_ALIGNED(NvU64 base, 8);
@@ -34,137 +17,197 @@ typedef struct NV_MEMORY_DESC_PARAMS {
NvU32 cacheAttrib;
} NV_MEMORY_DESC_PARAMS;
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+ // engine type (NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+ // Channel identifier that is unique for the duration of an RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // Nonce used for CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+ NvU32 tpcConfigID; // TPC Configuration Id as supported by DTD-PG Feature
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
-
#define NVOS04_FLAGS_VPR 2:2
#define NVOS04_FLAGS_VPR_FALSE 0x00000000
#define NVOS04_FLAGS_VPR_TRUE 0x00000001
-
#define NVOS04_FLAGS_CC_SECURE 2:2
#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
-
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
-
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
-
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
-
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
-
#define NVOS04_FLAGS_MAP_CHANNEL 30:30
#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
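+
+/*
+ * The HI:LO pairs above are DRF-style bit-range definitions. As a sketch
+ * (assuming nouveau's NVDEF()/NVVAL() helpers from <nvhw/drf.h>), a caller
+ * would compose the allocation flags along these lines:
+ *
+ *   args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ *   args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd);
+ *   args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+ */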
-#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
-#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
-
-typedef struct NV_CHANNEL_ALLOC_PARAMS {
-
- NvHandle hObjectError; // error context DMA
- NvHandle hObjectBuffer; // no longer used
- NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
- NvU32 gpFifoEntries; // number of GP FIFO entries
-
- NvU32 flags;
-
-
- NvHandle hContextShare; // context share handle
- NvHandle hVASpace; // VASpace for the channel
-
- // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
- NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
-
- // offset to beginning of UserD within hUserdMemory[x]
- NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
-
- // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
- NvU32 engineType;
- // Channel identifier that is unique for the duration of a RM session
- NvU32 cid;
- // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
- NvU32 subDeviceId;
- NvHandle hObjectEccError; // ECC error context DMA
-
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
-
- NvHandle hPhysChannelGroup; // reserved
- NvU32 internalFlags; // reserved
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
- NvU32 ProcessID; // reserved
- NvU32 SubProcessID; // reserved
- // IV used for CPU-side encryption / GPU-side decryption.
- NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
- // IV used for CPU-side decryption / GPU-side encryption.
- NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
- // Nonce used CPU-side signing / GPU-side signature verification.
- NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
-} NV_CHANNEL_ALLOC_PARAMS;
-
-typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
-
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED 6:6
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_YES 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED 7:7
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_YES 0x1
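+
+/*
+ * Sketch (assumed usage, not RM code): an admin-privileged channel without
+ * error notifiers would compose internalFlags along these lines:
+ *
+ *   args->internalFlags =
+ *       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
+ *       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
+ *       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+ */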
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 gfid;
+ NvU32 exceptLevel;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+ NvU32 mmuFaultAddrLo;
+ NvU32 mmuFaultAddrHi;
+ NvU32 mmuFaultType;
+ NvBool bCallbackNeeded;
+ NvU32 rcJournalBufferSize;
+ NvU8 rcJournalBuffer[];
+} rpc_rc_triggered_v17_02;
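+
+/*
+ * Variable-length RPC: the total payload is sizeof(rpc_rc_triggered_v17_02)
+ * plus rcJournalBufferSize bytes, with the RC journal carried in the
+ * trailing flexible array member.
+ */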
+
+#define NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO (0x208001b0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS {
+ NvBool bDisableActiveChannels;
+} NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS_MESSAGE_ID" */
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
new file mode 100644
index 000000000000..feed1dabd9d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x1a
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SETUP (0x00000019)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x0000001a)
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+#define KGRAPHICS_SCRUBBER_HANDLE_VAS 0xdada0042
+#define KGRAPHICS_SCRUBBER_HANDLE_CHANNEL (KGRAPHICS_SCRUBBER_HANDLE_VAS + 3)
+#define KGRAPHICS_SCRUBBER_HANDLE_3DOBJ (KGRAPHICS_SCRUBBER_HANDLE_VAS + 4)
+
+typedef struct NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS {
+ NvBool bTeardown;
+} NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR (0x20800a46) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID" */
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
new file mode 100644
index 000000000000..b6075021e74f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[9];
+ char chipSKUMod[5];
+ NvU32 skuConfigVersion;
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+ NvBool bNonPowerOf2ChannelCountSupported;
+ NvBool bVfResizableBAR1Supported;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+#define MAX_GROUP_COUNT 2
+
+typedef struct
+{
+ NvU32 ecidLow;
+ NvU32 ecidHigh;
+ NvU32 ecidExtended;
+} EcidManufacturingInfo;
+
+typedef struct
+{
+ NvU64 nonWprHeapOffset;
+ NvU64 frtsOffset;
+} FW_WPR_LAYOUT_OFFSET;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU64 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU64 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGc8Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+ NvBool bIsMigSupported;
+
+ /* "Total Board Power" refers to power requirement of GPU,
+ * while in GC6 state. Majority of this power will be used
+ * to keep V-RAM active to preserve its content.
+ * Some energy maybe consumed by Always-on components on GPU chip.
+ * This power will be provided by 3.3v voltage rail.
+ */
+ NvU16 RTD3GC6TotalBoardPower;
+
+ /* PERST# (i.e. PCI Express Reset) is a sideband signal generated by
+ * the PCIe host to indicate to the PCIe devices that the power rails
+ * and the reference clock are stable. The endpoint device typically
+ * uses this signal as a global reset.
+ */
+ NvU16 RTD3GC6PerstDelay;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+ NvBool bIsEfiInit;
+
+ EcidManufacturingInfo ecidInfo[MAX_GROUP_COUNT];
+
+ FW_WPR_LAYOUT_OFFSET fwWprLayoutOffset;
+} GspStaticConfigInfo;
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxStateTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct
+{
+ // Link capabilities
+ NvU32 linkCap;
+} GSP_PCIE_CONFIG_REG;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 gpuPhysIoAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 notifyOpSharedSurfacePhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU32 PCIDeviceID;
+ NvU32 PCISubDeviceID;
+ NvU32 PCIRevisionID;
+ NvU32 pcieAtomicsCplDeviceCapMask;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bFlrSupported;
+ NvBool b64bBar0Supported;
+ NvBool bMnocAvailable;
+ NvU32 chipsetL1ssEnable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvBool bSystemHasMux;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+ NvBool bIsPrimary;
+ NvBool isGridBuild;
+ GSP_PCIE_CONFIG_REG pcieConfigReg;
+ NvU32 gridBuildCsp;
+ NvBool bPreserveVideoMemoryAllocations;
+ NvBool bTdrEventSupported;
+ NvBool bFeatureStretchVblankCapable;
+ NvBool bEnableDynamicGranularityPageArrays;
+ NvBool bClockBoostSupported;
+ NvBool bRouteDispIntrsToCPU;
+ NvU64 hostPageSize;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+ NvU32 preemptiveRemovalPreviousXid;
+} rpc_os_error_log_v17_00;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+ // Bumped when this interface changes in a non-backward-compatible way.
+ // Bumped when GSP-RM ucode is revoked.
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to set up the heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Flags to help decide GSP-FW flow.
+ NvU8 flags;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[2];
+
+ //
+ // Starts at gspFwWprEnd+frtsSize because FRTS is positioned to end
+ // where this allocation starts (when RM requests FSP to create FRTS).
+ //
+ NvU32 pmuReservedSize;
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
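+
+/*
+ * Sketch (a hypothetical helper, not RM code): consumers are expected to
+ * validate the handoff block before trusting any offset it describes.
+ */
+static inline NvBool
+gsp_fw_wpr_meta_valid(const GspFwWprMeta *meta)
+{
+	return meta->magic == GSP_FW_WPR_META_MAGIC &&
+	       meta->revision == GSP_FW_WPR_META_REVISION;
+}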
+
+typedef struct {
+ NvU64 sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+ NvBool bDmemStack;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef struct
+{
+ // Magic for verification by secure ucode
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped up when we change this interface so it is not backward compatible.
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ // Members regarding data in SYSMEM
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ //
+ // Internal members for use by secure ucode
+ // Must be exactly GSP_FW_SR_META_INTERNAL_SIZE bytes.
+ //
+ NvU32 internal[32];
+
+ // Same as flags of GspFwWprMeta
+ NvU32 flags;
+
+ // Subrevision number used by secure ucode
+ NvU32 subrevision;
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[22];
+} GspFwSRMeta;
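+
+/*
+ * Size check: four NvU64s (32 bytes) + internal[32] (128 bytes) + flags and
+ * subrevision (8 bytes) + padding[22] (88 bytes) = 256 bytes, one DMA chunk.
+ */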
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL (22 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA (12u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA (70u)
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB \
+ (88u + (BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA) + \
+ (BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA))
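+// i.e. 88 + 12 + 70 = 170 MB minimum with the Bullseye deltas above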
+
+typedef struct GSP_FMC_INIT_PARAMS
+{
+ // CC initialization "registry keys"
+ NvU32 regkeys;
+} GSP_FMC_INIT_PARAMS;
+
+typedef enum {
+ GSP_DMA_TARGET_LOCAL_FB,
+ GSP_DMA_TARGET_COHERENT_SYSTEM,
+ GSP_DMA_TARGET_NONCOHERENT_SYSTEM,
+ GSP_DMA_TARGET_COUNT
+} GSP_DMA_TARGET;
+
+typedef struct GSP_ACR_BOOT_GSP_RM_PARAMS
+{
+ // Physical memory aperture through which gspRmDescPa is accessed
+ GSP_DMA_TARGET target;
+ // Size in bytes of the GSP-RM descriptor structure
+ NvU32 gspRmDescSize;
+ // Physical offset in the target aperture of the GSP-RM descriptor structure
+ NvU64 gspRmDescOffset;
+ // Physical offset in FB to set the start of the WPR containing GSP-RM
+ NvU64 wprCarveoutOffset;
+ // Size in bytes of the WPR containing GSP-RM
+ NvU32 wprCarveoutSize;
+ // Whether to boot GSP-RM or GSP-Proxy through ACR
+ NvBool bIsGspRmBoot;
+} GSP_ACR_BOOT_GSP_RM_PARAMS;
+
+typedef struct GSP_RM_PARAMS
+{
+ // Physical memory aperture through which bootArgsOffset is accessed
+ GSP_DMA_TARGET target;
+ // Physical offset in the memory aperture that will be passed to GSP-RM
+ NvU64 bootArgsOffset;
+} GSP_RM_PARAMS;
+
+typedef struct GSP_SPDM_PARAMS
+{
+ // Physical Memory Aperture through which all addresses are accessed
+ GSP_DMA_TARGET target;
+
+ // Physical offset in the memory aperture where SPDM payload is stored
+ NvU64 payloadBufferOffset;
+
+ // Size of the above payload buffer
+ NvU32 payloadBufferSize;
+} GSP_SPDM_PARAMS;
+
+typedef struct GSP_FMC_BOOT_PARAMS
+{
+ GSP_FMC_INIT_PARAMS initParams;
+ GSP_ACR_BOOT_GSP_RM_PARAMS bootGspRmParams;
+ GSP_RM_PARAMS gspRmParams;
+ GSP_SPDM_PARAMS gspSpdmParams;
+} GSP_FMC_BOOT_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100 (14 << 20) // Hopper+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
new file mode 100644
index 000000000000..e06643f57695
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#ifndef E
+# define E(RPC, VAL) NV_VGPU_MSG_EVENT_##RPC = VAL,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT, 0x1000)
+ E(GSP_INIT_DONE, 0x1001)
+ E(GSP_RUN_CPU_SEQUENCER, 0x1002)
+ E(POST_EVENT, 0x1003)
+ E(RC_TRIGGERED, 0x1004)
+ E(MMU_FAULT_QUEUED, 0x1005)
+ E(OS_ERROR_LOG, 0x1006)
+ E(RG_LINE_INTR, 0x1007)
+ E(GPUACCT_PERFMON_UTIL_SAMPLES, 0x1008)
+ E(SIM_READ, 0x1009)
+ E(SIM_WRITE, 0x100a)
+ E(SEMAPHORE_SCHEDULE_CALLBACK, 0x100b)
+ E(UCODE_LIBOS_PRINT, 0x100c)
+ E(VGPU_GSP_PLUGIN_TRIGGERED, 0x100d)
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK, 0x100e)
+ E(PERF_BRIDGELESS_INFO_UPDATE, 0x100f)
+ E(VGPU_CONFIG, 0x1010)
+ E(DISPLAY_MODESET, 0x1011)
+ E(EXTDEV_INTR_SERVICE, 0x1012)
+ E(NVLINK_INBAND_RECEIVED_DATA_256, 0x1013)
+ E(NVLINK_INBAND_RECEIVED_DATA_512, 0x1014)
+ E(NVLINK_INBAND_RECEIVED_DATA_1024, 0x1015)
+ E(NVLINK_INBAND_RECEIVED_DATA_2048, 0x1016)
+ E(NVLINK_INBAND_RECEIVED_DATA_4096, 0x1017)
+ E(TIMED_SEMAPHORE_RELEASE, 0x1018)
+ E(NVLINK_IS_GPU_DEGRADED, 0x1019)
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK, 0x101a)
+ E(NVLINK_FAULT_UP, 0x101b)
+ E(GSP_LOCKDOWN_NOTICE, 0x101c)
+ E(MIG_CI_CONFIG_UPDATE, 0x101d)
+ E(UPDATE_GSP_TRACE, 0x101e)
+ E(NVLINK_FATAL_ERROR_RECOVERY, 0x101f)
+ E(GSP_POST_NOCAT_RECORD, 0x1020)
+ E(FECS_ERROR, 0x1021)
+ E(RECOVERY_ACTION, 0x1022)
+ E(NUM_EVENTS, 0x1023)
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
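+
+/*
+ * Illustrative use of the X-macro list above (an assumption, not RM code):
+ * a client that defines E() before the first include of this header gets
+ * the event list expanded with its own macro instead of the enum, e.g. to
+ * build a name table:
+ *
+ *   #define E(RPC, VAL) [VAL - 0x1000] = #RPC,
+ *   static const char *const nv_vgpu_msg_event_names[] = {
+ *   #include "msgfn.h"
+ *   };
+ *   #undef E
+ */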
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
new file mode 100644
index 000000000000..fcaef7f553a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+ NvU32 engineInstance;
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
new file mode 100644
index 000000000000..2d67b598c58b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#ifndef X
+# define X(UNIT, RPC, VAL) NV_VGPU_MSG_FUNCTION_##RPC = VAL,
+# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ X(RM, NOP, 0)
+ X(RM, SET_GUEST_SYSTEM_INFO, 1)
+ X(RM, ALLOC_ROOT, 2)
+ X(RM, ALLOC_DEVICE, 3) // deprecated
+ X(RM, ALLOC_MEMORY, 4)
+ X(RM, ALLOC_CTX_DMA, 5)
+ X(RM, ALLOC_CHANNEL_DMA, 6)
+ X(RM, MAP_MEMORY, 7)
+ X(RM, BIND_CTX_DMA, 8) // deprecated
+ X(RM, ALLOC_OBJECT, 9)
+ X(RM, FREE, 10)
+ X(RM, LOG, 11)
+ X(RM, ALLOC_VIDMEM, 12)
+ X(RM, UNMAP_MEMORY, 13)
+ X(RM, MAP_MEMORY_DMA, 14)
+ X(RM, UNMAP_MEMORY_DMA, 15)
+ X(RM, GET_EDID, 16) // deprecated
+ X(RM, ALLOC_DISP_CHANNEL, 17)
+ X(RM, ALLOC_DISP_OBJECT, 18)
+ X(RM, ALLOC_SUBDEVICE, 19)
+ X(RM, ALLOC_DYNAMIC_MEMORY, 20)
+ X(RM, DUP_OBJECT, 21)
+ X(RM, IDLE_CHANNELS, 22)
+ X(RM, ALLOC_EVENT, 23)
+ X(RM, SEND_EVENT, 24) // deprecated
+ X(RM, REMAPPER_CONTROL, 25) // deprecated
+ X(RM, DMA_CONTROL, 26) // deprecated
+ X(RM, DMA_FILL_PTE_MEM, 27)
+ X(RM, MANAGE_HW_RESOURCE, 28)
+ X(RM, BIND_ARBITRARY_CTX_DMA, 29) // deprecated
+ X(RM, CREATE_FB_SEGMENT, 30)
+ X(RM, DESTROY_FB_SEGMENT, 31)
+ X(RM, ALLOC_SHARE_DEVICE, 32)
+ X(RM, DEFERRED_API_CONTROL, 33)
+ X(RM, REMOVE_DEFERRED_API, 34)
+ X(RM, SIM_ESCAPE_READ, 35)
+ X(RM, SIM_ESCAPE_WRITE, 36)
+ X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA, 37)
+ X(RM, FREE_VIDMEM_VIRT, 38)
+ X(RM, PERF_GET_PSTATE_INFO, 39) // deprecated
+ X(RM, PERF_GET_PERFMON_SAMPLE, 40)
+ X(RM, PERF_GET_VIRTUAL_PSTATE_INFO, 41) // deprecated
+ X(RM, PERF_GET_LEVEL_INFO, 42)
+ X(RM, MAP_SEMA_MEMORY, 43)
+ X(RM, UNMAP_SEMA_MEMORY, 44)
+ X(RM, SET_SURFACE_PROPERTIES, 45)
+ X(RM, CLEANUP_SURFACE, 46)
+ X(RM, UNLOADING_GUEST_DRIVER, 47)
+ X(RM, TDR_SET_TIMEOUT_STATE, 48)
+ X(RM, SWITCH_TO_VGA, 49)
+ X(RM, GPU_EXEC_REG_OPS, 50)
+ X(RM, GET_STATIC_INFO, 51)
+ X(RM, ALLOC_VIRTMEM, 52)
+ X(RM, UPDATE_PDE_2, 53)
+ X(RM, SET_PAGE_DIRECTORY, 54)
+ X(RM, GET_STATIC_PSTATE_INFO, 55)
+ X(RM, TRANSLATE_GUEST_GPU_PTES, 56)
+ X(RM, RESERVED_57, 57)
+ X(RM, RESET_CURRENT_GR_CONTEXT, 58)
+ X(RM, SET_SEMA_MEM_VALIDATION_STATE, 59)
+ X(RM, GET_ENGINE_UTILIZATION, 60)
+ X(RM, UPDATE_GPU_PDES, 61)
+ X(RM, GET_ENCODER_CAPACITY, 62)
+ X(RM, VGPU_PF_REG_READ32, 63) // deprecated
+ X(RM, SET_GUEST_SYSTEM_INFO_EXT, 64)
+ X(GSP, GET_GSP_STATIC_INFO, 65)
+ X(RM, RMFS_INIT, 66) // deprecated
+ X(RM, RMFS_CLOSE_QUEUE, 67) // deprecated
+ X(RM, RMFS_CLEANUP, 68) // deprecated
+ X(RM, RMFS_TEST, 69) // deprecated
+ X(RM, UPDATE_BAR_PDE, 70)
+ X(RM, CONTINUATION_RECORD, 71)
+ X(RM, GSP_SET_SYSTEM_INFO, 72)
+ X(RM, SET_REGISTRY, 73)
+ X(GSP, GSP_INIT_POST_OBJGPU, 74) // deprecated
+ X(RM, SUBDEV_EVENT_SET_NOTIFICATION, 75) // deprecated
+ X(GSP, GSP_RM_CONTROL, 76)
+ X(RM, GET_STATIC_INFO2, 77)
+ X(RM, DUMP_PROTOBUF_COMPONENT, 78)
+ X(RM, UNSET_PAGE_DIRECTORY, 79)
+ X(RM, GET_CONSOLIDATED_STATIC_INFO, 80) // deprecated
+ X(RM, GMMU_REGISTER_FAULT_BUFFER, 81) // deprecated
+ X(RM, GMMU_UNREGISTER_FAULT_BUFFER, 82) // deprecated
+ X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, 83) // deprecated
+ X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, 84) // deprecated
+ X(RM, CTRL_SET_VGPU_FB_USAGE, 85)
+ X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO, 86)
+ X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO, 87)
+ X(RM, CTRL_RESET_CHANNEL, 88)
+ X(RM, CTRL_RESET_ISOLATED_CHANNEL, 89)
+ X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT, 90)
+ X(RM, CTRL_CLK_GET_EXTENDED_INFO, 91)
+ X(RM, CTRL_PERF_BOOST, 92)
+ X(RM, CTRL_PERF_VPSTATES_GET_CONTROL, 93)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE, 94)
+ X(RM, CTRL_SET_ZBC_COLOR_CLEAR, 95)
+ X(RM, CTRL_SET_ZBC_DEPTH_CLEAR, 96)
+ X(RM, CTRL_GPFIFO_SCHEDULE, 97)
+ X(RM, CTRL_SET_TIMESLICE, 98)
+ X(RM, CTRL_PREEMPT, 99)
+ X(RM, CTRL_FIFO_DISABLE_CHANNELS, 100)
+ X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL, 101)
+ X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL, 102)
+ X(GSP, GSP_RM_ALLOC, 103)
+ X(RM, CTRL_GET_P2P_CAPS_V2, 104)
+ X(RM, CTRL_CIPHER_AES_ENCRYPT, 105)
+ X(RM, CTRL_CIPHER_SESSION_KEY, 106)
+ X(RM, CTRL_CIPHER_SESSION_KEY_STATUS, 107)
+ X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES, 108)
+ X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES, 109)
+ X(RM, CTRL_DBG_SET_EXCEPTION_MASK, 110)
+ X(RM, CTRL_GPU_PROMOTE_CTX, 111)
+ X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND, 112)
+ X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE, 113)
+ X(RM, CTRL_GR_CTXSW_ZCULL_BIND, 114)
+ X(RM, CTRL_GPU_INITIALIZE_CTX, 115)
+ X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES, 116)
+ X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT, 117)
+ X(RM, CTRL_GET_LATEST_ECC_ADDRESSES, 118)
+ X(RM, CTRL_MC_SERVICE_INTERRUPTS, 119)
+ X(RM, CTRL_DMA_SET_DEFAULT_VASPACE, 120)
+ X(RM, CTRL_GET_CE_PCE_MASK, 121)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY, 122)
+ X(RM, CTRL_GET_NVLINK_PEER_ID_MASK, 123) // deprecated
+ X(RM, CTRL_GET_NVLINK_STATUS, 124)
+ X(RM, CTRL_GET_P2P_CAPS, 125)
+ X(RM, CTRL_GET_P2P_CAPS_MATRIX, 126)
+ X(RM, RESERVED_0, 127)
+ X(RM, CTRL_RESERVE_PM_AREA_SMPC, 128)
+ X(RM, CTRL_RESERVE_HWPM_LEGACY, 129)
+ X(RM, CTRL_B0CC_EXEC_REG_OPS, 130)
+ X(RM, CTRL_BIND_PM_RESOURCES, 131)
+ X(RM, CTRL_DBG_SUSPEND_CONTEXT, 132)
+ X(RM, CTRL_DBG_RESUME_CONTEXT, 133)
+ X(RM, CTRL_DBG_EXEC_REG_OPS, 134)
+ X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG, 135)
+ X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE, 136)
+ X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE, 137)
+ X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG, 138)
+ X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE, 139)
+ X(RM, CTRL_ALLOC_PMA_STREAM, 140)
+ X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT, 141)
+ X(RM, CTRL_FB_GET_INFO_V2, 142)
+ X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES, 143)
+ X(RM, CTRL_GR_GET_CTX_BUFFER_INFO, 144)
+ X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES, 145)
+ X(RM, CTRL_GPU_EVICT_CTX, 146)
+ X(RM, CTRL_FB_GET_FS_INFO, 147)
+ X(RM, CTRL_GRMGR_GET_GR_FS_INFO, 148)
+ X(RM, CTRL_STOP_CHANNEL, 149)
+ X(RM, CTRL_GR_PC_SAMPLING_MODE, 150)
+ X(RM, CTRL_PERF_RATED_TDP_GET_STATUS, 151)
+ X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL, 152)
+ X(RM, CTRL_FREE_PMA_STREAM, 153)
+ X(RM, CTRL_TIMER_SET_GR_TICK_FREQ, 154)
+ X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, 155)
+ X(RM, GET_CONSOLIDATED_GR_STATIC_INFO, 156)
+ X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP, 157)
+ X(RM, CTRL_GR_GET_TPC_PARTITION_MODE, 158)
+ X(RM, CTRL_GR_SET_TPC_PARTITION_MODE, 159)
+ X(UVM, UVM_PAGING_CHANNEL_ALLOCATE, 160)
+ X(UVM, UVM_PAGING_CHANNEL_DESTROY, 161)
+ X(UVM, UVM_PAGING_CHANNEL_MAP, 162)
+ X(UVM, UVM_PAGING_CHANNEL_UNMAP, 163)
+ X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM, 164)
+ X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES, 165)
+ X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION, 166)
+ X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL, 167)
+ X(RM, DCE_RM_INIT, 168)
+ X(RM, REGISTER_VIRTUAL_EVENT_BUFFER, 169)
+ X(RM, CTRL_EVENT_BUFFER_UPDATE_GET, 170)
+ X(RM, GET_PLCABLE_ADDRESS_KIND, 171)
+ X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2, 172)
+ X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM, 173)
+ X(RM, CTRL_GET_MMU_DEBUG_MODE, 174)
+ X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, 175)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE, 176)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO, 177)
+ X(RM, DISABLE_CHANNELS, 178)
+ X(RM, CTRL_FABRIC_MEMORY_DESCRIBE, 179)
+ X(RM, CTRL_FABRIC_MEM_STATS, 180)
+ X(RM, SAVE_HIBERNATION_DATA, 181)
+ X(RM, RESTORE_HIBERNATION_DATA, 182)
+ X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, 183)
+ X(RM, CTRL_EXEC_PARTITIONS_CREATE, 184)
+ X(RM, CTRL_EXEC_PARTITIONS_DELETE, 185)
+ X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN, 186)
+ X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, 187)
+ X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION, 188)
+ X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, 189)
+ X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER, 190)
+ X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS, 191)
+ X(RM, CTRL_BUS_SET_P2P_MAPPING, 192)
+ X(RM, CTRL_BUS_UNSET_P2P_MAPPING, 193)
+ X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK, 194)
+ X(RM, CTRL_GPU_MIGRATABLE_OPS, 195)
+ X(RM, CTRL_GET_TOTAL_HS_CREDITS, 196)
+ X(RM, CTRL_GET_HS_CREDITS, 197)
+ X(RM, CTRL_SET_HS_CREDITS, 198)
+ X(RM, CTRL_PM_AREA_PC_SAMPLER, 199)
+ X(RM, INVALIDATE_TLB, 200)
+ X(RM, CTRL_GPU_QUERY_ECC_STATUS, 201) // deprecated
+ X(RM, ECC_NOTIFIER_WRITE_ACK, 202)
+ X(RM, CTRL_DBG_GET_MODE_MMU_DEBUG, 203)
+ X(RM, RM_API_CONTROL, 204)
+ X(RM, CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE, 205)
+ X(RM, CTRL_NVLINK_GET_INBAND_RECEIVED_DATA, 206)
+ X(RM, GET_STATIC_DATA, 207)
+ X(RM, RESERVED_208, 208)
+ X(RM, CTRL_GPU_GET_INFO_V2, 209)
+ X(RM, GET_BRAND_CAPS, 210)
+ X(RM, CTRL_CMD_NVLINK_INBAND_SEND_DATA, 211)
+ X(RM, UPDATE_GPM_GUEST_BUFFER_INFO, 212)
+ X(RM, CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE, 213)
+ X(RM, CTRL_SET_ZBC_STENCIL_CLEAR, 214)
+ X(RM, CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS, 215)
+ X(RM, CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS, 216)
+ X(RM, CTRL_DBG_SET_MODE_MMU_GCC_DEBUG, 217)
+ X(RM, CTRL_DBG_GET_MODE_MMU_GCC_DEBUG, 218)
+ X(RM, CTRL_RESERVE_HES, 219)
+ X(RM, CTRL_RELEASE_HES, 220)
+ X(RM, CTRL_RESERVE_CCU_PROF, 221)
+ X(RM, CTRL_RELEASE_CCU_PROF, 222)
+ X(RM, RESERVED, 223)
+ X(RM, CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL, 224)
+ X(RM, CTRL_CMD_GET_HS_CREDITS_MAPPING, 225)
+ X(RM, CTRL_EXEC_PARTITIONS_EXPORT, 226)
+ X(RM, NUM_FUNCTIONS, 227)
+#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef X
+# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+#endif
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
new file mode 100644
index 000000000000..6fb3083edde3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ofa.h"
+
+static int
+r570_ofa_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(parent, handle, oclass, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
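+	/* The allocation parameters carry their own size and the target
+	 * engine instance. */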
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
+}
+
+const struct nvkm_rm_api_engine
+r570_ofa = {
+ .alloc = r570_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
new file mode 100644
index 000000000000..498658d0c60c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
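+/*
+ * Per-chip WPR layout parameters for the 570.144 firmware, consumed by the
+ * WPR heap sizing and FB layout code (see tu102_gsp_wpr_heap_size()).
+ */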
+static const struct nvkm_rm_wpr
+r570_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gh100 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb10x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb20x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x220000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_api
+r570_api = {
+ .gsp = &r570_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r570_client,
+ .device = &r535_device,
+ .fbsr = &r570_fbsr,
+ .disp = &r570_disp,
+ .fifo = &r570_fifo,
+ .ce = &r535_ce,
+ .gr = &r570_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r570_ofa,
+};
+
+const struct nvkm_rm_impl
+r570_rm_tu102 = {
+ .wpr = &r570_wpr_libos2,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_ga102 = {
+ .wpr = &r570_wpr_libos3,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gh100 = {
+ .wpr = &r570_wpr_libos3_gh100,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb10x = {
+ .wpr = &r570_wpr_libos3_gb10x,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb20x = {
+ .wpr = &r570_wpr_libos3_gb20x,
+ .api = &r570_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
new file mode 100644
index 000000000000..393ea775941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/gsp.h>
+#ifndef __NVKM_RM_H__
+#define __NVKM_RM_H__
+#include "handles.h"
+struct nvkm_outp;
+struct r535_gr;
+
+struct nvkm_rm_impl {
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
+
+struct nvkm_rm {
+ struct nvkm_device *device;
+ const struct nvkm_rm_gpu *gpu;
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
+
+struct nvkm_rm_wpr {
+ u32 os_carveout_size;
+ u32 base_size;
+ u64 heap_size_min;
+ u32 heap_size_non_wpr;
+ u32 rsvd_size_pmu;
+ bool offset_set_by_acr;
+};
+
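+/* Function tables abstracting the RPC/API differences between GSP-RM
+ * firmware versions (r535 vs r570). */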
+struct nvkm_rm_api {
+ const struct nvkm_rm_api_gsp {
+ void (*set_rmargs)(struct nvkm_gsp *, bool resume);
+ int (*set_system_info)(struct nvkm_gsp *);
+ int (*get_static_info)(struct nvkm_gsp *);
+ bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst);
+ void (*drop_send_user_shared_data)(struct nvkm_gsp *);
+ void (*drop_post_nocat_record)(struct nvkm_gsp *);
+ u32 (*sr_data_size)(struct nvkm_gsp *);
+ } *gsp;
+
+ const struct nvkm_rm_api_rpc {
+ void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc);
+ void *(*push)(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc);
+ void (*done)(struct nvkm_gsp *gsp, void *repv);
+ } *rpc;
+
+ const struct nvkm_rm_api_ctrl {
+ void *(*get)(struct nvkm_gsp_object *, u32 cmd, u32 params_size);
+ int (*push)(struct nvkm_gsp_object *, void **params, u32 repc);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+ } *ctrl;
+
+ const struct nvkm_rm_api_alloc {
+ void *(*get)(struct nvkm_gsp_object *, u32 oclass, u32 params_size);
+ void *(*push)(struct nvkm_gsp_object *, void *params);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+
+ int (*free)(struct nvkm_gsp_object *);
+ } *alloc;
+
+ const struct nvkm_rm_api_client {
+ int (*ctor)(struct nvkm_gsp_client *, u32 handle);
+ } *client;
+
+ const struct nvkm_rm_api_device {
+ int (*ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
+ void (*dtor)(struct nvkm_gsp_device *);
+
+ struct {
+ int (*ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
+ nvkm_gsp_event_func, struct nvkm_gsp_event *);
+ void (*dtor)(struct nvkm_gsp_event *);
+ } event;
+ } *device;
+
+ const struct nvkm_rm_api_fbsr {
+ int (*suspend)(struct nvkm_gsp *);
+ void (*resume)(struct nvkm_gsp *);
+ } *fbsr;
+
+ const struct nvkm_rm_api_disp {
+ int (*get_static_info)(struct nvkm_disp *);
+ int (*get_supported)(struct nvkm_disp *, unsigned long *display_mask);
+ int (*get_connect_state)(struct nvkm_disp *, unsigned display_id);
+ int (*get_active)(struct nvkm_disp *, unsigned head, u32 *display_id);
+
+ int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val);
+
+ struct {
+ int (*get_caps)(struct nvkm_disp *, int *link_bw, bool *mst, bool *wm);
+ int (*set_indexed_link_rates)(struct nvkm_outp *);
+ } dp;
+
+ struct {
+ int (*set_pushbuf)(struct nvkm_disp *, s32 oclass, int inst,
+ struct nvkm_memory *);
+ int (*dmac_alloc)(struct nvkm_disp *, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *disp;
+
+ const struct nvkm_rm_api_fifo {
+ int (*xlat_rm_engine_type)(u32 rm_engine_type,
+ enum nvkm_subdev_type *, int *nv2080_type);
+ int (*ectx_size)(struct nvkm_fifo *);
+ unsigned rsvd_chids;
+ int (*rc_triggered)(void *priv, u32 fn, void *repv, u32 repc);
+ struct {
+ int (*alloc)(struct nvkm_gsp_device *, u32 handle,
+ u32 nv2080_engine_type, u8 runq, bool priv, int chid,
+ u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *fifo;
+
+ const struct nvkm_rm_api_engine {
+ int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *);
+ } *ce, *nvdec, *nvenc, *nvjpg, *ofa;
+
+ const struct nvkm_rm_api_gr {
+ int (*get_ctxbufs_info)(struct r535_gr *);
+ struct {
+ int (*init)(struct r535_gr *);
+ void (*fini)(struct r535_gr *);
+ } scrubber;
+ } *gr;
+};
+
+extern const struct nvkm_rm_impl r535_rm_tu102;
+extern const struct nvkm_rm_impl r535_rm_ga102;
+extern const struct nvkm_rm_api_gsp r535_gsp;
+typedef struct DOD_METHOD_DATA DOD_METHOD_DATA;
+typedef struct JT_METHOD_DATA JT_METHOD_DATA;
+typedef struct CAPS_METHOD_DATA CAPS_METHOD_DATA;
+void r535_gsp_acpi_dod(acpi_handle, DOD_METHOD_DATA *);
+void r535_gsp_acpi_jt(acpi_handle, JT_METHOD_DATA *);
+void r535_gsp_acpi_caps(acpi_handle, CAPS_METHOD_DATA *);
+struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+void r535_gsp_get_static_info_fb(struct nvkm_gsp *,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *);
+extern const struct nvkm_rm_api_rpc r535_rpc;
+extern const struct nvkm_rm_api_ctrl r535_ctrl;
+extern const struct nvkm_rm_api_alloc r535_alloc;
+extern const struct nvkm_rm_api_client r535_client;
+void r535_gsp_client_dtor(struct nvkm_gsp_client *);
+extern const struct nvkm_rm_api_device r535_device;
+int r535_mmu_vaspace_new(struct nvkm_vmm *, u32 handle, bool external);
+void r535_mmu_vaspace_del(struct nvkm_vmm *);
+extern const struct nvkm_rm_api_fbsr r535_fbsr;
+void r535_fbsr_resume(struct nvkm_gsp *);
+int r535_fbsr_memlist(struct nvkm_gsp_device *, u32 handle, enum nvkm_memory_target,
+ u64 phys, u64 size, struct sg_table *, struct nvkm_gsp_object *);
+extern const struct nvkm_rm_api_disp r535_disp;
+extern const struct nvkm_rm_api_fifo r535_fifo;
+void r535_fifo_rc_chid(struct nvkm_fifo *, int chid);
+extern const struct nvkm_rm_api_engine r535_ce;
+extern const struct nvkm_rm_api_gr r535_gr;
+void *r535_gr_dtor(struct nvkm_gr *);
+int r535_gr_oneinit(struct nvkm_gr *);
+u64 r535_gr_units(struct nvkm_gr *);
+int r535_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *, const struct nvkm_oclass *,
+ struct nvkm_object **);
+int r535_gr_promote_ctx(struct r535_gr *, bool golden, struct nvkm_vmm *,
+ struct nvkm_memory **pctxbuf_mem, struct nvkm_vma **pctxbuf_vma,
+ struct nvkm_gsp_object *chan);
+extern const struct nvkm_rm_api_engine r535_nvdec;
+extern const struct nvkm_rm_api_engine r535_nvenc;
+extern const struct nvkm_rm_api_engine r535_nvjpg;
+extern const struct nvkm_rm_api_engine r535_ofa;
+
+extern const struct nvkm_rm_impl r570_rm_tu102;
+extern const struct nvkm_rm_impl r570_rm_ga102;
+extern const struct nvkm_rm_impl r570_rm_gh100;
+extern const struct nvkm_rm_impl r570_rm_gb10x;
+extern const struct nvkm_rm_impl r570_rm_gb20x;
+extern const struct nvkm_rm_api_gsp r570_gsp;
+extern const struct nvkm_rm_api_client r570_client;
+extern const struct nvkm_rm_api_fbsr r570_fbsr;
+extern const struct nvkm_rm_api_disp r570_disp;
+extern const struct nvkm_rm_api_fifo r570_fifo;
+extern const struct nvkm_rm_api_gr r570_gr;
+int r570_gr_gpc_mask(struct nvkm_gsp *, u32 *mask);
+int r570_gr_tpc_mask(struct nvkm_gsp *, int gpc, u32 *mask);
+extern const struct nvkm_rm_api_engine r570_ofa;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
new file mode 100644
index 000000000000..4431e33b3304
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_RPC_H__
+#define __NVKM_RM_RPC_H__
+#include "rm.h"
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
+
+int r535_gsp_rpc_poll(struct nvkm_gsp *, u32 fn);
+
+struct nvfw_gsp_rpc *r535_gsp_msg_recv(struct nvkm_gsp *, int fn, u32 gsp_rpc_len);
+int r535_gsp_msg_ntfy_add(struct nvkm_gsp *, u32 fn, nvkm_gsp_msg_ntfy_func, void *priv);
+
+int r535_rpc_status_to_errno(uint32_t rpc_status);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
new file mode 100644
index 000000000000..423502f870db
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+tu1xx_gpu = {
+ .disp.class = {
+ .root = TU102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = TU102_DISP_CORE_CHANNEL_DMA,
+ .wndw = TU102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = TU102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = TU102_DISP_CURSOR,
+ },
+
+ .usermode.class = TURING_USERMODE_A,
+
+ .fifo.chan = {
+ .class = TURING_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = TURING_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = TURING_A,
+ .compute = TURING_COMPUTE_A,
+ },
+ .nvdec.class = NVC4B0_VIDEO_DECODER,
+ .nvenc.class = NVC4B7_VIDEO_ENCODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 59c5f2b9172a..58e233bc53b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -22,11 +22,45 @@
#include "priv.h"
#include <subdev/fb.h>
+#include <engine/sec2.h>
+
+#include <rm/r535/nvrm/gsp.h>
#include <nvfw/flcn.h>
#include <nvfw/fw.h>
#include <nvfw/hs.h>
+static int
+tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 wpr2_hi;
+ int ret;
+
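+	/* A zero WPR2 upper bound means WPR2 was never set up, so there is
+	 * nothing for the unload booter to tear down. */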
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (!wpr2_hi) {
+ nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
+ return 0;
+ }
+
+ ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+ if (WARN_ON(ret))
+ return ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (WARN_ON(wpr2_hi))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+tu102_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ return nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+}
+
int
tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
@@ -114,6 +148,118 @@ tu102_gsp_reset(struct nvkm_gsp *gsp)
return gsp->falcon.func->reset_eng(&gsp->falcon);
}
+int
+tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ u32 mbox0 = 0xff, mbox1 = 0xff;
+ int ret;
+
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ nvkm_falcon_reset(&gsp->falcon);
+
+ ret = nvkm_gsp_fwsec_sb(gsp);
+ WARN_ON(ret);
+
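+	/* When suspending, pass the address of the suspend/resume metadata
+	 * to the unload booter via its mailboxes. */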
+ if (suspend) {
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ ret = tu102_gsp_booter_unload(gsp, mbox0, mbox1);
+ WARN_ON(ret);
+ return 0;
+}
+
+int
+tu102_gsp_init(struct nvkm_gsp *gsp)
+{
+ u32 mbox0, mbox1;
+ int ret;
+
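+	/* Cold boot hands the booter the WPR metadata address; resume hands
+	 * it the suspend/resume metadata saved by tu102_gsp_fini(). */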
+ if (!gsp->sr.meta.data) {
+ mbox0 = lower_32_bits(gsp->wpr_meta.addr);
+ mbox1 = upper_32_bits(gsp->wpr_meta.addr);
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ /* Execute booter to handle (eventually...) booting GSP-RM. */
+ ret = tu102_gsp_booter_load(gsp, mbox0, mbox1);
+ if (WARN_ON(ret))
+ return ret;
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+tu102_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ meta = gsp->wpr_meta.data;
+
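+	/* Describe the WPR2 layout and the sysmem images (ELF, bootloader,
+	 * signatures) that GSP-RM's boot code needs to locate. */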
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+ meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
+
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->gspFwRsvdStart = gsp->fb.heap.addr;
+ meta->nonWprHeapOffset = gsp->fb.heap.addr;
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwWprStart = gsp->fb.wpr2.addr;
+ meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
+ meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
+ meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
+ meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
+ meta->frtsOffset = gsp->fb.wpr2.frts.addr;
+ meta->frtsSize = gsp->fb.wpr2.frts.size;
+ meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
+ meta->fbSize = gsp->fb.size;
+ meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->bootCount = 0;
+ meta->partitionRpcAddr = 0;
+ meta->partitionRpcRequestOffset = 0;
+ meta->partitionRpcReplyOffset = 0;
+ meta->verified = 0;
+ return 0;
+}
+
+u64
+tu102_gsp_wpr_heap_size(struct nvkm_gsp *gsp)
+{
+ u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
+ u64 heap_size;
+
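+	/* Heap requirement scales with FB size; clamp to the per-chip
+	 * minimum from the WPR parameters. */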
+ heap_size = gsp->rm->wpr->os_carveout_size +
+ gsp->rm->wpr->base_size +
+ ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
+ ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
+
+ return max(heap_size, gsp->rm->wpr->heap_size_min);
+}
+
static u64
tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
{
@@ -136,14 +282,67 @@ tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
int
tu102_gsp_oneinit(struct nvkm_gsp *gsp)
{
- gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ struct nvkm_device *device = gsp->subdev.device;
+ int ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(device);
gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
- return r535_gsp_oneinit(gsp);
+ ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
+ &device->sec2->falcon, &gsp->booter.load);
+ if (ret)
+ return ret;
+
+ ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
+ &device->sec2->falcon, &gsp->booter.unload);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ /* Calculate FB layout. */
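+	/* Regions are placed top-down beneath the VBIOS/VGA workspace:
+	 * FRTS, boot binary, GSP-RM ELF, then the WPR heap. */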
+ gsp->fb.wpr2.frts.size = 0x100000;
+ gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
+
+ gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
+ gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
+
+ gsp->fb.wpr2.elf.size = gsp->fw.len;
+ gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
+
+ gsp->fb.wpr2.heap.size = tu102_gsp_wpr_heap_size(gsp);
+
+ gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
+ gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
+
+ gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
+ gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
+
+ gsp->fb.heap.size = 0x100000;
+ gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
+
+ ret = tu102_gsp_wpr_meta_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_fwsec_frts(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ /* Reset GSP into RISC-V mode. */
+ ret = gsp->func->reset(gsp);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+ nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+ return 0;
}
const struct nvkm_falcon_func
@@ -163,29 +362,73 @@ tu102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-tu102_gsp_r535_113_01 = {
+tu102_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu10x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
+int
+tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ bool enable_gsp = fwif->enable;
+ int ret;
+
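+	/* GSP-RM is opt-in unless the kernel config defaults it on; the
+	 * NvGspRm= option overrides in either direction. */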
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
+ return -EINVAL;
+
+ ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+tu102_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
static struct nvkm_gsp_fwif
tu102_gsps[] = {
- { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu102_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu102_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -196,3 +439,11 @@ tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
index 04fbd9ed28b1..97eb046c25d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -22,29 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-tu116_gsp_r535_113_01 = {
+tu116_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu11x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
static struct nvkm_gsp_fwif
tu116_gsps[] = {
- { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu116_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu116_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -55,3 +53,9 @@ tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
index 553d540f2736..fa7a2862dd1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
@@ -4,5 +4,4 @@ nvkm-y += nvkm/subdev/instmem/nv04.o
nvkm-y += nvkm/subdev/instmem/nv40.o
nvkm-y += nvkm/subdev/instmem/nv50.o
nvkm-y += nvkm/subdev/instmem/gk20a.o
-
-nvkm-y += nvkm/subdev/instmem/r535.o
+nvkm-y += nvkm/subdev/instmem/gh100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index a2cd3330efc6..2f55bab8e132 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -182,9 +182,11 @@ nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
int ret;
if (suspend) {
- ret = imem->func->suspend(imem);
- if (ret)
- return ret;
+ if (imem->func->suspend) {
+ ret = imem->func->suspend(imem);
+ if (ret)
+ return ret;
+ }
imem->suspend = true;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c
new file mode 100644
index 000000000000..8d8dd5f8a6c7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/ref/gh100/pri_nv_xal_ep.h>
+
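+/*
+ * GH100 moves the BAR0 PRAMIN window control from the legacy 0x001700
+ * register (see nv50_instmem_set_bar0_window_addr()) into the XAL_EP block.
+ */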
+static void
+gh100_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr)
+{
+ nvkm_wr32(device, NV_XAL_EP_BAR0_WINDOW, addr >> NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT);
+}
+
+static const struct nvkm_instmem_func
+gh100_instmem = {
+ .fini = nv50_instmem_fini,
+ .memory_new = nv50_instobj_new,
+ .memory_wrap = nv50_instobj_wrap,
+ .set_bar0_window_addr = gh100_instmem_set_bar0_window_addr,
+};
+
+int
+gh100_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pimem)
+{
+ return r535_instmem_new(&gh100_instmem, device, type, inst, pimem);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index 6b462f960922..2544b9f0ec85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -239,7 +239,6 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
struct nvkm_instmem **pimem)
{
struct nv40_instmem *imem;
- int bar;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
@@ -247,13 +246,8 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
*pimem = &imem->base;
/* map bar */
- if (device->func->resource_size(device, 2))
- bar = 2;
- else
- bar = 3;
-
- imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
- device->func->resource_size(device, bar));
+ imem->iomem = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST),
+ device->func->resource_size(device, NVKM_BAR2_INST));
if (!imem->iomem) {
nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index dd5b5a17ece0..4ca6fb30743d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -65,7 +65,7 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
nvkm_wr32(device, 0x700000 + addr, data);
@@ -85,7 +85,7 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
data = nvkm_rd32(device, 0x700000 + addr);
@@ -172,7 +172,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
/* Make the mapping visible to the host. */
iobj->bar = bar;
- iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
+ iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) +
(u32)iobj->bar->addr, size);
if (!iobj->map) {
nvkm_warn(subdev, "PRAMIN ioremap failed\n");
@@ -353,7 +353,7 @@ nv50_instobj_func = {
.map = nv50_instobj_map,
};
-static int
+int
nv50_instobj_wrap(struct nvkm_instmem *base,
struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
@@ -373,7 +373,7 @@ nv50_instobj_wrap(struct nvkm_instmem *base,
return 0;
}
-static int
+int
nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
@@ -395,6 +395,12 @@ nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
*****************************************************************************/
static void
+nv50_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr)
+{
+ nvkm_wr32(device, 0x001700, addr >> 16);
+}
+
+void
nv50_instmem_fini(struct nvkm_instmem *base)
{
nv50_instmem(base)->addr = ~0ULL;
@@ -415,6 +421,7 @@ nv50_instmem = {
.memory_new = nv50_instobj_new,
.memory_wrap = nv50_instobj_wrap,
.zero = false,
+ .set_bar0_window_addr = nv50_instmem_set_bar0_window_addr,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index 4c14c96fb60a..87bbdd786eaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -16,10 +16,16 @@ struct nvkm_instmem_func {
bool zero, struct nvkm_memory **);
int (*memory_wrap)(struct nvkm_instmem *, struct nvkm_memory *, struct nvkm_memory **);
bool zero;
+ void (*set_bar0_window_addr)(struct nvkm_device *, u64 addr);
};
int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_instmem **);
+void nv50_instmem_fini(struct nvkm_instmem *);
+int nv50_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+ struct nvkm_memory **);
+int nv50_instobj_wrap(struct nvkm_instmem *, struct nvkm_memory *vram,
+ struct nvkm_memory **bar2);
void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_instmem *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 7ba35ea59c06..ea4848931540 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -15,8 +15,7 @@ nvkm-y += nvkm/subdev/mmu/gp100.o
nvkm-y += nvkm/subdev/mmu/gp10b.o
nvkm-y += nvkm/subdev/mmu/gv100.o
nvkm-y += nvkm/subdev/mmu/tu102.o
-
-nvkm-y += nvkm/subdev/mmu/r535.o
+nvkm-y += nvkm/subdev/mmu/gh100.o
nvkm-y += nvkm/subdev/mmu/mem.o
nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -38,6 +37,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgp100.o
nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
nvkm-y += nvkm/subdev/mmu/vmmgv100.o
nvkm-y += nvkm/subdev/mmu/vmmtu102.o
+nvkm-y += nvkm/subdev/mmu/vmmgh100.o
nvkm-y += nvkm/subdev/mmu/umem.o
nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c
new file mode 100644
index 000000000000..2918fb32cc91
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gh100_mmu = {
+ .dma_bits = 52,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gh100_vmm_new },
+ .kind = tu102_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gh100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return r535_mmu_new(&gh100_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
index d9c9bee45222..160a5749a29f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
@@ -60,7 +60,7 @@ gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if (ret)
return ret;
- *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
*psize = (*pvma)->size;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
index 79a3b0cc9f5b..1e3db52de6cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
@@ -41,7 +41,7 @@ nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
return ret;
- *paddr = device->func->resource_addr(device, 1) + addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + addr;
*psize = nvkm_memory_size(memory);
*pvma = ERR_PTR(-ENODEV);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
index 46759b89fc1f..33b2321e9d87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
@@ -57,7 +57,7 @@ nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if (ret)
return ret;
- *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
*psize = (*pvma)->size;
return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index e9ca6537778c..90efef8f0b54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -53,6 +53,8 @@ const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
+const u8 *tu102_mmu_kind(struct nvkm_mmu *, int *, u8 *);
+
struct nvkm_mmu_pt {
union {
struct nvkm_mmu_ptc *ptc;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index df662ce4a4b0..7acff3642e20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -28,7 +28,7 @@
#include <nvif/class.h>
-static const u8 *
+const u8 *
tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 9c97800fe037..f95c58b67633 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -19,7 +19,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#define NVKM_VMM_LEVELS_MAX 5
+#define NVKM_VMM_LEVELS_MAX 6
#include "vmm.h"
#include <subdev/fb.h>
@@ -1030,12 +1030,8 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
struct nvkm_vma *vma;
struct rb_node *node;
- if (vmm->rm.client.gsp) {
- nvkm_gsp_rm_free(&vmm->rm.object);
- nvkm_gsp_device_dtor(&vmm->rm.device);
- nvkm_gsp_client_dtor(&vmm->rm.client);
- nvkm_vmm_put(vmm, &vmm->rm.rsvd);
- }
+ if (vmm->rm.client.gsp)
+ r535_mmu_vaspace_del(vmm);
if (0)
nvkm_vmm_dump(vmm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index f9bc30cdb2b3..4586a425dbe4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -143,6 +143,8 @@ struct nvkm_vmm_func {
int (*aper)(enum nvkm_memory_target);
int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
struct nvkm_vmm_map *);
+ int (*valid2)(struct nvkm_vmm *, bool ro, bool priv, u8 kind, u8 comp,
+ struct nvkm_vmm_map *);
void (*flush)(struct nvkm_vmm *, int depth);
int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
@@ -254,6 +256,8 @@ void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void tu102_vmm_flush(struct nvkm_vmm *, int depth);
+
int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
@@ -296,6 +300,9 @@ int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
+int gh100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
#define VMM_PRINT(l,v,p,f,a...) do { \
struct nvkm_vmm *_vmm = (v); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
new file mode 100644
index 000000000000..5614df3432da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_mmu.h>
+
+static inline void
+gh100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = addr | map->type;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+ data += map->next;
+ }
+}
+
+static void
+gh100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = *map->dma++ | map->type;
+
+ VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, SPARSE);
+
+ VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .mem = gh100_vmm_pgt_mem,
+ .dma = gh100_vmm_pgt_dma,
+ .sgl = gh100_vmm_pgt_sgl,
+};
+
+static void
+gh100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, NO_VALID_4KB_PAGE);
+
+ VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_lpt = {
+ .invalid = gh100_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .mem = gh100_vmm_pgt_mem,
+};
+
+static inline void
+gh100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = addr | map->type;
+
+ while (ptes--) {
+ VMM_WO128(pt, vmm, ptei++ * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL);
+ data += map->next;
+ }
+}
+
+static void
+gh100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pd0_pte);
+}
+
+static inline bool
+gh100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
+{
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, VIDEO_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_NOT_ALLOWED);
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_COHERENT_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_UNCACHED_ATS_ALLOWED);
+ break;
+ case NVKM_MEM_TARGET_NCOH:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_NON_COHERENT_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_ALLOWED);
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ *data |= pt->addr;
+ return true;
+}
+
+static void
+gh100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data[2] = {};
+
+ if (pgt->pt[0] && !gh100_vmm_pde(pgt->pt[0], &data[0]))
+ return;
+ if (pgt->pt[1] && !gh100_vmm_pde(pgt->pt[1], &data[1]))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO128(pd, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data[0], data[1]);
+ nvkm_done(pd->memory);
+}
+
+static void
+gh100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_DUAL_PDE, PCF_BIG, SPARSE_ATS_ALLOWED);
+
+ VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL, pdes);
+}
+
+static void
+gh100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, 0ULL, 0ULL, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd0 = {
+ .unmap = gh100_vmm_pd0_unmap,
+ .sparse = gh100_vmm_pd0_sparse,
+ .pde = gh100_vmm_pd0_pde,
+ .mem = gh100_vmm_pd0_mem,
+};
+
+static void
+gh100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data = 0;
+
+ if (!gh100_vmm_pde(pgt->pt[0], &data))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * NV_MMU_VER3_PDE__SIZE, data);
+ nvkm_done(pd->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd1 = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .pde = gh100_vmm_pd1_pde,
+};
+
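+/*
+ * Hopper's version-3 MMU adds another PD level; these six-entry layouts
+ * are why NVKM_VMM_LEVELS_MAX is bumped to 6 in vmm.c.
+ */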
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_16[] = {
+ { LPT, 5, 8, 0x0100, &gh100_vmm_desc_lpt },
+ { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_12[] = {
+ { SPT, 9, 8, 0x1000, &gh100_vmm_desc_spt },
+ { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ {}
+};
+
+static int
+gh100_vmm_valid(struct nvkm_vmm *vmm, bool ro, bool priv, u8 kind, u8 comp,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const bool vol = target == NVKM_MEM_TARGET_HOST;
+ const struct nvkm_vmm_page *page = map->page;
+ u8 kind_inv, pcf;
+ int kindn, aper;
+ const u8 *kindm;
+
+ map->next = 1ULL << page->shift;
+ map->type = 0;
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
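+	/* Select the PTE PCF encoding from the privileged/read-only/
+	 * volatile combination. */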
+ if (priv) {
+ if (ro) {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD;
+ } else {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD;
+ }
+ } else {
+ if (ro) {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD;
+ } else {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD;
+ }
+ }
+
+ map->type |= NVDEF(NV_MMU, VER3_PTE, VALID, TRUE);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, APERTURE, aper);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, PCF, pcf);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, KIND, kind);
+ return 0;
+}
+
+static const struct nvkm_vmm_func
+gh100_vmm = {
+ .join = gv100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .valid2 = gh100_vmm_valid,
+ .flush = tu102_vmm_flush,
+ .page = {
+ { 56, &gh100_vmm_desc_16[5], NVKM_VMM_PAGE_Sxxx },
+ { 47, &gh100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gh100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gh100_vmm_desc_16[2], NVKM_VMM_PAGE_SVxC },
+ { 21, &gh100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gh100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gh100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gh100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gp100_vmm_new_(&gh100_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index bddac77f48f0..851fd847a2a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -436,6 +436,9 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return ret;
}
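+	/* Chips with version-3 PTEs (gh100) take over from here, once the
+	 * common argument parsing above has been done. */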
+ if (vmm->func->valid2)
+ return vmm->func->valid2(vmm, ro, priv, kind, 0, map);
+
aper = vmm->func->aper(target);
if (WARN_ON(aper < 0))
return aper;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index 8379e72d77ab..4b30eab40bba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 174bdf995271..a14ea0f7b1c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/pci/gf100.o
nvkm-y += nvkm/subdev/pci/gf106.o
nvkm-y += nvkm/subdev/pci/gk104.o
nvkm-y += nvkm/subdev/pci/gp100.o
+nvkm-y += nvkm/subdev/pci/gh100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 5a0de45d36ce..6867934256a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -39,26 +39,26 @@ nvkm_pci_msi_rearm(struct nvkm_device *device)
u32
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
- return pci->func->rd32(pci, addr);
+ return nvkm_rd32(pci->subdev.device, pci->func->cfg.addr + addr);
}
void
nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
- pci->func->wr08(pci, addr, data);
+ nvkm_wr08(pci->subdev.device, pci->func->cfg.addr + addr, data);
}
void
nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
- pci->func->wr32(pci, addr, data);
+ nvkm_wr32(pci->subdev.device, pci->func->cfg.addr + addr, data);
}
u32
nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
{
- u32 data = pci->func->rd32(pci, addr);
- pci->func->wr32(pci, addr, (data & ~mask) | value);
+ u32 data = nvkm_pci_rd32(pci, addr);
+ nvkm_pci_wr32(pci, addr, (data & ~mask) | value);
return data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
index 5b29aacedef3..5308f6539a3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -132,10 +132,9 @@ g84_pcie_init(struct nvkm_pci *pci)
static const struct nvkm_pci_func
g84_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
index a9e0674009c6..8ae7aa02e675 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -33,10 +33,9 @@ g92_pcie_version_supported(struct nvkm_pci *pci)
static const struct nvkm_pci_func
g92_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 7bacd0693283..df745d0690ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -25,10 +25,9 @@
static const struct nvkm_pci_func
g94_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 099906092fe1..6ce941df87b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -78,10 +78,9 @@ gf100_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
static const struct nvkm_pci_func
gf100_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = gf100_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index bcde609ba866..712ca7e0959a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -25,10 +25,9 @@
static const struct nvkm_pci_func
gf106_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
new file mode 100644
index 000000000000..42da92d7a5fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_xtl_ep_pri.h>
+
+static void
+gh100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ /* Handled by top-level intr ACK. */
+}
+
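+/* On GH100 the config space mirror is the XTL_EP PRI window rather than
+ * a fixed 0x88000 region. */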
+static const struct nvkm_pci_func
+gh100_pci = {
+ .cfg = {
+ .addr = DRF_LO(NV_EP_PCFGM),
+ .size = DRF_HI(NV_EP_PCFGM) - DRF_LO(NV_EP_PCFGM) + 1,
+ },
+ .msi_rearm = gh100_pci_msi_rearm,
+};
+
+int
+gh100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&gh100_pci, device, type, inst, ppci);
+}
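
gh100 is the first implementation to derive its config window from a register-range macro instead of a hardcoded offset. A minimal sketch of how the DRF bounds extraction could work, assuming nvhw/drf.h follows the usual high:low ternary-range convention; the NV_EP_PCFGM bounds below are hypothetical, the real ones come from nvhw/ref/gh100/dev_xtl_ep_pri.h:

/* Sketch only: hypothetical bounds, conventional DRF range macros. */
#define NV_EP_PCFGM 0x8f0fff:0x8f0000	/* high:low register range */
#define DRF_LO(drf) (0 ? drf)		/* ternary selects the low bound */
#define DRF_HI(drf) (1 ? drf)		/* ternary selects the high bound */

/* .cfg.addr = DRF_LO(NV_EP_PCFGM)                           = 0x8f0000
 * .cfg.size = DRF_HI(NV_EP_PCFGM) - DRF_LO(NV_EP_PCFGM) + 1 = 0x1000
 */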
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
index 6be87ecffc89..ec6d0a7de995 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
@@ -204,10 +204,9 @@ gk104_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
static const struct nvkm_pci_func
gk104_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gk104_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
index a5fafda0014d..4204316a544f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -31,9 +31,7 @@ gp100_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
gp100_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = gp100_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
index 9ab64194b185..b8a3f6850fa7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -23,32 +23,9 @@
*/
#include "priv.h"
-static u32
-nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
- struct nvkm_device *device = pci->subdev.device;
- return nvkm_rd32(device, 0x001800 + addr);
-}
-
-static void
-nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr08(device, 0x001800 + addr, data);
-}
-
-static void
-nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr32(device, 0x001800 + addr, data);
-}
-
static const struct nvkm_pci_func
nv04_pci_func = {
- .rd32 = nv04_pci_rd32,
- .wr08 = nv04_pci_wr08,
- .wr32 = nv04_pci_wr32,
+ .cfg = { .addr = 0x001800, .size = 0x1000 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
index 6a3c31cf0200..1971dbbdeb2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -23,27 +23,6 @@
*/
#include "priv.h"
-u32
-nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
- struct nvkm_device *device = pci->subdev.device;
- return nvkm_rd32(device, 0x088000 + addr);
-}
-
-void
-nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr08(device, 0x088000 + addr, data);
-}
-
-void
-nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr32(device, 0x088000 + addr, data);
-}
-
void
nv40_pci_msi_rearm(struct nvkm_pci *pci)
{
@@ -52,9 +31,7 @@ nv40_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
nv40_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = nv40_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
index 9cad17f178ec..0093eabac9ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
@@ -38,9 +38,7 @@ nv46_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
nv46_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = nv46_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
index 741e34bf307c..b445081bb80e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -25,9 +25,7 @@
static const struct nvkm_pci_func
nv4c_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 9b7583532962..988eeee1471c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -8,10 +8,12 @@ int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_
struct nvkm_pci **);
struct nvkm_pci_func {
+ struct {
+ u32 addr;
+ u16 size;
+ } cfg;
+
void (*init)(struct nvkm_pci *);
- u32 (*rd32)(struct nvkm_pci *, u16 addr);
- void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
- void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
void (*msi_rearm)(struct nvkm_pci *);
struct {
@@ -27,9 +29,6 @@ struct nvkm_pci_func {
} pcie;
};
-u32 nv40_pci_rd32(struct nvkm_pci *, u16);
-void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
-void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
void nv40_pci_msi_rearm(struct nvkm_pci *);
void nv46_pci_msi_rearm(struct nvkm_pci *);
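
With the per-GPU rd32/wr08/wr32 hooks gone, each implementation now only describes where its config window lives, and the offset arithmetic the removed nv04/nv40 helpers open-coded can live in one shared accessor. A sketch of what that common helper looks like (the function name here is illustrative):

static u32
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
	struct nvkm_device *device = pci->subdev.device;

	/* Same arithmetic nv04/nv40 previously duplicated per GPU. */
	return nvkm_rd32(device, pci->func->cfg.addr + addr);
}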
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
index dce337306cab..d294844d9eae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <rm/gpu.h>
+
static void
r535_vfn_dtor(struct nvkm_vfn *vfn)
{
@@ -32,6 +34,7 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr,
struct nvkm_vfn **pvfn)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_vfn_func *rm;
int ret;
@@ -39,8 +42,12 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
return -ENOMEM;
rm->dtor = r535_vfn_dtor;
- rm->intr = hw->intr;
- rm->user = hw->user;
+ rm->intr = &tu102_vfn_intr;
+ rm->user.addr = 0x030000;
+ rm->user.size = 0x010000;
+ rm->user.base.minver = -1;
+ rm->user.base.maxver = -1;
+ rm->user.base.oclass = gpu->usermode.class;
ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
index c5460a14c541..4e64d8843373 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
@@ -36,7 +36,7 @@ nvkm_uvfn_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_vfn *vfn = nvkm_uvfn(object)->vfn;
struct nvkm_device *device = vfn->subdev.device;
- *addr = device->func->resource_addr(device, 0) + vfn->addr.user;
+ *addr = device->func->resource_addr(device, NVKM_BAR0_PRI) + vfn->addr.user;
*size = vfn->func->user.size;
*type = NVKM_OBJECT_MAP_IO;
return 0;
diff --git a/drivers/gpu/drm/nova/Kconfig b/drivers/gpu/drm/nova/Kconfig
new file mode 100644
index 000000000000..cca6a3fea879
--- /dev/null
+++ b/drivers/gpu/drm/nova/Kconfig
@@ -0,0 +1,14 @@
+config DRM_NOVA
+ tristate "Nova DRM driver"
+ depends on DRM=y
+ depends on PCI
+ depends on RUST
+ select AUXILIARY_BUS
+ default n
+ help
+ Choose this if you want to build the Nova DRM driver for Nvidia
+ GSP-based GPUs.
+
+ This driver is work in progress and may not be functional.
+
+ If M is selected, the module will be called nova.
diff --git a/drivers/gpu/drm/nova/Makefile b/drivers/gpu/drm/nova/Makefile
new file mode 100644
index 000000000000..42019bff3173
--- /dev/null
+++ b/drivers/gpu/drm/nova/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_NOVA) += nova.o
diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs
new file mode 100644
index 000000000000..b28b2e05cc15
--- /dev/null
+++ b/drivers/gpu/drm/nova/driver.rs
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, types::ARef};
+
+use crate::file::File;
+use crate::gem::NovaObject;
+
+pub(crate) struct NovaDriver {
+ #[expect(unused)]
+ drm: ARef<drm::Device<Self>>,
+}
+
+/// Convenience type alias for the DRM device type for this driver
+pub(crate) type NovaDevice = drm::Device<NovaDriver>;
+
+#[pin_data]
+pub(crate) struct NovaData {
+ pub(crate) adev: ARef<auxiliary::Device>,
+}
+
+const INFO: drm::DriverInfo = drm::DriverInfo {
+ major: 0,
+ minor: 0,
+ patchlevel: 0,
+ name: c_str!("nova"),
+ desc: c_str!("Nvidia Graphics"),
+};
+
+const NOVA_CORE_MODULE_NAME: &CStr = c_str!("NovaCore");
+const AUXILIARY_NAME: &CStr = c_str!("nova-drm");
+
+kernel::auxiliary_device_table!(
+ AUX_TABLE,
+ MODULE_AUX_TABLE,
+ <NovaDriver as auxiliary::Driver>::IdInfo,
+ [(
+ auxiliary::DeviceId::new(NOVA_CORE_MODULE_NAME, AUXILIARY_NAME),
+ ()
+ )]
+);
+
+impl auxiliary::Driver for NovaDriver {
+ type IdInfo = ();
+ const ID_TABLE: auxiliary::IdTable<Self::IdInfo> = &AUX_TABLE;
+
+ fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ let data = try_pin_init!(NovaData { adev: adev.into() });
+
+ let drm = drm::Device::<Self>::new(adev.as_ref(), data)?;
+ drm::Registration::new_foreign_owned(&drm, adev.as_ref(), 0)?;
+
+ Ok(KBox::new(Self { drm }, GFP_KERNEL)?.into())
+ }
+}
+
+#[vtable]
+impl drm::Driver for NovaDriver {
+ type Data = NovaData;
+ type File = File;
+ type Object = gem::Object<NovaObject>;
+
+ const INFO: drm::DriverInfo = INFO;
+
+ kernel::declare_drm_ioctls! {
+ (NOVA_GETPARAM, drm_nova_getparam, ioctl::RENDER_ALLOW, File::get_param),
+ (NOVA_GEM_CREATE, drm_nova_gem_create, ioctl::AUTH | ioctl::RENDER_ALLOW, File::gem_create),
+ (NOVA_GEM_INFO, drm_nova_gem_info, ioctl::AUTH | ioctl::RENDER_ALLOW, File::gem_info),
+ }
+}
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
new file mode 100644
index 000000000000..7e59a34b830d
--- /dev/null
+++ b/drivers/gpu/drm/nova/file.rs
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::driver::{NovaDevice, NovaDriver};
+use crate::gem::NovaObject;
+use crate::uapi::{GemCreate, GemInfo, Getparam};
+use kernel::{
+ alloc::flags::*,
+ drm::{self, gem::BaseObject},
+ pci,
+ prelude::*,
+ types::Opaque,
+ uapi,
+};
+
+pub(crate) struct File;
+
+impl drm::file::DriverFile for File {
+ type Driver = NovaDriver;
+
+ fn open(_dev: &NovaDevice) -> Result<Pin<KBox<Self>>> {
+ Ok(KBox::new(Self, GFP_KERNEL)?.into())
+ }
+}
+
+impl File {
+ /// IOCTL: get_param: Query GPU / driver metadata.
+ pub(crate) fn get_param(
+ dev: &NovaDevice,
+ getparam: &Opaque<uapi::drm_nova_getparam>,
+ _file: &drm::File<File>,
+ ) -> Result<u32> {
+ let adev = &dev.adev;
+ let parent = adev.parent().ok_or(ENOENT)?;
+ let pdev: &pci::Device = parent.try_into()?;
+ let getparam: &Getparam = getparam.into();
+
+ let value = match getparam.param() as u32 {
+ uapi::NOVA_GETPARAM_VRAM_BAR_SIZE => pdev.resource_len(1)?,
+ _ => return Err(EINVAL),
+ };
+
+ getparam.set_value(value);
+
+ Ok(0)
+ }
+
+ /// IOCTL: gem_create: Create a new DRM GEM object.
+ pub(crate) fn gem_create(
+ dev: &NovaDevice,
+ req: &Opaque<uapi::drm_nova_gem_create>,
+ file: &drm::File<File>,
+ ) -> Result<u32> {
+ let req: &GemCreate = req.into();
+ let obj = NovaObject::new(dev, req.size().try_into()?)?;
+
+ req.set_handle(obj.create_handle(file)?);
+
+ Ok(0)
+ }
+
+ /// IOCTL: gem_info: Query GEM metadata.
+ pub(crate) fn gem_info(
+ _dev: &NovaDevice,
+ req: &Opaque<uapi::drm_nova_gem_info>,
+ file: &drm::File<File>,
+ ) -> Result<u32> {
+ let req: &GemInfo = req.into();
+ let bo = NovaObject::lookup_handle(file, req.handle())?;
+
+ req.set_size(bo.size().try_into()?);
+
+ Ok(0)
+ }
+}
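
From userspace these handlers sit behind the normal DRM ioctl path. A hedged C sketch of a NOVA_GETPARAM round trip; the struct layout matches the accessors in uapi.rs below, but the ioctl number and param constant are stand-ins for whatever the nova uAPI header actually defines:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Assumed layout and values; the authoritative ones are in the nova uAPI header. */
struct drm_nova_getparam {
	uint64_t param;
	uint64_t value;
};
#define NOVA_GETPARAM_VRAM_BAR_SIZE 0x1	/* hypothetical */
#define DRM_IOCTL_NOVA_GETPARAM \
	DRM_IOWR(DRM_COMMAND_BASE + 0x0, struct drm_nova_getparam) /* hypothetical nr */

int main(void)
{
	struct drm_nova_getparam gp = { .param = NOVA_GETPARAM_VRAM_BAR_SIZE };
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0 || ioctl(fd, DRM_IOCTL_NOVA_GETPARAM, &gp))
		return 1;
	printf("VRAM BAR size: %llu bytes\n", (unsigned long long)gp.value);
	return 0;
}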
diff --git a/drivers/gpu/drm/nova/gem.rs b/drivers/gpu/drm/nova/gem.rs
new file mode 100644
index 000000000000..33b62d21400c
--- /dev/null
+++ b/drivers/gpu/drm/nova/gem.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{
+ drm,
+ drm::{gem, gem::BaseObject},
+ prelude::*,
+ types::ARef,
+};
+
+use crate::{
+ driver::{NovaDevice, NovaDriver},
+ file::File,
+};
+
+/// GEM Object inner driver data
+#[pin_data]
+pub(crate) struct NovaObject {}
+
+impl gem::BaseDriverObject<gem::Object<NovaObject>> for NovaObject {
+ fn new(_dev: &NovaDevice, _size: usize) -> impl PinInit<Self, Error> {
+ try_pin_init!(NovaObject {})
+ }
+}
+
+impl gem::DriverObject for NovaObject {
+ type Driver = NovaDriver;
+}
+
+impl NovaObject {
+ /// Create a new DRM GEM object.
+ pub(crate) fn new(dev: &NovaDevice, size: usize) -> Result<ARef<gem::Object<Self>>> {
+ let aligned_size = size.next_multiple_of(1 << 12);
+
+ if size == 0 || size > aligned_size {
+ return Err(EINVAL);
+ }
+
+ gem::Object::new(dev, aligned_size)
+ }
+
+ /// Look up a GEM object handle for a `File` and return an `ObjectRef` for it.
+ #[inline]
+ pub(crate) fn lookup_handle(
+ file: &drm::File<File>,
+ handle: u32,
+ ) -> Result<ARef<gem::Object<Self>>> {
+ gem::Object::lookup_handle(file, handle)
+ }
+}
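
The size handling in NovaObject::new rejects zero-sized requests and refuses any size whose page-aligned value ends up smaller than the input, which can only happen if the alignment wrapped. The same guard in C, for comparison (a sketch, not code from this series):

/* Page-align with a wrap guard; mirrors NovaObject::new above. */
size_t aligned_size = ALIGN(size, PAGE_SIZE);

if (size == 0 || aligned_size < size)
	return ERR_PTR(-EINVAL);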
diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs
new file mode 100644
index 000000000000..64fd670e99e1
--- /dev/null
+++ b/drivers/gpu/drm/nova/nova.rs
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Nova DRM Driver
+
+mod driver;
+mod file;
+mod gem;
+mod uapi;
+
+use crate::driver::NovaDriver;
+
+kernel::module_auxiliary_driver! {
+ type: NovaDriver,
+ name: "Nova",
+ authors: ["Danilo Krummrich"],
+ description: "Nova GPU driver",
+ license: "GPL v2",
+}
diff --git a/drivers/gpu/drm/nova/uapi.rs b/drivers/gpu/drm/nova/uapi.rs
new file mode 100644
index 000000000000..eb228a58d423
--- /dev/null
+++ b/drivers/gpu/drm/nova/uapi.rs
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::uapi;
+
+// TODO Work out some common infrastructure to avoid boilerplate code for uAPI abstractions.
+
+macro_rules! define_uapi_abstraction {
+ ($name:ident <= $inner:ty) => {
+ #[repr(transparent)]
+ pub struct $name(::kernel::types::Opaque<$inner>);
+
+ impl ::core::convert::From<&::kernel::types::Opaque<$inner>> for &$name {
+ fn from(value: &::kernel::types::Opaque<$inner>) -> Self {
+ // SAFETY: `Self` is a transparent wrapper of `$inner`.
+ unsafe { ::core::mem::transmute(value) }
+ }
+ }
+ };
+}
+
+define_uapi_abstraction!(Getparam <= uapi::drm_nova_getparam);
+
+impl Getparam {
+ pub fn param(&self) -> u64 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
+ unsafe { (*self.0.get()).param }
+ }
+
+ pub fn set_value(&self, v: u64) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
+ unsafe { (*self.0.get()).value = v };
+ }
+}
+
+define_uapi_abstraction!(GemCreate <= uapi::drm_nova_gem_create);
+
+impl GemCreate {
+ pub fn size(&self) -> u64 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
+ unsafe { (*self.0.get()).size }
+ }
+
+ pub fn set_handle(&self, handle: u32) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
+ unsafe { (*self.0.get()).handle = handle };
+ }
+}
+
+define_uapi_abstraction!(GemInfo <= uapi::drm_nova_gem_info);
+
+impl GemInfo {
+ pub fn handle(&self) -> u32 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
+ unsafe { (*self.0.get()).handle }
+ }
+
+ pub fn set_size(&self, size: u64) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
+ unsafe { (*self.0.get()).size = size };
+ }
+}
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 533f70e8a4a6..cf055815077c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -524,7 +524,7 @@ static void dispc_save_context(struct dispc_device *dispc)
DSSDBG("context saved\n");
}
-static void dispc_restore_context(struct dispc_device *dispc)
+static noinline_for_stack void dispc_restore_context(struct dispc_device *dispc)
{
int i, j;
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b17e77f700dd..9f86db774c39 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -420,6 +420,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
*/
static int dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
@@ -429,7 +430,7 @@ static int dpi_bridge_attach(struct drm_bridge *bridge,
dpi_init_pll(dpi);
- return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+ return drm_bridge_attach(encoder, dpi->output.next_bridge,
bridge, flags);
}
@@ -561,7 +562,6 @@ static const struct drm_bridge_funcs dpi_bridge_funcs = {
static void dpi_bridge_init(struct dpi_data *dpi)
{
- dpi->bridge.funcs = &dpi_bridge_funcs;
dpi->bridge.of_node = dpi->pdev->dev.of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
@@ -706,9 +706,9 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
u32 datalines;
int r;
- dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
- if (!dpi)
- return -ENOMEM;
+ dpi = devm_drm_bridge_alloc(&pdev->dev, struct dpi_data, bridge, &dpi_bridge_funcs);
+ if (IS_ERR(dpi))
+ return PTR_ERR(dpi);
ep = of_graph_get_next_port_endpoint(port, NULL);
if (!ep)
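
Two mechanical conversions repeat across the omapdrm outputs from here on: the bridge attach hook gains an explicit encoder argument (instead of dereferencing bridge->encoder), and allocation switches to devm_drm_bridge_alloc(), which embeds the drm_bridge, binds its funcs at allocation time, and retires the manual err_free/kfree() unwind paths. Sketched on a generic driver, where struct foo and its names are placeholders:

static int foo_bridge_attach(struct drm_bridge *bridge,
			     struct drm_encoder *encoder,
			     enum drm_bridge_attach_flags flags)
{
	/* The encoder now arrives as a parameter. */
	return drm_bridge_attach(encoder, next_bridge, bridge, flags);
}

/* One devm call replaces kzalloc() plus manual funcs assignment: */
foo = devm_drm_bridge_alloc(&pdev->dev, struct foo, bridge,
			    &foo_bridge_funcs);
if (IS_ERR(foo))
	return PTR_ERR(foo);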
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 9b9cc593790c..b129e5a8d791 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4617,6 +4617,7 @@ static const struct component_ops dsi_component_ops = {
*/
static int dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
@@ -4624,7 +4625,7 @@ static int dsi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
+ return drm_bridge_attach(encoder, dsi->output.next_bridge,
bridge, flags);
}
@@ -4700,7 +4701,6 @@ static const struct drm_bridge_funcs dsi_bridge_funcs = {
static void dsi_bridge_init(struct dsi_data *dsi)
{
- dsi->bridge.funcs = &dsi_bridge_funcs;
dsi->bridge.of_node = dsi->host.dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
@@ -4893,9 +4893,9 @@ static int dsi_probe(struct platform_device *pdev)
unsigned int i;
int r;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct dsi_data, bridge, &dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->dev = dev;
dev_set_drvdata(dev, dsi);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index e1ac447221ee..3cd612af2449 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -314,6 +314,7 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
*/
static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -321,7 +322,7 @@ static int hdmi4_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
@@ -504,7 +505,6 @@ static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
{
- hdmi->bridge.funcs = &hdmi4_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
@@ -760,9 +760,9 @@ static int hdmi4_probe(struct platform_device *pdev)
int irq;
int r;
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(&pdev->dev, struct omap_hdmi, bridge, &hdmi4_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->pdev = pdev;
@@ -773,25 +773,24 @@ static int hdmi4_probe(struct platform_device *pdev)
r = hdmi4_probe_of(hdmi);
if (r)
- goto err_free;
+ return r;
r = hdmi_wp_init(pdev, &hdmi->wp, 4);
if (r)
- goto err_free;
+ return r;
r = hdmi_phy_init(pdev, &hdmi->phy, 4);
if (r)
- goto err_free;
+ return r;
r = hdmi4_core_init(pdev, &hdmi->core);
if (r)
- goto err_free;
+ return r;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- r = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -799,7 +798,7 @@ static int hdmi4_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_free;
+ return r;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
@@ -807,7 +806,7 @@ static int hdmi4_probe(struct platform_device *pdev)
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
- goto err_free;
+ return r;
}
pm_runtime_enable(&pdev->dev);
@@ -826,8 +825,6 @@ err_uninit_output:
hdmi4_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(hdmi);
return r;
}
@@ -840,8 +837,6 @@ static void hdmi4_remove(struct platform_device *pdev)
hdmi4_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
-
- kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index fa9904e4c218..5636b3dfec1c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -312,6 +312,7 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
*/
static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -319,7 +320,7 @@ static int hdmi5_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
@@ -479,7 +480,6 @@ static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
{
- hdmi->bridge.funcs = &hdmi5_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
@@ -726,9 +726,9 @@ static int hdmi5_probe(struct platform_device *pdev)
int irq;
int r;
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(&pdev->dev, struct omap_hdmi, bridge, &hdmi5_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->pdev = pdev;
@@ -739,25 +739,24 @@ static int hdmi5_probe(struct platform_device *pdev)
r = hdmi5_probe_of(hdmi);
if (r)
- goto err_free;
+ return r;
r = hdmi_wp_init(pdev, &hdmi->wp, 5);
if (r)
- goto err_free;
+ return r;
r = hdmi_phy_init(pdev, &hdmi->phy, 5);
if (r)
- goto err_free;
+ return r;
r = hdmi5_core_init(pdev, &hdmi->core);
if (r)
- goto err_free;
+ return r;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- r = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -765,7 +764,7 @@ static int hdmi5_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_free;
+ return r;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
@@ -773,7 +772,7 @@ static int hdmi5_probe(struct platform_device *pdev)
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
- goto err_free;
+ return r;
}
pm_runtime_enable(&pdev->dev);
@@ -792,8 +791,6 @@ err_uninit_output:
hdmi5_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(hdmi);
return r;
}
@@ -806,8 +803,6 @@ static void hdmi5_remove(struct platform_device *pdev)
hdmi5_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
-
- kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index f9ae358e8e52..df4cbc683e2c 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -128,6 +128,7 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
*/
static int sdi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
@@ -135,7 +136,7 @@ static int sdi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+ return drm_bridge_attach(encoder, sdi->output.next_bridge,
bridge, flags);
}
@@ -283,7 +284,6 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = {
static void sdi_bridge_init(struct sdi_device *sdi)
{
- sdi->bridge.funcs = &sdi_bridge_funcs;
sdi->bridge.of_node = sdi->pdev->dev.of_node;
sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
@@ -343,21 +343,19 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
u32 datapairs;
int r;
- sdi = kzalloc(sizeof(*sdi), GFP_KERNEL);
- if (!sdi)
- return -ENOMEM;
+ sdi = devm_drm_bridge_alloc(&pdev->dev, struct sdi_device, bridge, &sdi_bridge_funcs);
+ if (IS_ERR(sdi))
+ return PTR_ERR(sdi);
ep = of_graph_get_next_port_endpoint(port, NULL);
- if (!ep) {
- r = 0;
- goto err_free;
- }
+ if (!ep)
+ return 0;
r = of_property_read_u32(ep, "datapairs", &datapairs);
of_node_put(ep);
if (r) {
DSSERR("failed to parse datapairs\n");
- goto err_free;
+ return r;
}
sdi->datapairs = datapairs;
@@ -371,19 +369,14 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
r = PTR_ERR(sdi->vdds_sdi_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDS_SDI regulator\n");
- goto err_free;
+ return r;
}
r = sdi_init_output(sdi);
if (r)
- goto err_free;
+ return r;
return 0;
-
-err_free:
- kfree(sdi);
-
- return r;
}
void sdi_uninit_port(struct device_node *port)
@@ -394,5 +387,4 @@ void sdi_uninit_port(struct device_node *port)
return;
sdi_uninit_output(sdi);
- kfree(sdi);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index aaeef603682c..9b5d53dc361e 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -538,6 +538,7 @@ static int venc_get_clocks(struct venc_device *venc)
*/
static int venc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct venc_device *venc = drm_bridge_to_venc(bridge);
@@ -545,7 +546,7 @@ static int venc_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+ return drm_bridge_attach(encoder, venc->output.next_bridge,
bridge, flags);
}
@@ -663,7 +664,6 @@ static const struct drm_bridge_funcs venc_bridge_funcs = {
static void venc_bridge_init(struct venc_device *venc)
{
- venc->bridge.funcs = &venc_bridge_funcs;
venc->bridge.of_node = venc->pdev->dev.of_node;
venc->bridge.ops = DRM_BRIDGE_OP_MODES;
venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
@@ -808,9 +808,9 @@ static int venc_probe(struct platform_device *pdev)
struct venc_device *venc;
int r;
- venc = kzalloc(sizeof(*venc), GFP_KERNEL);
- if (!venc)
- return -ENOMEM;
+ venc = devm_drm_bridge_alloc(&pdev->dev, struct venc_device, bridge, &venc_bridge_funcs);
+ if (IS_ERR(venc))
+ return PTR_ERR(venc);
venc->pdev = pdev;
@@ -823,26 +823,24 @@ static int venc_probe(struct platform_device *pdev)
venc->config = &venc_config_pal_trm;
venc->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(venc->base)) {
- r = PTR_ERR(venc->base);
- goto err_free;
- }
+ if (IS_ERR(venc->base))
+ return PTR_ERR(venc->base);
venc->vdda_dac_reg = devm_regulator_get(&pdev->dev, "vdda");
if (IS_ERR(venc->vdda_dac_reg)) {
r = PTR_ERR(venc->vdda_dac_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA_DAC regulator\n");
- goto err_free;
+ return r;
}
r = venc_get_clocks(venc);
if (r)
- goto err_free;
+ return r;
r = venc_probe_of(venc);
if (r)
- goto err_free;
+ return r;
pm_runtime_enable(&pdev->dev);
@@ -860,8 +858,6 @@ err_uninit_output:
venc_uninit_output(venc);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(venc);
return r;
}
@@ -874,8 +870,6 @@ static void venc_remove(struct platform_device *pdev)
venc_uninit_output(venc);
pm_runtime_disable(&pdev->dev);
-
- kfree(venc);
}
static __maybe_unused int venc_runtime_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 449d521c78fe..bb3105556f19 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -335,10 +335,9 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
#endif
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
unsigned int num_planes = info->num_planes;
struct drm_gem_object *bos[4];
struct drm_framebuffer *fb;
@@ -352,7 +351,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
}
}
- fb = omap_framebuffer_init(dev, mode_cmd, bos);
+ fb = omap_framebuffer_init(dev, info, mode_cmd, bos);
if (IS_ERR(fb))
goto error;
@@ -366,9 +365,9 @@ error:
}
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- const struct drm_format_info *format = NULL;
struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
unsigned int pitch = mode_cmd->pitches[0];
@@ -378,14 +377,12 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- format = drm_get_format_info(dev, mode_cmd);
-
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i] == mode_cmd->pixel_format)
break;
}
- if (!format || i == ARRAY_SIZE(formats)) {
+ if (i == ARRAY_SIZE(formats)) {
dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
@@ -399,7 +396,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
}
fb = &omap_fb->base;
- omap_fb->format = format;
+ omap_fb->format = info;
mutex_init(&omap_fb->lock);
/*
@@ -407,23 +404,23 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
* that the two planes of multiplane formats need the same number of
* bytes per pixel.
*/
- if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
+ if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n");
ret = -EINVAL;
goto fail;
}
- if (pitch % format->cpp[0]) {
+ if (pitch % info->cpp[0]) {
dev_dbg(dev->dev,
"buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n",
- pitch, format->cpp[0]);
+ pitch, info->cpp[0]);
ret = -EINVAL;
goto fail;
}
- for (i = 0; i < format->num_planes; i++) {
+ for (i = 0; i < info->num_planes; i++) {
struct plane *plane = &omap_fb->planes[i];
- unsigned int vsub = i == 0 ? 1 : format->vsub;
+ unsigned int vsub = i == 0 ? 1 : info->vsub;
unsigned int size;
size = pitch * mode_cmd->height / vsub;
@@ -440,7 +437,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->dma_addr = 0;
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
if (ret) {
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index b75f0b5ef1d8..e6010302a22b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -13,6 +13,7 @@ struct drm_connector;
struct drm_device;
struct drm_file;
struct drm_framebuffer;
+struct drm_format_info;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
struct drm_plane_state;
@@ -20,8 +21,10 @@ struct omap_overlay_info;
struct seq_file;
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 7b6396890681..948af7ec1130 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -197,7 +197,10 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto fail;
}
- fb = omap_framebuffer_init(dev, &mode_cmd, &bo);
+ fb = omap_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index b9c67e4ca360..381552bfb409 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -8,7 +8,6 @@
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
-#include <linux/pfn_t.h>
#include <linux/vmalloc.h>
#include <drm/drm_prime.h>
@@ -371,8 +370,7 @@ static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vmf_insert_mixed(vma, vmf->address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vmf_insert_mixed(vma, vmf->address, pfn);
}
/* Special handling for the case of faulting in 2d tiled buffers */
@@ -467,8 +465,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
- ret = vmf_insert_mixed(vma,
- vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_mixed(vma, vaddr, pfn);
if (ret & VM_FAULT_ERROR)
break;
pfn += priv->usergart[fmt].stride_pfn;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index e059b06e0239..09b9f7ff9340 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -67,6 +67,15 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_BOE_TD4320
+ tristate "BOE TD4320 DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for BOE TD4320 1080x2340
+ video mode panel found in Xiaomi Redmi Note 7 smartphones.
+
config DRM_PANEL_BOE_TH101MB31UIG002_28A
tristate "Boe TH101MB31UIG002-28A panel"
depends on OF
@@ -154,6 +163,17 @@ config DRM_PANEL_LVDS
handling of power supplies or control signals. It implements automatic
backlight handling if the panel is attached to a backlight controller.
+config DRM_PANEL_HIMAX_HX8279
+ tristate "Himax HX8279-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Himax HX8279 controller, such as the Startek KD070FHFID078
+ 7.0" 1200x1920 IPS LCD panel that uses a MIPI-DSI interface
+ and others.
+
config DRM_PANEL_HIMAX_HX83102
tristate "Himax HX83102-based panels"
depends on OF
@@ -173,6 +193,16 @@ config DRM_PANEL_HIMAX_HX83112A
Say Y here if you want to enable support for Himax HX83112A-based
display panels, such as the one found in the Fairphone 4 smartphone.
+config DRM_PANEL_HIMAX_HX83112B
+ tristate "Himax HX83112B-based DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_KMS_HELPER
+ help
+ Say Y here if you want to enable support for Himax HX83112B-based
+ display panels, such as the one found in the Fairphone 3 smartphone.
+
config DRM_PANEL_HIMAX_HX8394
tristate "HIMAX HX8394 MIPI-DSI LCD panels"
depends on OF
@@ -497,6 +527,18 @@ config DRM_PANEL_NOVATEK_NT36672E
LCD panel module. The panel has a resolution of 1080x2408 and uses 24 bit
RGB per pixel.
+config DRM_PANEL_NOVATEK_NT37801
+ tristate "Novatek NT37801/NT37810 AMOLED DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DSC_HELPER
+ select DRM_DISPLAY_HELPER
+ help
+ Say Y here if you want to enable support for Novatek NT37801 (or
+ NT37810) AMOLED DSI Video Mode LCD panel module with 1440x3200
+ resolution.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -615,6 +657,32 @@ config DRM_PANEL_RAYDIUM_RM69380
This panel controller can be found in the Lenovo Xiaoxin Pad Pro 2021
in combination with an EDO OLED panel.
+config DRM_PANEL_RENESAS_R61307
+ tristate "Renesas R61307 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for KOE tx13d100vm0eaa
+	  IPS-LCD module with Renesas R61307 IC. The panel has a 1024x768
+ resolution and uses 24 bit RGB per pixel.
+
+ This panel controller can be found in LG Optimus Vu P895 smartphone
+ in combination with LCD panel.
+
+config DRM_PANEL_RENESAS_R69328
+ tristate "Renesas R69328 720x1280 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for JDI dx12d100vm0eaa
+ IPS-LCD module with Renesas R69328 IC. The panel has a 720x1280
+ resolution and uses 24 bit RGB per pixel.
+
+	  This panel controller can be found in LG Optimus 4X P880 smartphone
+ in combination with LCD panel.
+
config DRM_PANEL_RONBO_RB070D30
tristate "Ronbo Electronics RB070D30 panel"
depends on OF
@@ -996,6 +1064,15 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+config DRM_PANEL_VISIONOX_G2647FB105
+ tristate "Visionox G2647FB105"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Visionox
+ G2647FB105 (2340x1080@60Hz) AMOLED DSI cmd mode panel.
+
config DRM_PANEL_VISIONOX_R66451
tristate "Visionox R66451"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 1bb8ae46b59b..957555b49996 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.
obj-$(CONFIG_DRM_PANEL_AUO_A030JTN01) += panel-auo-a030jtn01.o
obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TD4320) += panel-boe-td4320.o
obj-$(CONFIG_DRM_PANEL_BOE_TH101MB31UIG002_28A) += panel-boe-th101mb31ig002-28a.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_LL2) += panel-boe-tv101wum-ll2.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
@@ -16,8 +17,10 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX8279) += panel-himax-hx8279.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112B) += panel-himax-hx83112b.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
@@ -49,6 +52,7 @@ obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36523) += panel-novatek-nt36523.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672E) += panel-novatek-nt36672e.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT37801) += panel-novatek-nt37801.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
@@ -62,6 +66,8 @@ obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67200) += panel-raydium-rm67200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o
+obj-$(CONFIG_DRM_PANEL_RENESAS_R61307) += panel-renesas-r61307.o
+obj-$(CONFIG_DRM_PANEL_RENESAS_R69328) += panel-renesas-r69328.o
obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS581VF01) += panel-samsung-ams581vf01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS639RQ08) += panel-samsung-ams639rq08.o
@@ -101,6 +107,7 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_G2647FB105) += panel-visionox-g2647fb105.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM692E5) += panel-visionox-rm692e5.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index 4692c36fe217..87fb0fd29658 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -279,9 +279,10 @@ static int y030xx067a_probe(struct spi_device *spi)
struct y030xx067a *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct y030xx067a, panel,
+ &y030xx067a_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -306,9 +307,6 @@ static int y030xx067a_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset GPIO\n");
- drm_panel_init(&priv->panel, dev, &y030xx067a_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
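
The panel drivers below get the same treatment via devm_drm_panel_alloc(): the two-step devm_kzalloc() plus later drm_panel_init() becomes a single allocation that fixes the funcs and connector type up front. In outline, with generic names:

/* Before: allocate, then initialize separately. */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
	return -ENOMEM;
drm_panel_init(&priv->panel, dev, &foo_funcs, DRM_MODE_CONNECTOR_DPI);

/* After: one devm helper does both. */
priv = devm_drm_panel_alloc(dev, struct foo, panel, &foo_funcs,
			    DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(priv))
	return PTR_ERR(priv);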
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 503ecea72c5e..ea5119018df4 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -306,9 +306,11 @@ static int versatile_panel_probe(struct platform_device *pdev)
return PTR_ERR(map);
}
- vpanel = devm_kzalloc(dev, sizeof(*vpanel), GFP_KERNEL);
- if (!vpanel)
- return -ENOMEM;
+ vpanel = devm_drm_panel_alloc(dev, struct versatile_panel, panel,
+ &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(vpanel))
+ return PTR_ERR(vpanel);
ret = regmap_read(map, SYS_CLCD, &val);
if (ret) {
@@ -348,9 +350,6 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&vpanel->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index b05a663c134c..db006576d704 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -224,9 +224,11 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
struct tm5p5_nt35596 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct tm5p5_nt35596, panel,
+ &tm5p5_nt35596_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd";
ctx->supplies[1].supply = "vddio";
@@ -253,9 +255,6 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight)) {
ret = PTR_ERR(ctx->panel.backlight);
diff --git a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
index 77604d6a4e72..6e52bf6830e1 100644
--- a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
+++ b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
@@ -200,9 +200,10 @@ static int a030jtn01_probe(struct spi_device *spi)
spi->mode |= SPI_MODE_3 | SPI_3WIRE;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct a030jtn01, panel,
+ &a030jtn01_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -223,9 +224,6 @@ static int a030jtn01_probe(struct spi_device *spi)
if (IS_ERR(priv->reset_gpio))
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO");
- drm_panel_init(&priv->panel, dev, &a030jtn01_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index 7e66db4a88bb..84c21c62a43e 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -55,77 +55,56 @@ static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe)
static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
{
struct mipi_dsi_device *dsi = boe->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
- mipi_dsi_dcs_write_seq(dsi, 0xf8,
- 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(30);
-
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc0,
- 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
- 0x2a, 0x31, 0x39, 0x20, 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
- 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
- 0x5c, 0x5c, 0x5c);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
-
- msleep(30);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(50);
-
- return 0;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x00, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8,
+ 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0,
+ 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
+ 0x2a, 0x31, 0x39, 0x20, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
+ 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
+ 0x5c, 0x5c, 0x5c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
+
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+
+ return dsi_ctx.accum_err;
}
-static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
+static void boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
{
struct mipi_dsi_device *dsi = boe->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
/* OFF commands sent in HS mode */
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- usleep_range(1000, 2000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
-
- return 0;
}
static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
{
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
- struct device *dev = &boe->dsi->dev;
int ret;
/*
@@ -157,13 +136,14 @@ static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
ret = boe_bf060y8m_aj0_on(boe);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(boe->reset_gpio, 1);
- return ret;
+ goto err_on;
}
return 0;
+err_on:
+ regulator_disable(boe->vregs[BF060Y8M_VREG_VCI].consumer);
err_vci:
regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer);
err_vddio:
@@ -178,15 +158,11 @@ err_elvss:
static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel)
{
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
- struct device *dev = &boe->dsi->dev;
- int ret;
- ret = boe_bf060y8m_aj0_off(boe);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ boe_bf060y8m_aj0_off(boe);
gpiod_set_value_cansleep(boe->reset_gpio, 1);
- ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
+ regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
return 0;
}
@@ -234,13 +210,11 @@ static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
- return 0;
+ return dsi_ctx.accum_err;
}
static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl)
@@ -350,9 +324,11 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
struct boe_bf060y8m_aj0 *boe;
int ret;
- boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL);
- if (!boe)
- return -ENOMEM;
+ boe = devm_drm_panel_alloc(dev, struct boe_bf060y8m_aj0, panel,
+ &boe_bf060y8m_aj0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(boe))
+ return PTR_ERR(boe);
ret = boe_bf060y8m_aj0_init_vregs(boe, dev);
if (ret)
@@ -374,9 +350,6 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
boe->panel.prepare_prev_first = true;
boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi);
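
The bf060y8m rework above is the standard mipi_dsi_multi_context conversion: each *_multi() helper becomes a no-op once an earlier step has failed, failures (including their dev_err() reporting) are recorded in accum_err, and the caller checks a single accumulated result instead of unwinding after every write. The control flow, condensed:

struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);	/* logs and records errors */
mipi_dsi_msleep(&dsi_ctx, 30);			/* skipped if accum_err is set */
mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);	/* no-op after a failure */

return dsi_ctx.accum_err;			/* 0 on success */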
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index df746baae301..4a8560b4b899 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -847,9 +847,6 @@ static int panel_add(struct panel_info *pinfo)
"failed to get enable gpio\n");
}
- drm_panel_init(&pinfo->base, dev, &panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&pinfo->base);
if (ret)
return ret;
@@ -865,9 +862,11 @@ static int panel_probe(struct mipi_dsi_device *dsi)
const struct panel_desc *desc;
int err;
- pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base,
+ &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
desc = of_device_get_match_data(&dsi->dev);
dsi->mode_flags = desc->mode_flags;
diff --git a/drivers/gpu/drm/panel/panel-boe-td4320.c b/drivers/gpu/drm/panel/panel-boe-td4320.c
new file mode 100644
index 000000000000..1956daa2c71b
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-td4320.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Barnabas Czeman <barnabas.czeman@mainlining.org>
+// Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+// Copyright (c) 2013, The Linux Foundation. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct boe_td4320 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct regulator_bulk_data boe_td4320_supplies[] = {
+ { .supply = "iovcc" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct boe_td4320 *to_boe_td4320(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_td4320, panel);
+}
+
+static void boe_td4320_reset(struct boe_td4320 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(30);
+}
+
+static int boe_td4320_on(struct boe_td4320 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd6, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb8,
+ 0x19, 0x55, 0x00, 0xbe, 0x00, 0x00,
+ 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb9,
+ 0x4d, 0x55, 0x05, 0xe6, 0x00, 0x02,
+ 0x03);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xba,
+ 0x9b, 0x5b, 0x07, 0xe6, 0x00, 0x13,
+ 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf9,
+ 0x44, 0x3f, 0x00, 0x8d, 0xbf);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xce,
+ 0x5d, 0x00, 0x0f, 0x1f, 0x2f, 0x3f,
+ 0x4f, 0x5f, 0x6f, 0x7f, 0x8f, 0x9f,
+ 0xaf, 0xbf, 0xcf, 0xdf, 0xef, 0xff,
+ 0x04, 0x00, 0x02, 0x02, 0x42, 0x01,
+ 0x69, 0x5a, 0x40, 0x40, 0x00, 0x00,
+ 0x04, 0xfa, 0x00);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00b8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x2c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00);
+ mipi_dsi_msleep(&dsi_ctx, 96);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int boe_td4320_off(struct boe_td4320 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int boe_td4320_prepare(struct drm_panel *panel)
+{
+ struct boe_td4320 *ctx = to_boe_td4320(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ boe_td4320_reset(ctx);
+
+ ret = boe_td4320_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int boe_td4320_unprepare(struct drm_panel *panel)
+{
+ struct boe_td4320 *ctx = to_boe_td4320(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = boe_td4320_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode boe_td4320_mode = {
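+ /* 1268 (htotal) * 2408 (vtotal) * 60 Hz / 1000 = 183200 kHz */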
+ .clock = (1080 + 86 + 2 + 100) * (2340 + 4 + 4 + 60) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 86,
+ .hsync_end = 1080 + 86 + 2,
+ .htotal = 1080 + 86 + 2 + 100,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 4,
+ .vsync_end = 2340 + 4 + 4,
+ .vtotal = 2340 + 4 + 4 + 60,
+ .width_mm = 67,
+ .height_mm = 145,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int boe_td4320_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &boe_td4320_mode);
+}
+
+static const struct drm_panel_funcs boe_td4320_panel_funcs = {
+ .prepare = boe_td4320_prepare,
+ .unprepare = boe_td4320_unprepare,
+ .get_modes = boe_td4320_get_modes,
+};
+
+static int boe_td4320_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct boe_td4320 *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct boe_td4320, panel,
+ &boe_td4320_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(boe_td4320_supplies),
+ boe_td4320_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->panel.prepare_prev_first = true;
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void boe_td4320_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_td4320 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id boe_td4320_of_match[] = {
+ { .compatible = "boe,td4320" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_td4320_of_match);
+
+static struct mipi_dsi_driver boe_td4320_driver = {
+ .probe = boe_td4320_probe,
+ .remove = boe_td4320_remove,
+ .driver = {
+ .name = "panel-boe-td4320",
+ .of_match_table = boe_td4320_of_match,
+ },
+};
+module_mipi_dsi_driver(boe_td4320_driver);
+
+MODULE_AUTHOR("Barnabas Czeman <barnabas.czeman@mainlining.org>");
+MODULE_DESCRIPTION("DRM driver for boe td4320 fhdplus video mode dsi panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
index 0b87f1e6ecae..f33d4f855929 100644
--- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -349,9 +349,11 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
const struct panel_desc *desc;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct boe_th101mb31ig002, panel,
+ &boe_th101mb31ig002_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
@@ -383,9 +385,6 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, ret,
"Failed to get orientation\n");
- drm_panel_init(&ctx->panel, &dsi->dev, &boe_th101mb31ig002_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
index 50e4a5341bc6..20b6e11a7d84 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
@@ -166,9 +166,11 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
struct boe_tv101wum_ll2 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct boe_tv101wum_ll2, panel,
+ &boe_tv101wum_ll2_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(boe_tv101wum_ll2_supplies),
@@ -190,8 +192,6 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_VIDEO_HSE;
- drm_panel_init(&ctx->panel, dev, &boe_tv101wum_ll2_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 3e5b0d8636d0..d5fe105bdbdd 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1720,8 +1720,6 @@ static int boe_panel_add(struct boe_panel *boe)
boe->base.prepare_prev_first = true;
- drm_panel_init(&boe->base, dev, &boe_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation);
if (err < 0) {
dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
@@ -1746,9 +1744,11 @@ static int boe_panel_probe(struct mipi_dsi_device *dsi)
int ret;
const struct panel_desc *desc;
- boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL);
- if (!boe)
- return -ENOMEM;
+ boe = devm_drm_panel_alloc(&dsi->dev, __typeof(*boe), base,
+ &boe_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(boe))
+ return PTR_ERR(boe);
desc = of_device_get_match_data(&dsi->dev);
dsi->lanes = desc->lanes;
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index 6b3f4d664d2a..ae6e9ffc46cb 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -511,9 +511,10 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dev_dbg(dev, "probe\n");
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
+ ddata = devm_drm_panel_alloc(dev, struct panel_drv_data, panel,
+ &dsicm_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ddata))
+ return PTR_ERR(ddata);
mipi_dsi_set_drvdata(dsi, ddata);
ddata->dsi = dsi;
@@ -530,9 +531,6 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dsicm_hw_reset(ddata);
- drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
if (ddata->use_dsi_backlight) {
struct backlight_properties props = { 0 };
props.max_brightness = 255;
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
index 0bfed0ec0bbc..fb9f9f42be4f 100644
--- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -163,9 +163,11 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
struct ebbg_ft8719 *ctx;
int i, ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ebbg_ft8719, panel,
+ &ebbg_ft8719_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
ctx->supplies[i].supply = regulator_names[i];
@@ -196,9 +198,6 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &ebbg_ft8719_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 52028c8f8988..9a56e208cbdd 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -113,7 +113,7 @@ struct panel_delay {
* // do fixed enable delay
* // enforce prepare_to_enable min time
*
- * This is not specified in a standard way on eDP timing diagrams.
+ * This is usually (T4+T5+T6+T8)-min on eDP timing diagrams.
* It is effectively the time from HPD going high till you can
* turn on the backlight.
*/
@@ -839,9 +839,10 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
struct device_node *ddc;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct panel_edp, base,
+ &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->prepared_time = 0;
panel->desc = desc;
@@ -886,8 +887,6 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
dev_set_drvdata(dev, panel);
- drm_panel_init(&panel->base, dev, &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
-
err = drm_panel_of_backlight(&panel->base);
if (err)
goto err_finished_ddc_init;
@@ -1763,6 +1762,13 @@ static const struct panel_delay delay_80_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_80_500_e80_p2e200 = {
+ .hpd_absent = 80,
+ .unprepare = 500,
+ .enable = 80,
+ .prepare_to_enable = 200,
+};
+
static const struct panel_delay delay_100_500_e200 = {
.hpd_absent = 100,
.unprepare = 500,
@@ -1863,6 +1869,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x30ed, &delay_200_500_e50, "G156HAN03.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAN04.0"),
EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
@@ -1878,6 +1885,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1916,6 +1924,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0964, &delay_200_500_e50, "NV133WUM-N61"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x096e, &delay_200_500_e50_po2e200, "NV116WHM-T07 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
@@ -1930,6 +1939,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
@@ -1938,6 +1948,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d73, &delay_200_500_e80, "NE140WUM-N6S"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1957,6 +1968,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1163, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
@@ -1965,6 +1977,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
@@ -1973,6 +1986,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
@@ -1996,6 +2010,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x1220, &delay_200_500_e50, "KD116N3730A05"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x0000, &delay_200_500_e200_d200, "Unknown"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x048d, &delay_200_500_e200_d200, "Unknown"),
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index b904d5437444..1f177834d629 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -206,9 +206,10 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
struct kd35t133 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct kd35t133, panel,
+ &kd35t133_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -248,9 +249,6 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index 986e3e192881..6225501cb174 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -443,9 +443,11 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
unsigned int i;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct k101_im2ba02, panel,
+ &k101_im2ba02_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
@@ -463,9 +465,6 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
"Couldn't get our reset GPIO\n");
- drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 48e3acaecdf3..4f8d6d8c07e4 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -189,16 +189,14 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
struct feiyang *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct feiyang, panel,
+ &feiyang_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->dvdd),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c
new file mode 100644
index 000000000000..fb302d1f91b9
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c
@@ -0,0 +1,1296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Himax HX8279 DriverIC panels driver
+ *
+ * Copyright (c) 2025 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+/* Page selection */
+#define HX8279_REG_PAGE 0xb0
+ #define HX8279_PAGE_SEL GENMASK(3, 0)
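+ /* Pages 0..12 are selected by writing the page number to 0xb0 */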
+
+/* Page 0 - Driver/Module Configuration */
+#define HX8279_P0_VGHS 0xbf
+#define HX8279_P0_VGLS 0xc0
+#define HX8279_P0_VGPHS 0xc2
+#define HX8279_P0_VGNHS 0xc4
+ #define HX8279_P0_VG_SEL GENMASK(4, 0)
+ #define HX8279_VGH_MIN_MV 8700
+ #define HX8279_VGH_STEP_MV 300
+ #define HX8279_VGL_MIN_MV 6700
+ #define HX8279_VGL_STEP_MV 300
+ #define HX8279_VGPNH_MIN_MV 4000
+ #define HX8279_VGPNX_STEP_MV 50
+ #define HX8279_VGH_VOLT_SEL(x) ((x - HX8279_VGH_MIN_MV) / HX8279_VGH_STEP_MV)
+ #define HX8279_VGL_VOLT_SEL(x) ((x - HX8279_VGL_MIN_MV) / HX8279_VGL_STEP_MV)
+ #define HX8279_VGPN_VOLT_SEL(x) ((x - HX8279_VGPNH_MIN_MV) / HX8279_VGPNX_STEP_MV)
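+ /* e.g. VGH = 16500 mV -> selector (16500 - 8700) / 300 = 26 */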
+
+/* Page 1 - Gate driver On Array (GOA) Mux */
+#define HX8279_P1_REG_GOA_L 0xc0
+#define HX8279_P1_REG_GOUTL(x) (HX8279_P1_REG_GOA_L + (x))
+#define HX8279_P1_REG_GOA_R 0xd4
+#define HX8279_P1_REG_GOUTR(x) (HX8279_P1_REG_GOA_R + (x))
+ #define HX8279_GOUT_STB GENMASK(7, 6)
+ #define HX8279_GOUT_SEL GENMASK(5, 0)
+
+/* Page 2 - Analog Gamma Configuration */
+#define HX8279_P2_REG_ANALOG_GAMMA 0xc0
+ #define HX8279_P2_REG_GAMMA_T_PVP(x) (HX8279_P2_REG_ANALOG_GAMMA + (x)) /* 0..16 */
+ #define HX8279_P2_REG_GAMMA_T_PVN(x) (HX8279_P2_REG_GAMMA_T_PVP(17) + (x)) /* 0..16 */
+
+/* Page 3 - Gate driver On Array (GOA) Configuration */
+#define HX8279_P3_REG_UNKNOWN_BA 0xba
+#define HX8279_P3_REG_GOA_CKV_FALL_PREC 0xbc
+#define HX8279_P3_REG_GOA_TIMING_ODD 0xc2
+ #define HX8279_P3_REG_GOA_TO(x) (HX8279_P3_REG_GOA_TIMING_ODD + x) /* GOA_T0..5 */
+#define HX8279_P3_REG_GOA_STVL 0xc8
+ #define HX8279_P3_GOA_STV_LEAD GENMASK(4, 0)
+#define HX8279_P3_REG_GOA_CKVL 0xc9
+ #define HX8279_P3_GOA_CKV_LEAD GENMASK(4, 0)
+#define HX8279_P3_REG_GOA_CKVD 0xca
+ #define HX8279_P3_GOA_CKV_NONOVERLAP BIT(7)
+ #define HX8279_P3_GOA_CKV_RESERVED BIT(6)
+ #define HX8279_P3_GOA_CKV_DUMMY GENMASK(5, 0)
+#define HX8279_P3_REG_GOA_CKV_RISE_PREC 0xcb
+#define HX8279_P3_REG_GOA_CLR1_W_ADJ 0xd2
+#define HX8279_P3_REG_GOA_CLR234_W_ADJ 0xd3
+#define HX8279_P3_REG_GOA_CLR1_CFG 0xd4
+#define HX8279_P3_REG_GOA_CLR_CFG(x) (HX8279_P3_REG_GOA_CLR1_CFG + (x)) /* CLR1..4 */
+ #define HX8279_P3_GOA_CLR_CFG_POLARITY BIT(7)
+ #define HX8279_P3_GOA_CLR_CFG_STARTPOS GENMASK(6, 0)
+#define HX8279_P3_REG_GOA_TIMING_EVEN 0xdd
+ #define HX8279_P3_REG_GOA_TE(x) (HX8279_P3_REG_GOA_TIMING_EVEN + x)
+#define HX8279_P3_REG_UNKNOWN_E4 0xe4
+#define HX8279_P3_REG_UNKNOWN_E5 0xe5
+
+/* Page 5 - MIPI */
+#define HX8279_P5_REG_TIMING 0xb3
+ #define HX8279_P5_TIMING_THS_SETTLE GENMASK(7, 5)
+ #define HX8279_P5_TIMING_LHS_SETTLE BIT(4)
+ #define HX8279_P5_TIMING_TLPX GENMASK(3, 0)
+#define HX8279_P5_REG_UNKNOWN_B8 0xb8
+#define HX8279_P5_REG_UNKNOWN_BC 0xbc
+#define HX8279_P5_REG_UNKNOWN_D6 0xd6
+
+/* Page 6 - Engineer */
+#define HX8279_P6_REG_ENGINEER_PWD 0xb8
+#define HX8279_P6_REG_INHOUSE_FUNC 0xc0
+ #define HX8279_P6_ENG_UNLOCK_WORD 0xa5
+#define HX8279_P6_REG_GAMMA_CHOPPER 0xbc
+ #define HX8279_P6_GAMMA_POCGM_CTL GENMASK(6, 4)
+ #define HX8279_P6_GAMMA_POGCMD_CTL GENMASK(2, 0)
+#define HX8279_P6_REG_VOLT_ADJ 0xc7
+ /* For VCCIFS and VCCS - 0: 1450, 1: 1500, 2: 1550, 3: 1600 mV */
+ #define HX8279_P6_VOLT_ADJ_VCCIFS GENMASK(3, 2)
+ #define HX8279_P6_VOLT_ADJ_VCCS GENMASK(1, 0)
+#define HX8279_P6_REG_DLY_TIME_ADJ 0xd5
+
+/* Page 7...12 - Digital Gamma Adjustment */
+#define HX8279_PG_DIGITAL_GAMMA 0xb1
+#define HX8279_DGAMMA_DGMA1_HI GENMASK(7, 6)
+#define HX8279_DGAMMA_DGMA2_HI GENMASK(5, 4)
+#define HX8279_DGAMMA_DGMA3_HI GENMASK(3, 2)
+#define HX8279_DGAMMA_DGMA4_HI GENMASK(1, 0)
+#define HX8279_PG_DGAMMA_NUM_LO_BYTES 24
+#define HX8279_PG_DGAMMA_NUM_HI_BYTES 6
+
+struct hx8279 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi[2];
+ struct regulator_bulk_data vregs[2];
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+ const struct hx8279_panel_desc *desc;
+ u8 last_page;
+ bool skip_voltage_config;
+ bool skip_goa_config;
+ bool skip_goa_timing;
+ bool skip_goa_even_timing;
+ bool skip_mipi_timing;
+};
+
+struct hx8279_panel_mode {
+ const struct drm_display_mode mode;
+ u8 bpc;
+ bool is_video_mode;
+};
+
+/**
+ * struct hx8279_goa_mux - Gate driver On Array Muxer
+ * @gout_l: Mux GOA signal to GOUT Left pin
+ * @gout_r: Mux GOA signal to GOUT Right pin
+ */
+struct hx8279_goa_mux {
+ u8 gout_l[20];
+ u8 gout_r[20];
+};
+
+/**
+ * struct hx8279_analog_gamma - Analog Gamma Adjustment
+ * @pos: Positive gamma op's input voltage, adjusted by VGP(H/L)
+ * @neg: Negative gamma op's input voltage, adjusted by VGN(H/L)
+ *
+ * Analog Gamma correction is performed with 17+17 reference voltages,
+ * changed with resistor strings, and defined with 17 register values
+ * for positive and 17 for negative.
+ *
+ * Each register holds resistance values, in steps of 8.5 ohms, for the
+ * following gamma levels:
+ * 0, 8, 16, 28, 40, 56, 80, 128, 176, 200, 216, 228, 240, 248, 252, 255.
+ */
+struct hx8279_analog_gamma {
+ u8 pos[17];
+ u8 neg[17];
+};
+
+/**
+ * struct hx8279_digital_gamma - Digital Gamma Adjustment
+ * @r: Adjustment for red component
+ * @g: Adjustment for green component
+ * @b: Adjustment for blue component
+ *
+ * The layout of this structure follows the register layout to simplify
+ * both the handling and the declaration of those values in the driver.
+ * Gamma correction is internally done with a 24-segment piecewise
+ * linear interpolation; those segments are defined with 24 ten-bit
+ * values, of which:
+ * - The LOW eight bits for the first 24 registers start at the first
+ * register (at 0xb1) of the Digital Gamma Adjustment page;
+ * - The HIGH two bits for each of the 24 registers are contained
+ * in the last six registers;
+ * - The last six registers contain four groups of two-bit HI values
+ * for each of the first 24 registers, but in an inverted fashion:
+ * the first two bits relate to the last register
+ * of a set of four.
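+ *
+ * As an illustration (an assumed decode, not datasheet wording), the
+ * first value is rebuilt as:
+ * r[0] | (FIELD_GET(HX8279_DGAMMA_DGMA1_HI, r[24]) << 8)
+ * which yields a 10-bit value in the 0..1023 range.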
+ *
+ * The 24 segments refer to the following gamma levels:
+ * 0, 1, 3, 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160,
+ * 192, 208, 224, 232, 240, 244, 248, 252, 254, 255
+ */
+struct hx8279_digital_gamma {
+ u8 r[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+ u8 g[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+ u8 b[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+};
+
+struct hx8279_panel_desc {
+ const struct mipi_dsi_device_info dsi_info;
+ const struct hx8279_panel_mode *mode_data;
+ u8 num_lanes;
+ u8 num_modes;
+
+ /* Page 0 */
+ unsigned int vgh_mv;
+ unsigned int vgl_mv;
+ unsigned int vgph_mv;
+ unsigned int vgnh_mv;
+
+ /* Page 1 */
+ const struct hx8279_goa_mux *gmux;
+
+ /* Page 2 */
+ const struct hx8279_analog_gamma *agamma;
+
+ /* Page 3 */
+ u8 goa_unk_ba;
+ u8 goa_odd_timing[6];
+ u8 goa_even_timing[6];
+ u8 goa_stv_lead_time_ck;
+ u8 goa_ckv_lead_time_ck;
+ u8 goa_ckv_dummy_vblank_num;
+ u8 goa_ckv_rise_precharge;
+ u8 goa_ckv_fall_precharge;
+ bool goa_ckv_non_overlap_ctl;
+ u8 goa_clr1_width_adj;
+ u8 goa_clr234_width_adj;
+ s8 goa_clr_polarity[4];
+ int goa_clr_start_pos[4];
+ u8 goa_unk_e4;
+ u8 goa_unk_e5;
+
+ /* Page 5 */
+ u8 bta_tlpx;
+ bool lhs_settle_time_by_osc25;
+ u8 ths_settle_time;
+ u8 timing_unk_b8;
+ u8 timing_unk_bc;
+ u8 timing_unk_d6;
+
+ /* Page 6 */
+ u8 gamma_ctl;
+ u8 volt_adj;
+ u8 src_delay_time_adj_ck;
+
+ /* Page 7..12 */
+ const struct hx8279_digital_gamma *dgamma;
+};
+
+static inline struct hx8279 *to_hx8279(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx8279, panel);
+}
+
+static void hx8279_set_page(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx, u8 page)
+{
+ const u8 cmd_set_page[] = { HX8279_REG_PAGE, page };
+
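+ /*
+ * Page selects are cached in hx->last_page: redundant selects are
+ * skipped, and the cache is updated only if the write succeeded.
+ */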
+ if (hx->last_page == page)
+ return;
+
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_page, ARRAY_SIZE(cmd_set_page));
+ if (!dsi_ctx->accum_err)
+ hx->last_page = page;
+}
+
+static void hx8279_set_module_config(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_voltage[2];
+
+ if (hx->skip_voltage_config)
+ return;
+
+ /* Page 0 - Driver/Module Configuration */
+ hx8279_set_page(hx, dsi_ctx, 0);
+
+ if (desc->vgh_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGHS;
+ cmd_set_voltage[1] = HX8279_VGH_VOLT_SEL(desc->vgh_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgl_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGLS;
+ cmd_set_voltage[1] = HX8279_VGL_VOLT_SEL(desc->vgl_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgph_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGPHS;
+ cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgph_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgnh_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGNHS;
+ cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgnh_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+}
+
+static void hx8279_set_gmux(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_goa_mux *gmux = hx->desc->gmux;
+ u8 cmd_set_gmux[2];
+ int i;
+
+ if (!gmux)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 1);
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) {
+ cmd_set_gmux[0] = HX8279_P1_REG_GOUTL(i);
+ cmd_set_gmux[1] = gmux->gout_l[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gmux,
+ ARRAY_SIZE(cmd_set_gmux));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) {
+ cmd_set_gmux[0] = HX8279_P1_REG_GOUTR(i);
+ cmd_set_gmux[1] = gmux->gout_r[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gmux,
+ ARRAY_SIZE(cmd_set_gmux));
+ }
+}
+
+static void hx8279_set_analog_gamma(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_analog_gamma *agamma = hx->desc->agamma;
+ u8 cmd_set_ana_gamma[2];
+ int i;
+
+ if (!agamma)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 2);
+
+ for (i = 0; i < ARRAY_SIZE(agamma->pos); i++) {
+ cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVP(i);
+ cmd_set_ana_gamma[1] = agamma->pos[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma,
+ ARRAY_SIZE(cmd_set_ana_gamma));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(agamma->neg); i++) {
+ cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVN(i);
+ cmd_set_ana_gamma[1] = agamma->neg[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma,
+ ARRAY_SIZE(cmd_set_ana_gamma));
+ }
+}
+
+static void hx8279_set_goa_timing(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_goa_t[2];
+ int i;
+
+ if (hx->skip_goa_timing)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 3);
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) {
+ cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TO(i);
+ cmd_set_goa_t[1] = desc->goa_odd_timing[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t,
+ ARRAY_SIZE(cmd_set_goa_t));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) {
+ cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TE(i);
+ cmd_set_goa_t[1] = desc->goa_even_timing[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t,
+ ARRAY_SIZE(cmd_set_goa_t));
+ }
+}
+
+static void hx8279_set_goa_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_goa[2];
+ int i;
+
+ if (hx->skip_goa_config)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 3);
+
+ if (desc->goa_unk_ba) {
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_BA;
+ cmd_set_goa[1] = desc->goa_unk_ba;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_stv_lead_time_ck) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_STVL;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_STV_LEAD,
+ desc->goa_stv_lead_time_ck);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_ckv_lead_time_ck) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVL;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_LEAD,
+ desc->goa_ckv_lead_time_ck);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_ckv_dummy_vblank_num) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVD;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_DUMMY,
+ desc->goa_ckv_dummy_vblank_num);
+ cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CKV_NONOVERLAP,
+ desc->goa_ckv_non_overlap_ctl);
+ /* RESERVED must be always set */
+ cmd_set_goa[1] |= HX8279_P3_GOA_CKV_RESERVED;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ /*
+ * If either precharge value is non-zero, write both registers;
+ * their hardware default is zero anyway.
+ */
+ if (desc->goa_ckv_rise_precharge || desc->goa_ckv_fall_precharge) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_RISE_PREC;
+ cmd_set_goa[1] = desc->goa_ckv_rise_precharge;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_FALL_PREC;
+ cmd_set_goa[1] = desc->goa_ckv_fall_precharge;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_clr1_width_adj) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR1_W_ADJ;
+ cmd_set_goa[1] = desc->goa_clr1_width_adj;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_clr234_width_adj) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR234_W_ADJ;
+ cmd_set_goa[1] = desc->goa_clr234_width_adj;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ /* Polarity and Start Position arrays are of the same size */
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) {
+ if (desc->goa_clr_polarity[i] < 0 || desc->goa_clr_start_pos[i] < 0)
+ continue;
+
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR_CFG(i);
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CLR_CFG_STARTPOS,
+ desc->goa_clr_start_pos[i]);
+ cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CLR_CFG_POLARITY,
+ desc->goa_clr_polarity[i]);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_unk_e4) {
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E4;
+ cmd_set_goa[1] = desc->goa_unk_e4;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E5;
+ cmd_set_goa[1] = desc->goa_unk_e5;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+}
+
+static void hx8279_set_mipi_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_mipi[2];
+
+ if (hx->skip_mipi_timing)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 5);
+
+ if (desc->bta_tlpx || desc->ths_settle_time || desc->lhs_settle_time_by_osc25) {
+ cmd_set_mipi[0] = HX8279_P5_REG_TIMING;
+ cmd_set_mipi[1] = FIELD_PREP(HX8279_P5_TIMING_TLPX, desc->bta_tlpx);
+ cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_THS_SETTLE,
+ desc->ths_settle_time);
+ cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_LHS_SETTLE,
+ desc->lhs_settle_time_by_osc25);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_b8) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_B8;
+ cmd_set_mipi[1] = desc->timing_unk_b8;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_bc) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_BC;
+ cmd_set_mipi[1] = desc->timing_unk_bc;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_d6) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_D6;
+ cmd_set_mipi[1] = desc->timing_unk_d6;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+}
+
+static void hx8279_set_adv_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ const u8 cmd_set_dly[] = { HX8279_P6_REG_DLY_TIME_ADJ, desc->src_delay_time_adj_ck };
+ const u8 cmd_set_gamma[] = { HX8279_P6_REG_GAMMA_CHOPPER, desc->gamma_ctl };
+ const u8 cmd_set_volt_adj[] = { HX8279_P6_REG_VOLT_ADJ, desc->volt_adj };
+ u8 cmd_set_eng[] = { HX8279_P6_REG_ENGINEER_PWD, HX8279_P6_ENG_UNLOCK_WORD };
+
+ if (!desc->gamma_ctl && !desc->src_delay_time_adj_ck && !desc->volt_adj)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 6);
+
+ /* Unlock ENG settings: write same word to both ENGINEER_PWD and INHOUSE_FUNC */
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ /* Set Gamma Chopper and Gamma buffer Chopper control */
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gamma, ARRAY_SIZE(cmd_set_gamma));
+
+ /* Set Source delay time adjustment (CKV falling to Source off) */
+ if (desc->src_delay_time_adj_ck)
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dly,
+ ARRAY_SIZE(cmd_set_dly));
+
+ /* Set voltage adjustment */
+ if (desc->volt_adj)
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_volt_adj,
+ ARRAY_SIZE(cmd_set_volt_adj));
+
+ /* Lock ENG settings again */
+ cmd_set_eng[0] = HX8279_P6_REG_ENGINEER_PWD;
+ cmd_set_eng[1] = 0;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+}
+
+static void hx8279_set_digital_gamma(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_digital_gamma *dgamma = hx->desc->dgamma;
+ u8 cmd_set_dig_gamma[2];
+ int i, j;
+
+ if (!dgamma)
+ return;
+
+ /*
+ * Pages 7..9 are for RGB Positive, 10..12 are for RGB Negative:
+ * The first iteration sets all positive component registers,
+ * the second one sets all negatives.
+ */
+ for (i = 0; i < 2; i++) {
+ u8 pg_neg = i * 3;
+
+ hx8279_set_page(hx, dsi_ctx, 7 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->r); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->r[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+
+ hx8279_set_page(hx, dsi_ctx, 8 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->g); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->g[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+
+ hx8279_set_page(hx, dsi_ctx, 9 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->b); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->b[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+ }
+}
+
+static int hx8279_on(struct hx8279 *hx)
+{
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ /* Page 5 */
+ hx8279_set_mipi_cfg(hx, &dsi_ctx);
+
+ /* Page 1 */
+ hx8279_set_gmux(hx, &dsi_ctx);
+
+ /* Page 2 */
+ hx8279_set_analog_gamma(hx, &dsi_ctx);
+
+ /* Page 3 */
+ hx8279_set_goa_cfg(hx, &dsi_ctx);
+ hx8279_set_goa_timing(hx, &dsi_ctx);
+
+ /* Page 0 - Driver/Module Configuration */
+ hx8279_set_module_config(hx, &dsi_ctx);
+
+ /* Page 6 */
+ hx8279_set_adv_cfg(hx, &dsi_ctx);
+
+ /* Pages 7..12 */
+ hx8279_set_digital_gamma(hx, &dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static void hx8279_power_off(struct hx8279 *hx)
+{
+ gpiod_set_value_cansleep(hx->reset_gpio, 0);
+ usleep_range(100, 500);
+ gpiod_set_value_cansleep(hx->enable_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(hx->vregs), hx->vregs);
+}
+
+static int hx8279_disable(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_enable(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_prepare(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hx->vregs), hx->vregs);
+ if (ret)
+ return ret;
+
+ gpiod_set_value_cansleep(hx->enable_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(hx->reset_gpio, 1);
+ usleep_range(6000, 7000);
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ if (hx->dsi[1])
+ hx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = hx8279_on(hx);
+ if (ret) {
+ hx8279_power_off(hx);
+ return ret;
+ }
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 130);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_unprepare(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 130);
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ if (hx->dsi[1])
+ hx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ hx8279_power_off(hx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ int i;
+
+ for (i = 0; i < hx->desc->num_modes; i++) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &hx->desc->mode_data[i].mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+ if (hx->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = hx->desc->mode_data[0].bpc;
+ connector->display_info.height_mm = hx->desc->mode_data[0].mode.height_mm;
+ connector->display_info.width_mm = hx->desc->mode_data[0].mode.width_mm;
+
+ return hx->desc->num_modes;
+}
+
+static const struct drm_panel_funcs hx8279_panel_funcs = {
+ .disable = hx8279_disable,
+ .unprepare = hx8279_unprepare,
+ .prepare = hx8279_prepare,
+ .enable = hx8279_enable,
+ .get_modes = hx8279_get_modes,
+};
+
+static int hx8279_init_vregs(struct hx8279 *hx, struct device *dev)
+{
+ int ret;
+
+ hx->vregs[0].supply = "vdd";
+ hx->vregs[1].supply = "iovcc";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hx->vregs),
+ hx->vregs);
+ if (ret < 0)
+ return ret;
+
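+ /* Sanity-check the supply ranges: vdd 3.0..5.0 V, iovcc 1.7..1.9 V */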
+ ret = regulator_is_supported_voltage(hx->vregs[0].consumer,
+ 3000000, 5000000);
+ if (!ret)
+ return -EINVAL;
+
+ ret = regulator_is_supported_voltage(hx->vregs[1].consumer,
+ 1700000, 1900000);
+ if (!ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hx8279_check_gmux_config(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ const struct hx8279_goa_mux *gmux = desc->gmux;
+ int i;
+
+ /* No gmux defined means we simply skip the GOA mux configuration */
+ if (!gmux)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) {
+ if (gmux->gout_l[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value found in gout_l[%d]\n", i);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) {
+ if (gmux->gout_r[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value found in gout_r[%d]\n", i);
+ }
+
+ return 0;
+}
+
+static int hx8279_check_goa_config(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ bool goa_odd_valid, goa_even_valid;
+ int i, num_zero, num_clr = 0;
+
+ /* The odd timing block counts as absent only if all values are zero. */
+ num_zero = 0;
+ for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) {
+ if (!desc->goa_odd_timing[i])
+ num_zero++;
+ }
+
+ goa_odd_valid = (num_zero != ARRAY_SIZE(desc->goa_odd_timing));
+
+ /* Same check for the even timing block */
+ num_zero = 0;
+ for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) {
+ if (!desc->goa_even_timing[i])
+ num_zero++;
+ }
+
+ goa_even_valid = (num_zero != ARRAY_SIZE(desc->goa_even_timing));
+
+ /* Programming one without the other would make no sense! */
+ if (goa_odd_valid != goa_even_valid)
+ return -EINVAL;
+
+ /* We know that both are either true or false now, check just one */
+ if (!goa_odd_valid)
+ hx->skip_goa_timing = true;
+
+ if (!desc->goa_unk_ba && !desc->goa_stv_lead_time_ck &&
+ !desc->goa_ckv_lead_time_ck && !desc->goa_ckv_dummy_vblank_num &&
+ !desc->goa_ckv_rise_precharge && !desc->goa_ckv_fall_precharge &&
+ !desc->goa_clr1_width_adj && !desc->goa_clr234_width_adj &&
+ !desc->goa_unk_e4 && !desc->goa_unk_e5) {
+ hx->skip_goa_config = true;
+ return 0;
+ }
+
+ if ((desc->goa_stv_lead_time_ck > HX8279_P3_GOA_STV_LEAD) ||
+ (desc->goa_ckv_lead_time_ck > HX8279_P3_GOA_CKV_LEAD) ||
+ (desc->goa_ckv_dummy_vblank_num > HX8279_P3_GOA_CKV_DUMMY))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid lead timings in GOA config\n");
+
+ /*
+ * Don't perform zero check for polarity and start position, as
+ * both pol=0 and start=0 are valid configuration values.
+ */
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_start_pos); i++) {
+ if (desc->goa_clr_start_pos[i] < 0)
+ continue;
+ else if (desc->goa_clr_start_pos[i] > HX8279_P3_GOA_CLR_CFG_STARTPOS)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid start position for CLR%d\n", i + 1);
+ else
+ num_clr++;
+ }
+ if (!num_clr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) {
+ if (num_clr < 0)
+ return -EINVAL;
+
+ if (desc->goa_clr_polarity[i] < 0)
+ continue;
+ else if (desc->goa_clr_polarity[i] > 1)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid polarity for CLR%d\n", i + 1);
+ else
+ num_clr--;
+ }
+
+ return 0;
+}
+
+static int hx8279_check_dig_gamma(struct hx8279 *hx, struct device *dev, const u8 *component)
+{
+ u8 gamma_high_bits[4];
+ u16 prev_val = 0;
+ int i, j, k, x;
+
+ /*
+ * The gamma values are 10 bits long and shall be incremental
+ * to form a digital gamma correction reference curve.
+ *
+ * As for the registers format: the first 24 bytes contain each the
+ * lowest 8 bits for each of the gamma level references, and the last
+ * 6 bytes contain the high two bits of 4 registers at a time, where
+ * the first two bits are relative to the last register, and the last
+ * two are relative to the first register.
+ *
+ * Another way of saying, those are the first four LOW values:
+ * DGMA1_LO = 0xb1, DGMA2_LO = 0xb2, DGMA3_LO = 0xb3, DGMA4_LO = 0xb4
+ *
+ * The high values for those four are at DGMA1_4_HI = 0xc9;
+ * ...and DGMA1_4_HI's data contains the following bits:
+ * [1:0] = DGMA4_HI, [3:2] = DGMA3_HI, [5:4] = DGMA2_HI, [7:6] = DGMA1_HI
+ */
+ for (i = 0; i < HX8279_PG_DGAMMA_NUM_HI_BYTES; i++) {
+ k = HX8279_PG_DGAMMA_NUM_LO_BYTES + i;
+ j = i * 4;
+ x = 0;
+
+ gamma_high_bits[0] = FIELD_GET(HX8279_DGAMMA_DGMA1_HI, component[k]);
+ gamma_high_bits[1] = FIELD_GET(HX8279_DGAMMA_DGMA2_HI, component[k]);
+ gamma_high_bits[2] = FIELD_GET(HX8279_DGAMMA_DGMA3_HI, component[k]);
+ gamma_high_bits[3] = FIELD_GET(HX8279_DGAMMA_DGMA4_HI, component[k]);
+
+ do {
+ u16 cur_val = component[j] | (gamma_high_bits[x] << 8);
+
+ if (cur_val < prev_val)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid dgamma values: %u < %u!\n",
+ cur_val, prev_val);
+ prev_val = cur_val;
+ j++;
+ x++;
+ } while (x < 4);
+ }
+
+ return 0;
+}
+
+static int hx8279_check_params(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ int ret;
+
+ /* Voltages config validation */
+ if (!desc->vgh_mv && !desc->vgl_mv && !desc->vgph_mv && !desc->vgnh_mv)
+ hx->skip_voltage_config = true;
+ else if ((desc->vgh_mv && desc->vgh_mv < HX8279_VGH_MIN_MV) ||
+ (desc->vgl_mv && desc->vgl_mv < HX8279_VGL_MIN_MV) ||
+ (desc->vgph_mv && desc->vgph_mv < HX8279_VGPNH_MIN_MV) ||
+ (desc->vgnh_mv && desc->vgnh_mv < HX8279_VGPNH_MIN_MV))
+ return -EINVAL;
+
+ /* GOA Muxing validation */
+ ret = hx8279_check_gmux_config(hx, dev);
+ if (ret)
+ return ret;
+
+ /* GOA Configuration validation */
+ ret = hx8279_check_goa_config(hx, dev);
+ if (ret)
+ return ret;
+
+ /* MIPI Configuration validation */
+ if (!desc->bta_tlpx && !desc->lhs_settle_time_by_osc25 &&
+ !desc->ths_settle_time && !desc->timing_unk_b8 &&
+ !desc->timing_unk_bc && !desc->timing_unk_d6)
+ hx->skip_mipi_timing = true;
+
+ /* ENG/Gamma Configuration validation */
+ if (desc->gamma_ctl > (HX8279_P6_GAMMA_POCGM_CTL | HX8279_P6_GAMMA_POGCMD_CTL))
+ return -EINVAL;
+
+ /* Digital Gamma values validation */
+ if (desc->dgamma) {
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->r);
+ if (ret)
+ return ret;
+
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->g);
+ if (ret)
+ return ret;
+
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->b);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx8279_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi_r;
+ struct hx8279 *hx;
+ int i, ret;
+
+ hx = devm_drm_panel_alloc(dev, struct hx8279, panel,
+ &hx8279_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(hx))
+ return PTR_ERR(hx);
+
+ ret = hx8279_init_vregs(hx, dev);
+ if (ret)
+ return ret;
+
+ hx->desc = device_get_match_data(dev);
+ if (!hx->desc)
+ return -ENODEV;
+
+ /*
+ * In some DriverICs some or all fields may be OTP: perform a
+ * basic configuration check before writing, to help avoid
+ * irreparable mistakes.
+ *
+ * Please note that this is not perfect and will only check
+ * whether the values are plausible; values that are wrong for
+ * a specific display, but still plausible for the DriverIC
+ * configuration, will be accepted.
+ */
+ ret = hx8279_check_params(hx, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Invalid DriverIC configuration\n");
+
+ /* The enable line may be always tied to VCCIO, so this is optional */
+ hx->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_ASIS);
+ if (IS_ERR(hx->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(hx->enable_gpio),
+ "Failed to get enable GPIO\n");
+
+ hx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(hx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(hx->reset_gpio),
+ "Failed to get reset GPIO\n");
+
+ /* If the panel is connected on two DSIs then DSI0 left, DSI1 right */
+ dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (dsi_r) {
+ const struct mipi_dsi_device_info *info = &hx->desc->dsi_info;
+ struct mipi_dsi_host *dsi_r_host;
+
+ dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ of_node_put(dsi_r);
+ if (!dsi_r_host)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "Cannot get secondary DSI host\n");
+
+ hx->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi_r_host, info);
+ if (IS_ERR(hx->dsi[1]))
+ return dev_err_probe(dev, PTR_ERR(hx->dsi[1]),
+ "Cannot get secondary DSI node\n");
+ mipi_dsi_set_drvdata(hx->dsi[1], hx);
+ }
+
+ hx->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, hx);
+
+ ret = drm_panel_of_backlight(&hx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&hx->panel);
+
+ for (i = 0; i < 2; i++) {
+ if (!hx->dsi[i])
+ continue;
+
+ hx->dsi[i]->lanes = hx->desc->num_lanes;
+ hx->dsi[i]->format = MIPI_DSI_FMT_RGB888;
+
+ hx->dsi[i]->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_LPM;
+
+ if (hx->desc->mode_data[0].is_video_mode)
+ hx->dsi[i]->mode_flags |= MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+ ret = devm_mipi_dsi_attach(dev, hx->dsi[i]);
+ if (ret < 0) {
+ drm_panel_remove(&hx->panel);
+ return dev_err_probe(dev, ret,
+ "Cannot attach to DSI%d host.\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static void hx8279_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx8279 *hx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&hx->panel);
+}
+
+static const struct hx8279_panel_mode aoly_sl101pm1794fog_v15_modes[] = {
+ {
+ .mode = {
+ .clock = 159420,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 60,
+ .htotal = 1200 + 80 + 60 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .width_mm = 136,
+ .height_mm = 217,
+ .type = DRM_MODE_TYPE_DRIVER
+ },
+ .bpc = 8,
+ .is_video_mode = true,
+ },
+};
+
+static const struct hx8279_panel_mode startek_kd070fhfid078_modes[] = {
+ {
+ .mode = {
+ .clock = 156458,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 50,
+ .hsync_end = 1200 + 50 + 24,
+ .htotal = 1200 + 50 + 24 + 66,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 14,
+ .vsync_end = 1920 + 14 + 2,
+ .vtotal = 1920 + 14 + 2 + 10,
+ .width_mm = 95,
+ .height_mm = 151,
+ .type = DRM_MODE_TYPE_DRIVER
+ },
+ .bpc = 8,
+ .is_video_mode = true,
+ },
+};
+
+static const struct hx8279_goa_mux aoly_sl101pm1794fog_v15_gmux = {
+ .gout_l = { 0x5, 0x5, 0xb, 0xb, 0x9, 0x9, 0x16, 0x16, 0xe, 0xe,
+ 0x7, 0x7, 0x26, 0x26, 0x15, 0x15, 0x1, 0x1, 0x3, 0x3 },
+ .gout_r = { 0x6, 0x6, 0xc, 0xc, 0xa, 0xa, 0x16, 0x16, 0xe, 0xe,
+ 0x8, 0x8, 0x26, 0x26, 0x15, 0x15, 0x2, 0x2, 0x4, 0x4 },
+};
+
+static const struct hx8279_analog_gamma aoly_sl101pm1794fog_v15_ana_gamma = {
+ .pos = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x1c, 0x2c, 0x33, 0x31,
+ 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 0x2f, 0x7 },
+ .neg = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x3f, 0x3f, 0x3f, 0x3f,
+ 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 0x2f, 0x7 },
+};
+
+static const struct hx8279_digital_gamma aoly_sl101pm1794fog_v15_dig_gamma = {
+ .r = { 0x0, 0x5, 0x10, 0x22, 0x36, 0x4a, 0x6c, 0x9a, 0xd7, 0x17,
+ 0x92, 0x15, 0x18, 0x8c, 0x0, 0x3a, 0x72, 0x8c, 0xa5, 0xb1,
+ 0xbe, 0xca, 0xd1, 0xd4, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+ .g = { 0x4, 0x5, 0x11, 0x24, 0x39, 0x4e, 0x72, 0xa3, 0xe1, 0x25,
+ 0xa8, 0x2e, 0x32, 0xad, 0x28, 0x63, 0x9b, 0xb5, 0xcf, 0xdb,
+ 0xe8, 0xf5, 0xfa, 0xfc, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+ .b = { 0x4, 0x4, 0xf, 0x22, 0x37, 0x4d, 0x71, 0xa2, 0xe1, 0x26,
+ 0xa9, 0x2f, 0x33, 0xac, 0x24, 0x5d, 0x94, 0xac, 0xc5, 0xd1,
+ 0xdc, 0xe8, 0xed, 0xf0, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+};
+
+static const struct hx8279_panel_desc aoly_sl101pm1794fog_v15 = {
+ .dsi_info = {
+ .type = "L101PM1794FOG-V15",
+ .channel = 0,
+ .node = NULL,
+ },
+ .mode_data = aoly_sl101pm1794fog_v15_modes,
+ .num_modes = ARRAY_SIZE(aoly_sl101pm1794fog_v15_modes),
+ .num_lanes = 4,
+
+ /* Driver/Module Configuration: LC Matrix voltages */
+ .vgh_mv = 16500,
+ .vgl_mv = 11200,
+ .vgph_mv = 4600,
+ .vgnh_mv = 4600,
+
+ /* Analog Gamma correction */
+ .agamma = &aoly_sl101pm1794fog_v15_ana_gamma,
+
+ /* Gate driver On Array (GOA) Muxing */
+ .gmux = &aoly_sl101pm1794fog_v15_gmux,
+
+ /* Gate driver On Array (GOA) Mux Config */
+ .goa_unk_ba = 0xf0,
+ .goa_odd_timing = { 0, 0, 0, 42, 0, 0 },
+ .goa_even_timing = { 1, 42, 0, 0 },
+ .goa_stv_lead_time_ck = 11,
+ .goa_ckv_lead_time_ck = 7,
+ .goa_ckv_dummy_vblank_num = 3,
+ .goa_ckv_rise_precharge = 1,
+ .goa_clr1_width_adj = 0,
+ .goa_clr234_width_adj = 0,
+ .goa_clr_polarity = { 1, 0, 0, 0 },
+ .goa_clr_start_pos = { 8, 9, 3, 4 },
+ .goa_unk_e4 = 0xc0,
+ .goa_unk_e5 = 0x0d,
+
+ /* MIPI Configuration */
+ .bta_tlpx = 2,
+ .lhs_settle_time_by_osc25 = true,
+ .ths_settle_time = 2,
+ .timing_unk_b8 = 0xa5,
+ .timing_unk_bc = 0x20,
+ .timing_unk_d6 = 0x7f,
+
+ /* ENG/Gamma Configuration */
+ .gamma_ctl = 0,
+ .volt_adj = FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCIFS, 3) |
+ FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCS, 3),
+ .src_delay_time_adj_ck = 50,
+
+ /* Digital Gamma Adjustment */
+ .dgamma = &aoly_sl101pm1794fog_v15_dig_gamma,
+};
+
+static const struct hx8279_goa_mux startek_kd070fhfid078_gmux = {
+ .gout_l = { 0xd, 0xd, 0x6, 0x6, 0x8, 0x8, 0xa, 0xa, 0xc, 0xc,
+ 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x4, 0x4, 0x0, 0x0 },
+ .gout_r = { 0xd, 0xd, 0x5, 0x5, 0x7, 0x7, 0x9, 0x9, 0xb, 0xb,
+ 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x3, 0x3, 0x0, 0x0 },
+};
+
+static const struct hx8279_panel_desc startek_kd070fhfid078 = {
+ .dsi_info = {
+ .type = "KD070FHFID078",
+ .channel = 0,
+ .node = NULL,
+ },
+ .mode_data = startek_kd070fhfid078_modes,
+ .num_modes = ARRAY_SIZE(startek_kd070fhfid078_modes),
+ .num_lanes = 4,
+
+ /* Driver/Module Configuration: LC Matrix voltages */
+ .vgh_mv = 18000,
+ .vgl_mv = 12100,
+ .vgph_mv = 5500,
+ .vgnh_mv = 5500,
+
+ /* Gate driver On Array (GOA) Mux Config */
+ .gmux = &startek_kd070fhfid078_gmux,
+
+ /* Gate driver On Array (GOA) Configuration */
+ .goa_unk_ba = 0xf0,
+ .goa_stv_lead_time_ck = 7,
+ .goa_ckv_lead_time_ck = 3,
+ .goa_ckv_dummy_vblank_num = 1,
+ .goa_ckv_rise_precharge = 0,
+ .goa_ckv_fall_precharge = 0,
+ .goa_clr1_width_adj = 1,
+ .goa_clr234_width_adj = 5,
+ .goa_clr_polarity = { 0, 1, -1, -1 },
+ .goa_clr_start_pos = { 5, 10, -1, -1 },
+ .goa_unk_e4 = 0xc0,
+ .goa_unk_e5 = 0x00,
+
+ /* MIPI Configuration */
+ .bta_tlpx = 2,
+ .lhs_settle_time_by_osc25 = true,
+ .ths_settle_time = 2,
+ .timing_unk_b8 = 0x7f,
+ .timing_unk_bc = 0x20,
+ .timing_unk_d6 = 0x7f,
+
+ /* ENG/Gamma Configuration */
+ .gamma_ctl = FIELD_PREP_CONST(HX8279_P6_GAMMA_POCGM_CTL, 1) |
+ FIELD_PREP_CONST(HX8279_P6_GAMMA_POGCMD_CTL, 1),
+ .src_delay_time_adj_ck = 72,
+};
+
+static const struct of_device_id hx8279_of_match[] = {
+ { .compatible = "aoly,sl101pm1794fog-v15", .data = &aoly_sl101pm1794fog_v15 },
+ { .compatible = "startek,kd070fhfid078", .data = &startek_kd070fhfid078 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx8279_of_match);
+
+static struct mipi_dsi_driver hx8279_driver = {
+ .probe = hx8279_probe,
+ .remove = hx8279_remove,
+ .driver = {
+ .name = "panel-himax-hx8279",
+ .of_match_table = hx8279_of_match,
+ },
+};
+module_mipi_dsi_driver(hx8279_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Himax HX8279 DriverIC panels driver");
+MODULE_LICENSE("GPL");
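The .volt_adj and .gamma_ctl initializers above use FIELD_PREP_CONST() from <linux/bitfield.h>, which packs a constant into a bitfield mask entirely at compile time and is therefore usable in static initializers, where the statement-expression-based FIELD_PREP() is not. A minimal sketch of the idiom; the DEMO_* masks are illustrative stand-ins, not the driver's HX8279_P6_* definitions (those live earlier in the file, outside this excerpt):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative masks only, not the driver's real layout. */
#define DEMO_VOLT_ADJ_VCCIFS	GENMASK(7, 5)
#define DEMO_VOLT_ADJ_VCCS	GENMASK(4, 2)

/* Evaluates to (3 << 5) | (3 << 2) = 0x6c at compile time. */
static const u8 demo_volt_adj = FIELD_PREP_CONST(DEMO_VOLT_ADJ_VCCIFS, 3) |
				FIELD_PREP_CONST(DEMO_VOLT_ADJ_VCCS, 3);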
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
index 66abfc44e424..4c432d207634 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
@@ -989,8 +989,6 @@ static int hx83102_panel_add(struct hx83102 *ctx)
ctx->base.prepare_prev_first = true;
- drm_panel_init(&ctx->base, dev, &hx83102_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
if (err < 0)
return dev_err_probe(dev, err, "failed to get orientation\n");
@@ -1013,9 +1011,11 @@ static int hx83102_probe(struct mipi_dsi_device *dsi)
int ret;
const struct hx83102_panel_desc *desc;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, __typeof(*ctx), base,
+ &hx83102_drm_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
desc = of_device_get_match_data(&dsi->dev);
dsi->lanes = 4;
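The hx83102 hunk above is the first of several identical conversions in this series: devm_drm_panel_alloc() allocates the driver context with the struct drm_panel embedded at the named member and initializes it in one step, replacing the devm_kzalloc()/drm_panel_init() pair and turning the NULL check into an ERR_PTR check. A minimal sketch of the pattern on a hypothetical my_panel driver; my_panel_funcs is an assumed placeholder:

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>

struct my_panel {
	struct drm_panel base;	/* member name passed to the macro */
	struct mipi_dsi_device *dsi;
};

static const struct drm_panel_funcs my_panel_funcs = {
	/* .prepare, .enable, .get_modes, ... omitted in this sketch */
};

static int my_panel_probe(struct mipi_dsi_device *dsi)
{
	struct my_panel *ctx;

	/* Allocates ctx and runs drm_panel_init() on ctx->base. */
	ctx = devm_drm_panel_alloc(&dsi->dev, struct my_panel, base,
				   &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	return 0;
}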
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
index 47bce087e339..142cb1cc067a 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83112a.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
@@ -269,9 +269,11 @@ static int hx83112a_probe(struct mipi_dsi_device *dsi)
struct hx83112a_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct hx83112a_panel, panel,
+ &hx83112a_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd1";
ctx->supplies[1].supply = "vsn";
@@ -295,8 +297,6 @@ static int hx83112a_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &hx83112a_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112b.c b/drivers/gpu/drm/panel/panel-himax-hx83112b.c
new file mode 100644
index 000000000000..263f79a967de
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112b.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree.
+ * Copyright (c) 2025 Luca Weiss <luca@lucaweiss.eu>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer specific DSI commands */
+#define HX83112B_SETPOWER1 0xb1
+#define HX83112B_SETDISP 0xb2
+#define HX83112B_SETDRV 0xb4
+#define HX83112B_SETEXTC 0xb9
+#define HX83112B_SETBANK 0xbd
+#define HX83112B_SETDGCLUT 0xc1
+#define HX83112B_SETDISMO 0xc2
+#define HX83112B_UNKNOWN1 0xc6
+#define HX83112B_SETPANEL 0xcc
+#define HX83112B_UNKNOWN2 0xd1
+#define HX83112B_SETPOWER2 0xd2
+#define HX83112B_SETGIP0 0xd3
+#define HX83112B_SETGIP1 0xd5
+#define HX83112B_SETGIP2 0xd6
+#define HX83112B_SETGIP3 0xd8
+#define HX83112B_SETIDLE 0xdd
+#define HX83112B_UNKNOWN3 0xe7
+#define HX83112B_UNKNOWN4 0xe9
+
+struct hx83112b_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct regulator_bulk_data hx83112b_supplies[] = {
+ { .supply = "iovcc" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct hx83112b_panel *to_hx83112b_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx83112b_panel, panel);
+}
+
+static void hx83112b_reset(struct hx83112b_panel *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int hx83112b_on(struct hx83112b_panel *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETEXTC, 0x83, 0x11, 0x2b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISMO, 0x08, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0x04, 0x38, 0x08, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPOWER1,
+ 0xf8, 0x27, 0x27, 0x00, 0x00, 0x0b, 0x0e,
+ 0x0b, 0x0e, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPOWER2, 0x2d, 0x2d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP,
+ 0x80, 0x02, 0x18, 0x80, 0x70, 0x00, 0x08,
+ 0x1c, 0x08, 0x11, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xd1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0xb5, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETIDLE,
+ 0x00, 0x00, 0x08, 0x1c, 0x08, 0x34, 0x34,
+ 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDRV,
+ 0x65, 0x6b, 0x00, 0x00, 0xd0, 0xd4, 0x36,
+ 0xcf, 0x06, 0xce, 0x00, 0xce, 0x00, 0x00,
+ 0x00, 0x07, 0x00, 0x2a, 0x07, 0x01, 0x07,
+ 0x00, 0x00, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDRV, 0x01, 0x67, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT,
+ 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef,
+ 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda,
+ 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe,
+ 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85,
+ 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e,
+ 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07,
+ 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25,
+ 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT,
+ 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef,
+ 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda,
+ 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe,
+ 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85,
+ 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e,
+ 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07,
+ 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25,
+ 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT,
+ 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef,
+ 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda,
+ 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe,
+ 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85,
+ 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e,
+ 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07,
+ 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25,
+ 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISMO, 0xc8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPANEL, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0,
+ 0x81, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x13, 0x40, 0x04, 0x09,
+ 0x09, 0x0b, 0x0b, 0x32, 0x10, 0x08, 0x00,
+ 0x08, 0x32, 0x10, 0x08, 0x00, 0x08, 0x32,
+ 0x10, 0x08, 0x00, 0x08, 0x00, 0x00, 0x0a,
+ 0x08, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN1, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xd4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN1, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xef);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP1,
+ 0x18, 0x18, 0x19, 0x18, 0x18, 0x20, 0x18,
+ 0x18, 0x18, 0x10, 0x10, 0x18, 0x18, 0x00,
+ 0x00, 0x18, 0x18, 0x01, 0x01, 0x18, 0x18,
+ 0x28, 0x28, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x35,
+ 0x35, 0x36, 0x36, 0x37, 0x37, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xfc,
+ 0xfc, 0x00, 0x00, 0xfc, 0xfc, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP2,
+ 0x18, 0x18, 0x19, 0x18, 0x18, 0x20, 0x19,
+ 0x18, 0x18, 0x10, 0x10, 0x18, 0x18, 0x00,
+ 0x00, 0x18, 0x18, 0x01, 0x01, 0x18, 0x18,
+ 0x28, 0x28, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x35,
+ 0x35, 0x36, 0x36, 0x37, 0x37, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xaa, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa, 0xaa,
+ 0xab, 0xaf, 0xef, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xaa, 0xaa, 0xab, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xae, 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xaa, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xba, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xba, 0xaa,
+ 0xaa, 0xaf, 0xea, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xe4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, 0x17, 0x69);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x09, 0x09, 0x00, 0x07, 0xe8, 0x00, 0x26,
+ 0x00, 0x07, 0x00, 0x00, 0xe8, 0x32, 0x00,
+ 0xe9, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x01,
+ 0x01, 0x00, 0x12, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x02, 0x00, 0x01, 0x20, 0x01, 0x18, 0x08,
+ 0xa8, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, 0x20, 0x20, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x00, 0xdc, 0x11, 0x70, 0x00, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x2a, 0xce, 0x02, 0x70, 0x01, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN2, 0x27);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x24);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx83112b_off(struct hx83112b_panel *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx83112b_prepare(struct drm_panel *panel)
+{
+ struct hx83112b_panel *ctx = to_hx83112b_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ hx83112b_reset(ctx);
+
+ ret = hx83112b_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx83112b_unprepare(struct drm_panel *panel)
+{
+ struct hx83112b_panel *ctx = to_hx83112b_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = hx83112b_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode hx83112b_mode = {
+ .clock = (1080 + 40 + 4 + 12) * (2160 + 32 + 2 + 2) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 40,
+ .hsync_end = 1080 + 40 + 4,
+ .htotal = 1080 + 40 + 4 + 12,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 32,
+ .vsync_end = 2160 + 32 + 2,
+ .vtotal = 2160 + 32 + 2 + 2,
+ .width_mm = 65,
+ .height_mm = 128,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int hx83112b_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &hx83112b_mode);
+}
+
+static const struct drm_panel_funcs hx83112b_panel_funcs = {
+ .prepare = hx83112b_prepare,
+ .unprepare = hx83112b_unprepare,
+ .get_modes = hx83112b_get_modes,
+};
+
+static int hx83112b_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops hx83112b_bl_ops = {
+ .update_status = hx83112b_bl_update_status,
+};
+
+static struct backlight_device *
+hx83112b_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 4095,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &hx83112b_bl_ops, &props);
+}
+
+static int hx83112b_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hx83112b_panel *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct hx83112b_panel, panel,
+ &hx83112b_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(hx83112b_supplies),
+ hx83112b_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_LPM;
+
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = hx83112b_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void hx83112b_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx83112b_panel *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id hx83112b_of_match[] = {
+ { .compatible = "djn,98-03057-6598b-i" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx83112b_of_match);
+
+static struct mipi_dsi_driver hx83112b_driver = {
+ .probe = hx83112b_probe,
+ .remove = hx83112b_remove,
+ .driver = {
+ .name = "panel-himax-hx83112b",
+ .of_match_table = hx83112b_of_match,
+ },
+};
+module_mipi_dsi_driver(hx83112b_driver);
+
+MODULE_DESCRIPTION("DRM driver for hx83112b-equipped DSI panels");
+MODULE_LICENSE("GPL");
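The new hx83112b driver leans on the mipi_dsi_multi_context helpers throughout: every _multi call becomes a no-op once dsi_ctx.accum_err is non-zero, so a long init sequence needs exactly one error check at the end rather than one per write. A condensed sketch of the idiom; the command bytes are illustrative:

static int demo_panel_on(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	/* Each helper records the first failure in dsi_ctx.accum_err
	 * and does nothing once it is set. */
	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x83, 0x11, 0x2b);
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 120);	/* also skipped after an error */
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	return dsi_ctx.accum_err;	/* 0, or the first error seen */
}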
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index 92b03a2f65a3..c4d3e09a228d 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -80,7 +80,7 @@ struct hx8394_panel_desc {
unsigned int lanes;
unsigned long mode_flags;
enum mipi_dsi_pixel_format format;
- int (*init_sequence)(struct hx8394 *ctx);
+ void (*init_sequence)(struct mipi_dsi_multi_context *dsi_ctx);
};
static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
@@ -88,98 +88,94 @@ static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
return container_of(panel, struct hx8394, panel);
}
-static int hsd060bhw4_init_sequence(struct hx8394 *ctx)
+static void hsd060bhw4_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x78, 0x0c, 0x07);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x78, 0x0c, 0x07);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
- 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
- 0x7c);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
+ 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
+ 0x7c);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
- 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
- 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
- 0x00, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
+ 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
+ 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
+ 0x00, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
- 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
- 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
- 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
- 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
- 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
- 0x4a, 0x4c, 0x4b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
+ 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
+ 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
+ 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
+ 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
+ 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
+ 0x4a, 0x4c, 0x4b, 0x7f);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x7d, 0x7d);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x7d, 0x7d);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0xed);
}
static const struct drm_display_mode hsd060bhw4_mode = {
@@ -205,114 +201,110 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = {
.init_sequence = hsd060bhw4_init_sequence,
};
-static int powkiddy_x55_init_sequence(struct hx8394 *ctx)
+static void powkiddy_x55_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
- 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
- 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
+ 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
+ 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
- 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
- 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
+ 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
+ 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
- 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
- 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
+ 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
- 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
- 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
+ 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
- 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
- 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
- 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
- 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
+ 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
+ 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
+ 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
+ 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5,
- 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN5,
+ 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode powkiddy_x55_mode = {
@@ -339,131 +331,127 @@ static const struct hx8394_panel_desc powkiddy_x55_desc = {
.init_sequence = powkiddy_x55_init_sequence,
};
-static int mchp_ac40t08a_init_sequence(struct hx8394 *ctx)
+static void mchp_ac40t08a_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
 /* DCS commands do not seem to be sent correctly without this delay */
- msleep(20);
+ mipi_dsi_msleep(dsi_ctx, 20);
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
- 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
+ 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
- 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
- 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
- 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
- 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
+ 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
+ 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
+ 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
+ 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
- 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
+ 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
- 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
- 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
+ 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
- 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
- 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
- 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
- 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
- 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
- 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
- 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
- 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
+ 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
+ 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
+ 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
+ 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
+ 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
+ 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
+ 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
+ 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet (C0H) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x73);
 /* Set CABC control (C9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCABC,
- 0x76, 0x00, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCABC,
+ 0x76, 0x00, 0x30);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet (D4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
 /* Unknown command, not listed in the HX8394-F datasheet (D8h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet (C6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode mchp_ac40t08a_mode = {
@@ -489,39 +477,176 @@ static const struct hx8394_panel_desc mchp_ac40t08a_desc = {
.init_sequence = mchp_ac40t08a_init_sequence,
};
+/*
+ * HL055FHAV028C is based on the Himax HX8399, so its datasheet section
+ * numbers differ slightly from those of the HX8394-based panels above.
+ */
+static void hl055fhav028c_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
+{
+ /* 6.3.6 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x99);
+
+ /* 6.3.17 SETOFFSET: Set offset voltage (D2h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETOFFSET,
+ 0x77);
+
+ /* 6.3.1 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x02, 0x04, 0x74, 0x94, 0x01, 0x32,
+ 0x33, 0x11, 0x11, 0xab, 0x4d, 0x56,
+ 0x73, 0x02, 0x02);
+
+ /* 6.3.2 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x80, 0xae, 0x05, 0x07,
+ 0x5a, 0x11, 0x00, 0x00, 0x10, 0x1e,
+ 0x70, 0x03, 0xd4);
+
+ /* 6.3.3 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x00, 0xff, 0x02, 0xc0, 0x02, 0xc0,
+ 0x00, 0x00, 0x08, 0x00, 0x04, 0x06,
+ 0x00, 0x32, 0x04, 0x0a, 0x08, 0x21,
+ 0x03, 0x01, 0x00, 0x0f, 0xb8, 0x8b,
+ 0x02, 0xc0, 0x02, 0xc0, 0x00, 0x00,
+ 0x08, 0x00, 0x04, 0x06, 0x00, 0x32,
+ 0x04, 0x0a, 0x08, 0x01, 0x00, 0x0f,
+ 0xb8, 0x01);
+
+ /* 6.3.18 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x10, 0x04, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 0x05, 0x07, 0x00, 0x00,
+ 0x00, 0x05, 0x40);
+
+ /* 6.3.19 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x18, 0x18, 0x19, 0x19, 0x18, 0x18,
+ 0x21, 0x20, 0x01, 0x00, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x2f, 0x2f,
+ 0x30, 0x30, 0x31, 0x31, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 6.3.20 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x40, 0x40,
+ 0x20, 0x21, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x00, 0x01, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x2f, 0x2f,
+ 0x30, 0x30, 0x31, 0x31, 0x40, 0x40,
+ 0x40, 0x40);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xa2, 0xaa, 0x02, 0xa0, 0xa2, 0xa8,
+ 0x02, 0xa0, 0xb0, 0x00, 0x00, 0x00,
+ 0xb0, 0x00, 0x00, 0x00);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xb0, 0x00, 0x00, 0x00, 0xb0, 0x00,
+ 0x00, 0x00, 0xe2, 0xaa, 0x03, 0xf0,
+ 0xe2, 0xaa, 0x03, 0xf0);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xe2, 0xaa, 0x03, 0xf0, 0xe2, 0xaa,
+ 0x03, 0xf0);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 6.3.4 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x7a, 0x7a);
+
+ /* 6.3.26 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x18, 0x27, 0x24, 0x5a, 0x68,
+ 0x79, 0x78, 0x81, 0x8a, 0x92, 0x99,
+ 0x9e, 0xa7, 0xaf, 0xb4, 0xb9, 0xc3,
+ 0xc7, 0xd1, 0xc6, 0xd4, 0xd5, 0x6c,
+ 0x67, 0x71, 0x77, 0x00, 0x00, 0x18,
+ 0x27, 0x24, 0x5a, 0x68, 0x79, 0x78,
+ 0x81, 0x8a, 0x92, 0x99, 0x9e, 0xa7,
+ 0xaf, 0xb4, 0xb9, 0xc3, 0xc7, 0xd1,
+ 0xc6, 0xd4, 0xd5, 0x6c, 0x67, 0x77);
+
+ /* Unknown command, not listed in the HX8399-C datasheet (C6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xff, 0xf9);
+
+ /* 6.3.16 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x08);
+}
+
+static const struct drm_display_mode hl055fhav028c_mode = {
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 32,
+ .hsync_end = 1080 + 32 + 8,
+ .htotal = 1080 + 32 + 8 + 32,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 16,
+ .vsync_end = 1920 + 16 + 2,
+ .vtotal = 1920 + 16 + 2 + 14,
+ .clock = 134920,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 70,
+ .height_mm = 127,
+};
+
+static const struct hx8394_panel_desc hl055fhav028c_desc = {
+ .mode = &hl055fhav028c_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = hl055fhav028c_init_sequence,
+};
+
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
int ret;
- ret = ctx->desc->init_sequence(ctx);
- if (ret) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->init_sequence(&dsi_ctx);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
/* Panel is operational 120 msec after reset */
msleep(120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to turn on the display: %d\n", ret);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
goto sleep_in;
- }
return 0;
sleep_in:
+ ret = dsi_ctx.accum_err;
+ dsi_ctx.accum_err = 0;
+
/* This will probably fail, but let's try orderly power off anyway. */
- if (!mipi_dsi_dcs_enter_sleep_mode(dsi))
- msleep(50);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
return ret;
}
@@ -530,17 +655,12 @@ static int hx8394_disable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- msleep(50); /* about 3 frames */
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50); /* about 3 frames */
- return 0;
+ return dsi_ctx.accum_err;
}
static int hx8394_unprepare(struct drm_panel *panel)
@@ -632,9 +752,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
struct hx8394 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct hx8394, panel,
+ &hx8394_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -666,9 +788,6 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
"Failed to request iovcc regulator\n");
- drm_panel_init(&ctx->panel, dev, &hx8394_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -704,6 +823,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+ { .compatible = "huiling,hl055fhav028c", .data = &hl055fhav028c_desc },
{ .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
{ .compatible = "microchip,ac40t08a-mipi-panel", .data = &mchp_ac40t08a_desc },
{ /* sentinel */ }
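One subtlety in the hx8394_enable() conversion above: since the _multi helpers skip work while accum_err is set, the failure path has to save and clear the accumulator before the best-effort sleep-in writes can actually reach the panel. A sketch of that recovery idiom under assumed demo_ naming:

static int demo_display_on(struct mipi_dsi_multi_context *dsi_ctx)
{
	int ret;

	mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
	if (!dsi_ctx->accum_err)
		return 0;

	/* Save the failure, then clear it so the cleanup writes
	 * below are not skipped by the _multi helpers. */
	ret = dsi_ctx->accum_err;
	dsi_ctx->accum_err = 0;

	mipi_dsi_dcs_enter_sleep_mode_multi(dsi_ctx);
	mipi_dsi_msleep(dsi_ctx, 50);

	return ret;
}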
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 94b7dfef3b5e..6ed544a83bdd 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -722,9 +722,10 @@ static int ili9322_probe(struct spi_device *spi)
int ret;
int i;
- ili = devm_kzalloc(dev, sizeof(struct ili9322), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(dev, struct ili9322, panel,
+ &ili9322_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
spi_set_drvdata(spi, ili);
@@ -883,9 +884,6 @@ static int ili9322_probe(struct spi_device *spi)
ili->input = ili->conf->input;
}
- drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&ili->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index ff39f5dd4097..f7425dfaa50d 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -173,7 +173,6 @@ struct ili9341_config {
};
struct ili9341 {
- struct device *dev;
const struct ili9341_config *conf;
struct drm_panel panel;
struct gpio_desc *reset_gpio;
@@ -490,9 +489,11 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
struct ili9341 *ili;
int ret;
- ili = devm_kzalloc(dev, sizeof(struct ili9341), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(dev, struct ili9341, panel,
+ &ili9341_dpi_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
ili->dbi = devm_kzalloc(dev, sizeof(struct mipi_dbi),
GFP_KERNEL);
@@ -526,8 +527,6 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
}
ili->max_spi_speed = ili->conf->max_spi_speed;
- drm_panel_init(&ili->panel, dev, &ili9341_dpi_funcs,
- DRM_MODE_CONNECTOR_DPI);
drm_panel_add(&ili->panel);
return 0;
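Besides the allocation change, the ili9341 hunk drops the private struct device *dev member: after devm_drm_panel_alloc() the device is already reachable through the embedded drm_panel, so duplicate pointers can go. A one-line sketch of the replacement access:

/* struct drm_panel carries the parent device, so converted drivers
 * can use it instead of keeping a private copy. */
static void demo_log_ready(struct ili9341 *ili)
{
	dev_info(ili->panel.dev, "panel ready\n");
}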
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
index 1cbc25758bd2..e6c483851f1f 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
@@ -307,9 +307,12 @@ static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9805 *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ili9805, panel,
+ &ili9805_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
ctx->desc = of_device_get_match_data(&dsi->dev);
@@ -320,9 +323,6 @@ static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET;
dsi->lanes = 2;
- drm_panel_init(&ctx->panel, &dsi->dev, &ili9805_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd))
return PTR_ERR(ctx->dvdd);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
index a3c79ad99d0b..18aa6222b0c5 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
@@ -166,9 +166,10 @@ static int ili9806e_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9806e_panel *ctx;
int i, ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ili9806e_panel, panel, &ili9806e_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = device_get_match_data(dev);
@@ -192,9 +193,6 @@ static int ili9806e_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = ctx->desc->format;
dsi->lanes = ctx->desc->lanes;
- drm_panel_init(&ctx->panel, dev, &ili9806e_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
if (ret)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
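The ili9806e hunk (and the ili9881c change that follows) routes per-panel parameters through the match data attached to the compatible string instead of hard-coding them at probe time. A minimal sketch of that descriptor plumbing, with demo_ names standing in for the real structures:

struct demo_desc {
	const struct drm_display_mode *mode;
	unsigned long mode_flags;
	enum mipi_dsi_pixel_format format;
	unsigned int lanes;
};

static int demo_probe(struct mipi_dsi_device *dsi)
{
	const struct demo_desc *desc;

	/* Returns .data of the matching of_device_id entry. */
	desc = device_get_match_data(&dsi->dev);
	if (!desc)
		return -ENODEV;

	dsi->mode_flags = desc->mode_flags;
	dsi->format = desc->format;
	dsi->lanes = desc->lanes;	/* per-panel, not hard-coded */

	return mipi_dsi_attach(dsi);
}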
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 28cd7560e5db..ac433345a179 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -43,6 +43,7 @@ struct ili9881c_desc {
const struct drm_display_mode *mode;
const unsigned long mode_flags;
u8 default_address_mode;
+ unsigned int lanes;
};
struct ili9881c {
@@ -1223,6 +1224,199 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
};
+static const struct ili9881c_instr rpi_7inch_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x00),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x61),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x61),
+ ILI9881C_COMMAND_INSTR(0x10, 0x61),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x06),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x10),
+ ILI9881C_COMMAND_INSTR(0x51, 0x32),
+ ILI9881C_COMMAND_INSTR(0x52, 0x54),
+ ILI9881C_COMMAND_INSTR(0x53, 0x76),
+ ILI9881C_COMMAND_INSTR(0x54, 0x98),
+ ILI9881C_COMMAND_INSTR(0x55, 0xba),
+ ILI9881C_COMMAND_INSTR(0x56, 0x10),
+ ILI9881C_COMMAND_INSTR(0x57, 0x32),
+ ILI9881C_COMMAND_INSTR(0x58, 0x54),
+ ILI9881C_COMMAND_INSTR(0x59, 0x76),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x98),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xba),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xdc),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xfe),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x60, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x06),
+ ILI9881C_COMMAND_INSTR(0x64, 0x07),
+ ILI9881C_COMMAND_INSTR(0x65, 0x02),
+ ILI9881C_COMMAND_INSTR(0x66, 0x02),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x01),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x14),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x76, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x79, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x80, 0x00),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x14),
+ ILI9881C_COMMAND_INSTR(0x83, 0x15),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3B, 0x98),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x94),
+ ILI9881C_COMMAND_INSTR(0x8D, 0x14),
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_COMMAND_INSTR(0x38, 0x01),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x7d),
+ ILI9881C_COMMAND_INSTR(0x55, 0x8f),
+ ILI9881C_COMMAND_INSTR(0x40, 0x33),
+ ILI9881C_COMMAND_INSTR(0x50, 0x96),
+ ILI9881C_COMMAND_INSTR(0x51, 0x96),
+ ILI9881C_COMMAND_INSTR(0x60, 0x23),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x20),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xC0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xC8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x20),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+};
+
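The ILI9881C multiplexes its extended command set across register pages, which is why the table above interleaves ILI9881C_SWITCH_PAGE_INSTR(n) markers with ILI9881C_COMMAND_INSTR(reg, val) entries: each page switch applies to every command that follows until the next switch. A minimal sketch of how such a table is replayed at prepare time (instruction field names are assumptions patterned on this driver's existing helpers, not quoted from it):

	/* Sketch: walk the init table, honouring page switches. */
	for (i = 0; i < ctx->desc->init_length; i++) {
		const struct ili9881c_instr *instr = &ctx->desc->init[i];

		if (instr->op == ILI9881C_SWITCH_PAGE)
			ret = ili9881c_switch_page(ctx, instr->arg.page);
		else
			ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
						     instr->arg.cmd.data);
		if (ret)
			return ret;
	}
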
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -1449,6 +1643,23 @@ static const struct drm_display_mode am8001280g_default_mode = {
.height_mm = 151,
};
+static const struct drm_display_mode rpi_7inch_default_mode = {
+ .clock = 83330,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 239,
+ .hsync_end = 720 + 239 + 33,
+ .htotal = 720 + 239 + 33 + 50,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 2,
+ .vtotal = 1280 + 20 + 2 + 30,
+
+ .width_mm = 90,
+ .height_mm = 151,
+};
+
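As a consistency check, the timings above work out to roughly 60 Hz: htotal = 720 + 239 + 33 + 50 = 1042, vtotal = 1280 + 20 + 2 + 30 = 1332, and 83330 kHz / (1042 * 1332) ≈ 60.04 Hz.
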
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1506,16 +1717,15 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9881c *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ili9881c, panel, &ili9881c_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
ctx->desc = of_device_get_match_data(&dsi->dev);
- drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power),
@@ -1549,7 +1759,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = ctx->desc->mode_flags;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->lanes = 4;
+ dsi->lanes = ctx->desc->lanes;
return mipi_dsi_attach(dsi);
}
@@ -1567,6 +1777,7 @@ static const struct ili9881c_desc lhr050h41_desc = {
.init_length = ARRAY_SIZE(lhr050h41_init),
.mode = &lhr050h41_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .lanes = 4,
};
static const struct ili9881c_desc k101_im2byl02_desc = {
@@ -1574,6 +1785,7 @@ static const struct ili9881c_desc k101_im2byl02_desc = {
.init_length = ARRAY_SIZE(k101_im2byl02_init),
.mode = &k101_im2byl02_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .lanes = 4,
};
static const struct ili9881c_desc kd050hdfia020_desc = {
@@ -1599,6 +1811,7 @@ static const struct ili9881c_desc w552946aba_desc = {
.mode = &w552946aba_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .lanes = 4,
};
static const struct ili9881c_desc am8001280g_desc = {
@@ -1609,6 +1822,14 @@ static const struct ili9881c_desc am8001280g_desc = {
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
};
+static const struct ili9881c_desc rpi_7inch_desc = {
+ .init = rpi_7inch_init,
+ .init_length = ARRAY_SIZE(rpi_7inch_init),
+ .mode = &rpi_7inch_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM,
+ .lanes = 2,
+};
+
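Together with the probe hunk above, this makes the DSI lane count a per-panel descriptor field instead of a hardcoded dsi->lanes = 4: the existing panels keep .lanes = 4, while the Raspberry Pi 7" panel drives only two data lanes.
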
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
@@ -1616,6 +1837,7 @@ static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
+ { .compatible = "raspberrypi,dsi-7inch", &rpi_7inch_desc },
{ }
};
MODULE_DEVICE_TABLE(of, ili9881c_of_match);
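The conversions in this file and in every panel driver below follow one mechanical pattern: the separate devm_kzalloc() + drm_panel_init() pair becomes a single devm_drm_panel_alloc(), which allocates the driver structure with the drm_panel embedded at the named member, initialises it, and returns an ERR_PTR on failure instead of NULL. A minimal before/after sketch (struct my_panel and its `panel` member are hypothetical placeholders):

	/* Before: allocate, check for NULL, then init the panel. */
	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	drm_panel_init(&ctx->panel, dev, &my_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	/* After: one call; failure is reported via ERR_PTR. */
	ctx = devm_drm_panel_alloc(dev, struct my_panel, panel,
				   &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
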
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
index 3c24a63b6be8..85c7059be214 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
@@ -614,8 +614,6 @@ static int ili9882t_add(struct ili9882t *ili)
gpiod_set_value(ili->enable_gpio, 0);
- drm_panel_init(&ili->base, dev, &ili9882t_funcs,
- DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &ili->orientation);
if (err < 0) {
dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
@@ -640,9 +638,11 @@ static int ili9882t_probe(struct mipi_dsi_device *dsi)
int ret;
const struct panel_desc *desc;
- ili = devm_kzalloc(&dsi->dev, sizeof(*ili), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(&dsi->dev, __typeof(*ili), base,
+ &ili9882t_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
desc = of_device_get_match_data(&dsi->dev);
dsi->lanes = desc->lanes;
diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
index f85b7a4cbb42..b2309900873b 100644
--- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c
+++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
@@ -204,9 +204,11 @@ static int ej030na_probe(struct spi_device *spi)
struct ej030na *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct ej030na, panel,
+ &ej030na_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -231,9 +233,6 @@ static int ej030na_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset GPIO\n");
- drm_panel_init(&priv->panel, dev, &ej030na_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index d95c0d4f3e35..80afeeab9475 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -382,9 +382,11 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
struct device *dev = &dsi->dev;
int err, i;
- innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL);
- if (!innolux)
- return -ENOMEM;
+ innolux = devm_drm_panel_alloc(dev, struct innolux_panel, base,
+ &innolux_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(innolux))
+ return PTR_ERR(innolux);
innolux->desc = desc;
@@ -410,9 +412,6 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
innolux->enable_gpio = NULL;
}
- drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&innolux->base);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index eb0f8373258c..5c2530598ddb 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -1120,9 +1120,10 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
struct jadard *jadard;
int ret;
- jadard = devm_kzalloc(&dsi->dev, sizeof(*jadard), GFP_KERNEL);
- if (!jadard)
- return -ENOMEM;
+ jadard = devm_drm_panel_alloc(dev, struct jadard, panel, &jadard_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(jadard))
+ return PTR_ERR(jadard);
desc = of_device_get_match_data(dev);
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
@@ -1148,9 +1149,6 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(jadard->vccio);
}
- drm_panel_init(&jadard->panel, dev, &jadard_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = of_drm_get_panel_orientation(dev->of_node, &jadard->orientation);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get orientation\n");
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 4eb71e85e9e9..cbe354b51bce 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -175,9 +175,11 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
struct jdi_fhd_r63452 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct jdi_fhd_r63452, panel,
+ &jdi_fhd_r63452_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -192,8 +194,6 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
index 5b5082efb282..5f897e143758 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
@@ -435,9 +435,6 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return dev_err_probe(dev, PTR_ERR(jdi->backlight),
"failed to create backlight\n");
- drm_panel_init(&jdi->base, &jdi->link1->dev, &jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&jdi->base);
return 0;
@@ -475,10 +472,13 @@ static int jdi_panel_dsi_probe(struct mipi_dsi_device *dsi)
/* register a panel for only the DSI-LINK1 interface */
if (secondary) {
- jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL);
- if (!jdi) {
+ jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi),
+ base, &jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(jdi)) {
put_device(&secondary->dev);
- return -ENOMEM;
+ return PTR_ERR(jdi);
}
mipi_dsi_set_drvdata(dsi, jdi);
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index b1ce186de261..3513e5c4dd8c 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -402,9 +402,6 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return dev_err_probe(dev, PTR_ERR(jdi->backlight),
"failed to register backlight %d\n", ret);
- drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&jdi->base);
return 0;
@@ -426,9 +423,11 @@ static int jdi_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL);
- if (!jdi)
- return -ENOMEM;
+ jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi), base,
+ &jdi_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(jdi))
+ return PTR_ERR(jdi);
mipi_dsi_set_drvdata(dsi, jdi);
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
index 0e5e8e57bd1e..67ca055f06f3 100644
--- a/drivers/gpu/drm/panel/panel-khadas-ts050.c
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -821,9 +821,6 @@ static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050)
return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio),
"failed to get enable gpio");
- drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev,
- &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&khadas_ts050->base);
if (err)
return err;
@@ -850,10 +847,12 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050),
- GFP_KERNEL);
- if (!khadas_ts050)
- return -ENOMEM;
+ khadas_ts050 = devm_drm_panel_alloc(&dsi->dev, __typeof(*khadas_ts050),
+ base, &khadas_ts050_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(khadas_ts050))
+ return PTR_ERR(khadas_ts050);
khadas_ts050->panel_data = (struct khadas_ts050_panel_data *)data;
mipi_dsi_set_drvdata(dsi, khadas_ts050);
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index d6b912277196..2fc7b0779b37 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -337,9 +337,6 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
kingdisplay->enable_gpio = NULL;
}
- drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
- &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&kingdisplay->base);
if (err)
return err;
@@ -364,9 +361,12 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- kingdisplay = devm_kzalloc(&dsi->dev, sizeof(*kingdisplay), GFP_KERNEL);
- if (!kingdisplay)
- return -ENOMEM;
+ kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
+ &kingdisplay_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(kingdisplay))
+ return PTR_ERR(kingdisplay);
mipi_dsi_set_drvdata(dsi, kingdisplay);
kingdisplay->link = dsi;
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 77f74e6c467e..0856df5a6ee2 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -548,9 +548,11 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
struct ltk050h3146w *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ltk050h3146w, panel,
+ &ltk050h3146w_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_desc = of_device_get_match_data(dev);
if (!ctx->panel_desc)
@@ -577,9 +579,6 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = ctx->panel_desc->mode_flags;
- drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 6b18cf00fd4a..7f19fd5b8060 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -604,9 +604,11 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ltk500hd1829, panel,
+ &ltk500hd1829_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_desc = of_device_get_match_data(dev);
if (!ctx->panel_desc)
@@ -643,9 +645,6 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&ctx->panel, &dsi->dev, &ltk500hd1829_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index 9d0d4faa3f58..b2be6727bf73 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -178,9 +178,10 @@ static int lb035q02_probe(struct spi_device *spi)
struct lb035q02_device *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct lb035q02_device, panel,
+ &lb035q02_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -195,9 +196,6 @@ static int lb035q02_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index cf246d15b7b6..dec619902c15 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -243,9 +243,11 @@ static int lg4573_probe(struct spi_device *spi)
struct lg4573 *ctx;
int ret;
- ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&spi->dev, struct lg4573, panel,
+ &lg4573_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->spi = spi;
@@ -258,9 +260,6 @@ static int lg4573_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c
index f3dcc39670ea..46a56ea92ad9 100644
--- a/drivers/gpu/drm/panel/panel-lg-sw43408.c
+++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c
@@ -246,8 +246,6 @@ static int sw43408_add(struct sw43408_panel *ctx)
ctx->base.prepare_prev_first = true;
- drm_panel_init(&ctx->base, dev, &sw43408_funcs, DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&ctx->base);
return ret;
}
@@ -257,9 +255,11 @@ static int sw43408_probe(struct mipi_dsi_device *dsi)
struct sw43408_panel *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, __typeof(*ctx), base,
+ &sw43408_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
dsi->mode_flags = MIPI_DSI_MODE_LPM;
dsi->format = MIPI_DSI_FMT_RGB888;
diff --git a/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c b/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
index 032c542aab0f..24b34443ace0 100644
--- a/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
+++ b/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
@@ -190,9 +190,11 @@ static int lincoln_lcd197_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_BURST);
- lcd = devm_kzalloc(&dsi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(dev, struct lincoln_lcd197_panel, panel,
+ &lincoln_lcd197_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
mipi_dsi_set_drvdata(dsi, lcd);
lcd->dsi = dsi;
@@ -214,9 +216,6 @@ static int lincoln_lcd197_panel_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(lcd->reset_gpio),
"failed to get reset gpio");
- drm_panel_init(&lcd->panel, dev,
- &lincoln_lcd197_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&lcd->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index ba6c015aabba..23fd535d8f47 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -164,9 +164,11 @@ static int panel_lvds_probe(struct platform_device *pdev)
struct panel_lvds *lvds;
int ret;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_panel_alloc(&pdev->dev, struct panel_lvds, panel,
+ &panel_lvds_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = &pdev->dev;
@@ -214,10 +216,6 @@ static int panel_lvds_probe(struct platform_device *pdev)
* driver.
*/
- /* Register the panel. */
- drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
- DRM_MODE_CONNECTOR_LVDS);
-
ret = drm_panel_of_backlight(&lvds->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
index 799c2161fc85..cde168ec631c 100644
--- a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
+++ b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
@@ -370,9 +370,11 @@ static int d53e6ea8966_probe(struct spi_device *spi)
.node = NULL,
};
- db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
+ db = devm_drm_panel_alloc(dev, struct d53e6ea8966, panel,
+ &d53e6ea8966_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(db))
+ return PTR_ERR(db);
spi_set_drvdata(spi, db);
@@ -425,9 +427,6 @@ static int d53e6ea8966_probe(struct spi_device *spi)
db->dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&db->panel, dev, &d53e6ea8966_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
if (db->panel_info->backlight_register) {
ret = db->panel_info->backlight_register(db);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 4db852ffb0f6..55664f5d5aa5 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -234,9 +234,11 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
struct mantix *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct mantix, panel, &mantix_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
ctx->default_mode = of_device_get_match_data(dev);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
@@ -271,9 +273,6 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->vddi))
return dev_err_probe(dev, PTR_ERR(ctx->vddi), "Failed to request vddi regulator\n");
- drm_panel_init(&ctx->panel, dev, &mantix_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 81c5c541a351..d5c7210de4af 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -178,9 +178,10 @@ static int nl8048_probe(struct spi_device *spi)
struct nl8048_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct nl8048_panel, panel,
+ &nl8048_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -204,9 +205,6 @@ static int nl8048_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index 5d115ecd5dd4..22560384e48e 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -361,9 +361,11 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
struct panel_nv3051d *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct panel_nv3051d, panel,
+ &panel_nv3051d_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -391,9 +393,6 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = ctx->panel_info->mode_flags;
- drm_panel_init(&ctx->panel, &dsi->dev, &panel_nv3051d_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -413,15 +412,10 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
static void panel_nv3051d_shutdown(struct mipi_dsi_device *dsi)
{
struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi);
- int ret;
- ret = drm_panel_unprepare(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
+ drm_panel_unprepare(&ctx->panel);
- ret = drm_panel_disable(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
+ drm_panel_disable(&ctx->panel);
}
static void panel_nv3051d_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index 06e16a7c14a7..0db9cadd868e 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -777,9 +777,10 @@ static int nv3052c_probe(struct spi_device *spi)
struct nv3052c *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct nv3052c, panel, &nv3052c_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->dev = dev;
@@ -803,9 +804,6 @@ static int nv3052c_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
- drm_panel_init(&priv->panel, dev, &nv3052c_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return dev_err_probe(dev, err, "Failed to attach backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 549b86f2cc28..3189d89c7ca0 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -1087,9 +1087,12 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
struct nt35510 *nt;
int ret;
- nt = devm_kzalloc(dev, sizeof(struct nt35510), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35510, panel,
+ &nt35510_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
+
mipi_dsi_set_drvdata(dsi, nt);
nt->dev = dev;
@@ -1142,9 +1145,6 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(nt->reset_gpio);
}
- drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
/*
* First, try to locate an external backlight (such as on GPIO)
* if this fails, assume we will want to use the internal backlight
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 5bbea734123b..98f0782c8411 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -456,9 +456,12 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
struct nt35560 *nt;
int ret;
- nt = devm_kzalloc(dev, sizeof(struct nt35560), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35560, panel,
+ &nt35560_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
+
nt->video_mode = of_property_read_bool(dev->of_node,
"enforce-video-mode");
@@ -502,9 +505,6 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(nt->reset_gpio),
"failed to request GPIO\n");
- drm_panel_init(&nt->panel, dev, &nt35560_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
nt->panel.backlight = devm_backlight_device_register(dev, "nt35560", dev, nt,
&nt35560_bl_ops, &nt35560_bl_props);
if (IS_ERR(nt->panel.backlight))
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 08b22b592ab0..94aa6489d99f 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -449,9 +449,10 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
const struct mipi_dsi_device_info *info;
int i, num_dsis = 1, ret;
- nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35950, panel, &nt35950_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
ret = nt35950_sharp_init_vregs(nt, dev);
if (ret)
@@ -491,9 +492,6 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
nt->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, nt);
- drm_panel_init(&nt->panel, dev, &nt35950_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&nt->panel);
if (ret) {
if (num_dsis == 2)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 04f1d2676c78..32cf64c7c18b 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -23,10 +23,12 @@
#define DSI_NUM_MIN 1
-#define mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, cmd, seq...) \
- do { \
- mipi_dsi_dcs_write_seq(dsi0, cmd, seq); \
- mipi_dsi_dcs_write_seq(dsi1, cmd, seq); \
+#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) \
+ do { \
+ dsi_ctx.dsi = dsi0; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
+ dsi_ctx.dsi = dsi1; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
} while (0)
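Unlike the old helper, the _multi variant threads a struct mipi_dsi_multi_context through every transfer: mipi_dsi_dcs_write_seq_multi() records the first failure in accum_err and turns subsequent calls into no-ops, so the long init sequences below can drop per-call error handling entirely. A short usage sketch:

	/* Sketch: errors accumulate, so no per-call checks are needed. */
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); /* skipped if the previous write failed */
	return dsi_ctx.accum_err; /* 0 on success, first error otherwise */
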
struct panel_info {
@@ -67,868 +69,829 @@ static int elish_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd2, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x76, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x77, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x59);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdc, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdd, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe1, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe2, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf3, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf4, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x13, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x97, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x98, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x99, 0x95);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9b, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9c, 0x0b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9d, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9e, 0x90);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa3, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x60);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}
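Note that mipi_dsi_msleep(&dsi_ctx, 70) is gated on the same accumulated error, so the 70 ms delay after the exit-sleep command is skipped once a write has failed; returning dsi_ctx.accum_err replaces the hidden return-on-error control flow that was buried inside the old write_seq macro.
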
static int elish_csot_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x55, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x4d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x96);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x5c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x1c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}

static int j606f_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi = pinfo->dsi[0];
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0xd9);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x63);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0x95, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, 0x96, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x75, 0xa2);
- mipi_dsi_dcs_write_seq(dsi, 0x77, 0xb3);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x23);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x77);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x01, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x09, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x10, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x21, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x32);
- mipi_dsi_dcs_write_seq(dsi, 0x37, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x38, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x9a);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x42);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x43, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x47, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x9a);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x4c, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x43);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 18);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x52, 0x34);
- mipi_dsi_dcs_write_seq(dsi, 0x55, 0x82, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x3c);
- mipi_dsi_dcs_write_seq(dsi, 0x82, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x97, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xb6,
- 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
- 0x05, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x93, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x94, 0x5f);
- mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x55);
- mipi_dsi_dcs_write_seq(dsi, 0xda, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x90);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_GET_SCANLINE, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xf1, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x70, 0x30);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_START, 0xf3);
- mipi_dsi_dcs_write_seq(dsi, 0xa3, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa4, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa5, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0xa1);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0xf2);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x50);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x51);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x65);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x86);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0xbb);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x05);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0xc3);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x35, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0xca, 0x4e);
- mipi_dsi_dcs_write_seq(dsi, 0xcb, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
- mipi_dsi_dcs_write_seq(dsi, 0xaa, 0x47);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x53);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x01);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x60, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x63, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x66, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x78, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2f);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x08);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x5d);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x5d);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x91, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x60);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0x8d);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x75);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x02);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- usleep_range(10000, 11000);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x05, 0x01);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(100);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(30);
-
- return 0;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
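+ /*
+  * Single-link variant: each *_multi helper checks dsi_ctx.accum_err,
+  * logs its own failure and stores it there, replacing the old
+  * per-call ret/dev_err handling.
+  */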
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0xd9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x63);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x95, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x96, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xa2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x9a);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x42);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x9a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x43);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 18);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x34);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x82, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x3c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6,
+ 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x05, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x93, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xda, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x90);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_GET_SCANLINE, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_START, 0xf3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa3, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa4, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa5, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd6, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0xf2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x65);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0xc3);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc9, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xaa, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x27);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x08);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x5d);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x5d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x8d);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x75);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x02);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x05, 0x01);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode elish_boe_modes[] = {
@@ -1063,18 +1026,18 @@ static int nt36523_prepare(struct drm_panel *panel)
static int nt36523_disable(struct drm_panel *panel)
{
struct panel_info *pinfo = to_panel_info(panel);
- int i, ret;
+ int i;
for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_set_display_off(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to set display off: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
}
for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to enter sleep mode: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
}
msleep(70);
@@ -1208,9 +1171,11 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
const struct mipi_dsi_device_info *info;
int i, ret;
- pinfo = devm_kzalloc(dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(dev, struct panel_info, panel,
+ &nt36523_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
pinfo->vddio = devm_regulator_get(dev, "vddio");
if (IS_ERR(pinfo->vddio))
@@ -1248,7 +1213,6 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
pinfo->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, pinfo);
- drm_panel_init(&pinfo->panel, dev, &nt36523_panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = of_drm_get_panel_orientation(dev->of_node, &pinfo->orientation);
if (ret < 0) {
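
The nt36523 conversion above is built on the mipi_dsi_multi_context helpers: every *_multi() call (and the mipi_dsi_msleep()/mipi_dsi_usleep_range() delays) becomes a no-op once an earlier transfer in the sequence has failed, and the first error is latched in accum_err, which the init function returns at the end. That is why the long write sequences and the reworked disable path carry no per-call error handling. Note also that on the vendor command pages selected via the 0xff writes, DCS constant names such as MIPI_DCS_READ_PPS_START are incidental: the conversion names the byte value, but the register addressed is a private one on that page. A minimal sketch of the pattern for a hypothetical panel, using only helpers from drm_mipi_dsi.h:

    #include <drm/drm_mipi_dsi.h>

    static int example_init(struct mipi_dsi_device *dsi)
    {
        struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

        /*
         * Each helper checks dsi_ctx.accum_err first and skips itself
         * once a previous transfer has failed.
         */
        mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
        mipi_dsi_msleep(&dsi_ctx, 120);                     /* also skipped on error */
        mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00); /* vendor register */
        mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

        return dsi_ctx.accum_err;   /* first failure, or 0 on success */
    }
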
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index c2abd20e0734..29e1f6aea480 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -608,8 +608,6 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio),
"failed to get reset gpio from DT\n");
- drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&pinfo->base);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
@@ -625,9 +623,11 @@ static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
const struct nt36672a_panel_desc *desc;
int err;
- pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base,
+ &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
desc = of_device_get_match_data(&dsi->dev);
dsi->mode_flags = desc->mode_flags;
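
Each devm_kzalloc() + drm_panel_init() pair in these probes collapses into devm_drm_panel_alloc(), which allocates the container struct with the drm_panel embedded at the named member, initializes it with the given funcs and connector type, and ties its lifetime to the device. Unlike devm_kzalloc() it reports failure with an ERR_PTR, hence the IS_ERR()/PTR_ERR() checks replacing the NULL tests. The type argument can be spelled out or derived from the pointer, as the __typeof(*pinfo) form in the nt36672a hunk shows. A sketch of the call shape with a hypothetical driver struct:

    #include <linux/err.h>

    #include <drm/drm_mipi_dsi.h>
    #include <drm/drm_panel.h>

    static const struct drm_panel_funcs my_panel_funcs;     /* illustrative */

    struct my_panel {
        struct drm_panel panel;     /* member name passed to the macro */
        struct mipi_dsi_device *dsi;
    };

    static int my_probe(struct mipi_dsi_device *dsi)
    {
        struct my_panel *ctx;

        /* kzalloc + drm_panel_init() + devres cleanup in one call */
        ctx = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
                                   &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);
        if (IS_ERR(ctx))
            return PTR_ERR(ctx);

        ctx->dsi = dsi;
        mipi_dsi_set_drvdata(dsi, ctx);
        return 0;
    }
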
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index 8c9e04207ba9..c5e00eb55722 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -522,9 +522,11 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx;
int i, ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct nt36672e_panel, panel,
+ &nt36672e_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = of_device_get_match_data(dev);
if (!ctx->desc) {
@@ -553,8 +555,6 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
dsi->format = ctx->desc->format;
dsi->mode_flags = ctx->desc->mode_flags;
- drm_panel_init(&ctx->panel, dev, &nt36672e_drm_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
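
The probe tails these patches leave untouched share one shape: resolve the optional backlight phandle, register the panel, attach to the DSI host, and unregister the panel again if the attach fails. A condensed sketch of that tail (names match the surrounding drivers, error strings are illustrative):

    #include <drm/drm_mipi_dsi.h>
    #include <drm/drm_panel.h>

    static int example_probe_tail(struct device *dev,
                                  struct mipi_dsi_device *dsi,
                                  struct drm_panel *panel)
    {
        int ret;

        /* Optional "backlight" phandle in the panel's OF node */
        ret = drm_panel_of_backlight(panel);
        if (ret)
            return dev_err_probe(dev, ret, "Failed to get backlight\n");

        drm_panel_add(panel);

        ret = mipi_dsi_attach(dsi);
        if (ret) {
            drm_panel_remove(panel);    /* undo registration before bailing */
            return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
        }

        return 0;
    }
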
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt37801.c b/drivers/gpu/drm/panel/panel-novatek-nt37801.c
new file mode 100644
index 000000000000..d6a37d7e0cc6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt37801.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Linaro Limited
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/mipi_display.h>
+
+struct novatek_nt37801 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_config dsc;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data novatek_nt37801_supplies[] = {
+ { .supply = "vddio" },
+ { .supply = "vci" },
+ { .supply = "vdd" },
+};
+
+static inline struct novatek_nt37801 *to_novatek_nt37801(struct drm_panel *panel)
+{
+ return container_of(panel, struct novatek_nt37801, panel);
+}
+
+static void novatek_nt37801_reset(struct novatek_nt37801 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 21000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 21000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 21000);
+}
+
+#define NT37801_DCS_SWITCH_PAGE 0xf0
+
+#define novatek_nt37801_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi((dsi_ctx), NT37801_DCS_SWITCH_PAGE, \
+ 0x55, 0xaa, 0x52, 0x08, (page))
+
+static int novatek_nt37801_on(struct novatek_nt37801 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ novatek_nt37801_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc5, 0x0b, 0x0b, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf5, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x1b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0x00);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x059f);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0c7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x90, 0x03, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91,
+ 0x89, 0x28, 0x00, 0x28, 0xc2, 0x00, 0x02,
+ 0x68, 0x04, 0x6c, 0x00, 0x0a, 0x02, 0x77,
+ 0x01, 0xe9, 0x10, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb,
+ 0x00, 0x01, 0x00, 0x11, 0x33, 0x33, 0x33,
+ 0x55, 0x57, 0xd0, 0x00, 0x00, 0x44, 0x56,
+ 0x77, 0x78, 0x9a, 0xbc, 0xdd, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xdc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x00);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x00, 0x18, 0x00, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51,
+ 0x07, 0xff, 0x07, 0xff, 0x0f, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x00);
+
+ novatek_nt37801_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x55, 0x01, 0xff, 0x03);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int novatek_nt37801_off(struct novatek_nt37801 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int novatek_nt37801_prepare(struct drm_panel *panel)
+{
+ struct novatek_nt37801 *ctx = to_novatek_nt37801(panel);
+ struct device *dev = &ctx->dsi->dev;
+ struct drm_dsc_picture_parameter_set pps;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ novatek_nt37801_reset(ctx);
+
+ ret = novatek_nt37801_on(ctx);
+ if (ret < 0)
+ goto err;
+
+ drm_dsc_pps_payload_pack(&pps, &ctx->dsc);
+
+ ret = mipi_dsi_picture_parameter_set(ctx->dsi, &pps);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to transmit PPS: %d\n", ret);
+ goto err;
+ }
+
+ ret = mipi_dsi_compression_mode(ctx->dsi, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable compression mode: %d\n", ret);
+ goto err;
+ }
+
+ msleep(28);
+
+ return 0;
+
+err:
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+
+ return ret;
+}
+
+static int novatek_nt37801_unprepare(struct drm_panel *panel)
+{
+ struct novatek_nt37801 *ctx = to_novatek_nt37801(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = novatek_nt37801_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode novatek_nt37801_mode = {
+ .clock = (1440 + 20 + 4 + 20) * (3200 + 20 + 2 + 18) * 120 / 1000,
+ .hdisplay = 1440,
+ .hsync_start = 1440 + 20,
+ .hsync_end = 1440 + 20 + 4,
+ .htotal = 1440 + 20 + 4 + 20,
+ .vdisplay = 3200,
+ .vsync_start = 3200 + 20,
+ .vsync_end = 3200 + 20 + 2,
+ .vtotal = 3200 + 20 + 2 + 18,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int novatek_nt37801_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &novatek_nt37801_mode);
+}
+
+static const struct drm_panel_funcs novatek_nt37801_panel_funcs = {
+ .prepare = novatek_nt37801_prepare,
+ .unprepare = novatek_nt37801_unprepare,
+ .get_modes = novatek_nt37801_get_modes,
+};
+
+static int novatek_nt37801_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops novatek_nt37801_bl_ops = {
+ .update_status = novatek_nt37801_bl_update_status,
+};
+
+static struct backlight_device *
+novatek_nt37801_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 4095,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &novatek_nt37801_bl_ops, &props);
+}
+
+static int novatek_nt37801_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct novatek_nt37801 *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct novatek_nt37801, panel,
+ &novatek_nt37801_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(novatek_nt37801_supplies),
+ novatek_nt37801_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->panel.prepare_prev_first = true;
+ ctx->panel.backlight = novatek_nt37801_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ /* This panel only supports DSC; unconditionally enable it */
+ dsi->dsc = &ctx->dsc;
+ ctx->dsc.dsc_version_major = 1;
+ ctx->dsc.dsc_version_minor = 1;
+ ctx->dsc.slice_height = 40;
+ ctx->dsc.slice_width = 720;
+ ctx->dsc.slice_count = 1440 / ctx->dsc.slice_width;
+ ctx->dsc.bits_per_component = 8;
+ ctx->dsc.bits_per_pixel = 8 << 4; /* 4 fractional bits */
+ ctx->dsc.block_pred_enable = true;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void novatek_nt37801_remove(struct mipi_dsi_device *dsi)
+{
+ struct novatek_nt37801 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id novatek_nt37801_of_match[] = {
+ { .compatible = "novatek,nt37801" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, novatek_nt37801_of_match);
+
+static struct mipi_dsi_driver novatek_nt37801_driver = {
+ .probe = novatek_nt37801_probe,
+ .remove = novatek_nt37801_remove,
+ .driver = {
+ .name = "panel-novatek-nt37801",
+ .of_match_table = novatek_nt37801_of_match,
+ },
+};
+module_mipi_dsi_driver(novatek_nt37801_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("Panel driver for the Novatek NT37801/NT37810 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
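
The DSC block the nt37801 probe fills in deserves a note: bits_per_pixel is in 6.4 fixed point, so 8 << 4 (= 128) encodes 8.0 bpp; with 720-pixel slices on a 1440-pixel line, slice_count comes out as 2; and drm_dsc_pps_payload_pack() in prepare() serializes the drm_dsc_config into the 128-byte PPS that mipi_dsi_picture_parameter_set() transmits before compression is switched on. A small sketch of the same setup as a helper, with a divisibility check added purely for illustration:

    #include <linux/errno.h>

    #include <drm/display/drm_dsc.h>
    #include <drm/display/drm_dsc_helper.h>

    static int example_fill_dsc(struct drm_dsc_config *dsc, int hdisplay)
    {
        if (hdisplay % 720)         /* slices must tile the line exactly */
            return -EINVAL;

        dsc->dsc_version_major = 1;
        dsc->dsc_version_minor = 1;
        dsc->slice_height = 40;
        dsc->slice_width = 720;
        dsc->slice_count = hdisplay / dsc->slice_width;  /* 1440 / 720 = 2 */
        dsc->bits_per_component = 8;
        dsc->bits_per_pixel = 8 << 4;   /* 6.4 fixed point: 8.0 bpp */
        dsc->block_pred_enable = true;

        return 0;
    }
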
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 9fa7654e2b67..a629976bae54 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -246,9 +246,10 @@ static int nt39016_probe(struct spi_device *spi)
struct nt39016 *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct nt39016, drm_panel, &nt39016_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
spi_set_drvdata(spi, panel);
@@ -279,9 +280,6 @@ static int nt39016_probe(struct spi_device *spi)
return PTR_ERR(panel->map);
}
- drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->drm_panel);
if (err)
return dev_err_probe(dev, err, "Failed to get backlight handle\n");
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 94ae8c8270b8..66f99982f360 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -175,9 +175,11 @@ static int lcd_olinuxino_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -ENODEV;
- lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(dev, struct lcd_olinuxino, panel,
+ &lcd_olinuxino_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
i2c_set_clientdata(client, lcd);
lcd->dev = dev;
@@ -234,9 +236,6 @@ static int lcd_olinuxino_probe(struct i2c_client *client)
if (IS_ERR(lcd->enable_gpio))
return PTR_ERR(lcd->enable_gpio);
- drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&lcd->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
index fc87f61d4400..3231e84dc66c 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -237,9 +237,11 @@ static int ota5601a_probe(struct spi_device *spi)
struct ota5601a *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct ota5601a, drm_panel,
+ &ota5601a_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
spi_set_drvdata(spi, panel);
@@ -273,9 +275,6 @@ static int ota5601a_probe(struct spi_device *spi)
return PTR_ERR(panel->map);
}
- drm_panel_init(&panel->drm_panel, dev, &ota5601a_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->drm_panel);
if (err) {
if (err != -EPROBE_DEFER)
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 87bbb25d119a..a0f58c3b73f6 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -424,9 +424,11 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
struct otm8009a *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct otm8009a, panel,
+ &otm8009a_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -451,9 +453,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
dev, ctx,
&otm8009a_backlight_ops,
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index dbea84f51514..2334b77f348c 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -132,9 +132,6 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
if (IS_ERR(osd101t2587->supply))
return PTR_ERR(osd101t2587->supply);
- drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
- &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&osd101t2587->base);
if (ret)
return ret;
@@ -161,9 +158,12 @@ static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_NO_EOT_PACKET;
- osd101t2587 = devm_kzalloc(&dsi->dev, sizeof(*osd101t2587), GFP_KERNEL);
- if (!osd101t2587)
- return -ENOMEM;
+ osd101t2587 = devm_drm_panel_alloc(&dsi->dev, __typeof(*osd101t2587), base,
+ &osd101t2587_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(osd101t2587))
+ return PTR_ERR(osd101t2587);
mipi_dsi_set_drvdata(dsi, osd101t2587);
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index d1c5c9bc3c56..3c3308fc55df 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -166,9 +166,6 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
if (IS_ERR(wuxga_nt->supply))
return PTR_ERR(wuxga_nt->supply);
- drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
- &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&wuxga_nt->base);
if (ret)
return ret;
@@ -196,9 +193,12 @@ static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_LPM;
- wuxga_nt = devm_kzalloc(&dsi->dev, sizeof(*wuxga_nt), GFP_KERNEL);
- if (!wuxga_nt)
- return -ENOMEM;
+ wuxga_nt = devm_drm_panel_alloc(&dsi->dev, __typeof(*wuxga_nt), base,
+ &wuxga_nt_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(wuxga_nt))
+ return PTR_ERR(wuxga_nt);
mipi_dsi_set_drvdata(dsi, wuxga_nt);
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index e10e469aa7a6..dc4bb8ad9131 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -373,9 +373,12 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c)
.node = NULL,
};
- ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
- if (!ts)
- return -ENOMEM;
+ ts = devm_drm_panel_alloc(dev, __typeof(*ts), base,
+ &rpi_touchscreen_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ts))
+ return PTR_ERR(ts);
i2c_set_clientdata(i2c, ts);
@@ -428,9 +431,6 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c)
return PTR_ERR(ts->dsi);
}
- drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
*/
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index b2029e035635..2af6aa47a551 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -527,9 +527,11 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
int ret;
u32 video_mode;
- panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct rad_panel, panel,
+ &rad_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
mipi_dsi_set_drvdata(dsi, panel);
@@ -586,8 +588,6 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
dev_set_drvdata(dev, panel);
drm_panel_add(&panel->panel);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67200.c b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
index 64b685dc11f6..333faed62da7 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
@@ -36,12 +36,14 @@ static inline struct raydium_rm67200 *to_raydium_rm67200(struct drm_panel *panel)
static void raydium_rm67200_reset(struct raydium_rm67200 *ctx)
{
- gpiod_set_value_cansleep(ctx->reset_gpio, 0);
- msleep(60);
- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- msleep(60);
- gpiod_set_value_cansleep(ctx->reset_gpio, 0);
- msleep(60);
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+ }
}
static void raydium_rm67200_write(struct mipi_dsi_multi_context *ctx,
@@ -318,6 +320,7 @@ static void w552793baa_setup(struct mipi_dsi_multi_context *ctx)
static int raydium_rm67200_prepare(struct drm_panel *panel)
{
struct raydium_rm67200 *ctx = to_raydium_rm67200(panel);
+ struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
int ret;
ret = regulator_bulk_enable(ctx->num_supplies, ctx->supplies);
@@ -328,6 +331,12 @@ static int raydium_rm67200_prepare(struct drm_panel *panel)
msleep(60);
+ ctx->panel_info->panel_setup(&mctx);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&mctx);
+ mipi_dsi_msleep(&mctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&mctx);
+ mipi_dsi_msleep(&mctx, 30);
+
- return 0;
+ return mctx.accum_err;
}
@@ -343,20 +352,6 @@ static int raydium_rm67200_unprepare(struct drm_panel *panel)
return 0;
}
-static int raydium_rm67200_enable(struct drm_panel *panel)
-{
- struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
- struct mipi_dsi_multi_context ctx = { .dsi = rm67200->dsi };
-
- rm67200->panel_info->panel_setup(&ctx);
- mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
- mipi_dsi_msleep(&ctx, 120);
- mipi_dsi_dcs_set_display_on_multi(&ctx);
- mipi_dsi_msleep(&ctx, 30);
-
- return ctx.accum_err;
-}
-
static int raydium_rm67200_disable(struct drm_panel *panel)
{
struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
@@ -381,7 +376,6 @@ static const struct drm_panel_funcs raydium_rm67200_funcs = {
.prepare = raydium_rm67200_prepare,
.unprepare = raydium_rm67200_unprepare,
.get_modes = raydium_rm67200_get_modes,
- .enable = raydium_rm67200_enable,
.disable = raydium_rm67200_disable,
};
@@ -391,9 +385,11 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
struct raydium_rm67200 *ctx;
int ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct raydium_rm67200, panel,
+ &raydium_rm67200_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_info = device_get_match_data(dev);
if (!ctx->panel_info)
@@ -407,7 +403,7 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"Failed to get reset-gpios\n");
@@ -421,9 +417,6 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_LPM;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &raydium_rm67200_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -471,6 +464,7 @@ static const struct raydium_rm67200_panel_info w552793baa_info = {
.vtotal = 1952,
.width_mm = 68, /* 68.04mm */
.height_mm = 121, /* 120.96mm */
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
.type = DRM_MODE_TYPE_DRIVER,
},
.regulators = w552793baa_regulators,
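
The rm67200 rework folds the init sequence into .prepare and deletes .enable: the driver sets panel.prepare_prev_first, so the DSI host is brought up before prepare() runs, and the LP-mode command writes belong there, before the video stream starts, not in enable(), which only runs afterwards. Since prepare() now performs DSI transfers, it returns mctx.accum_err rather than unconditional success, matching what the removed enable() did with ctx.accum_err. A minimal sketch of a prepare() in this style (hypothetical single-supply panel):

    #include <linux/container_of.h>
    #include <linux/regulator/consumer.h>

    #include <drm/drm_mipi_dsi.h>
    #include <drm/drm_panel.h>

    struct my_panel {
        struct drm_panel panel;
        struct mipi_dsi_device *dsi;
        struct regulator *supply;
    };

    static int my_prepare(struct drm_panel *panel)
    {
        struct my_panel *ctx = container_of(panel, struct my_panel, panel);
        struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
        int ret;

        ret = regulator_enable(ctx->supply);
        if (ret < 0)
            return ret;

        /*
         * panel.prepare_prev_first guarantees the DSI host is already
         * initialized here, so LP command transfers are safe.
         */
        mipi_dsi_dcs_exit_sleep_mode_multi(&mctx);
        mipi_dsi_msleep(&mctx, 120);
        mipi_dsi_dcs_set_display_on_multi(&mctx);

        return mctx.accum_err;      /* propagate any transfer failure */
    }
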
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 7b7fe987e292..669b5f5c1ad9 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -327,9 +327,11 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
struct rm68200 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm68200, panel,
+ &rm68200_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -355,9 +357,6 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm692e5.c b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
index ea1b728e85a2..8e9484768657 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
@@ -281,9 +281,11 @@ static int rm692e5_probe(struct mipi_dsi_device *dsi)
struct rm692e5_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm692e5_panel, panel,
+ &rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vddio";
ctx->supplies[1].supply = "dvdd";
@@ -306,8 +308,6 @@ static int rm692e5_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &rm692e5_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = rm692e5_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm69380.c b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
index d3071c01aaea..86769cadec97 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm69380.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
@@ -208,9 +208,11 @@ static int rm69380_probe(struct mipi_dsi_device *dsi)
struct device_node *dsi_sec;
int ret, i;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm69380_panel, panel,
+ &rm69380_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vddio";
ctx->supplies[1].supply = "avdd";
@@ -248,8 +250,6 @@ static int rm69380_probe(struct mipi_dsi_device *dsi)
ctx->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, ctx);
- drm_panel_init(&ctx->panel, dev, &rm69380_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = rm69380_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-renesas-r61307.c b/drivers/gpu/drm/panel/panel-renesas-r61307.c
new file mode 100644
index 000000000000..319415194839
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-renesas-r61307.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define R61307_MACP 0xb0 /* Manufacturer CMD Protect */
+#define R61307_MACP_ON 0x03
+#define R61307_MACP_OFF 0x04
+
+#define R61307_INVERSION 0xc1
+#define R61307_GAMMA_SET_A 0xc8 /* Gamma Setting A */
+#define R61307_GAMMA_SET_B 0xc9 /* Gamma Setting B */
+#define R61307_GAMMA_SET_C 0xca /* Gamma Setting C */
+#define R61307_CONTRAST_SET 0xcc
+
+struct renesas_r61307 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *vcc_supply;
+ struct regulator *iovcc_supply;
+
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+
+ bool dig_cont_adj;
+ bool inversion;
+ u32 gamma;
+};
+
+static const u8 gamma_setting[][25] = {
+ { /* sentinel */ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x00, 0x06, 0x0a, 0x0f,
+ 0x14, 0x1f, 0x1f, 0x17,
+ 0x12, 0x0c, 0x09, 0x06,
+ 0x00, 0x06, 0x0a, 0x0f,
+ 0x14, 0x1f, 0x1f, 0x17,
+ 0x12, 0x0c, 0x09, 0x06
+ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x00, 0x05, 0x0b, 0x0f,
+ 0x11, 0x1d, 0x20, 0x18,
+ 0x18, 0x09, 0x07, 0x06,
+ 0x00, 0x05, 0x0b, 0x0f,
+ 0x11, 0x1d, 0x20, 0x18,
+ 0x18, 0x09, 0x07, 0x06
+ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x0b, 0x0d, 0x10, 0x14,
+ 0x13, 0x1d, 0x20, 0x18,
+ 0x12, 0x09, 0x07, 0x06,
+ 0x0a, 0x0c, 0x10, 0x14,
+ 0x13, 0x1d, 0x20, 0x18,
+ 0x12, 0x09, 0x07, 0x06
+ },
+};
+
+static inline struct renesas_r61307 *to_renesas_r61307(struct drm_panel *panel)
+{
+ return container_of(panel, struct renesas_r61307, panel);
+}
+
+static void renesas_r61307_reset(struct renesas_r61307 *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int renesas_r61307_prepare(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct device *dev = &priv->dsi->dev;
+ int ret;
+
+ if (priv->prepared)
+ return 0;
+
+ ret = regulator_enable(priv->vcc_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vcc power supply\n");
+ return ret;
+ }
+
+ usleep_range(2000, 3000);
+
+ ret = regulator_enable(priv->iovcc_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable iovcc power supply\n");
+ regulator_disable(priv->vcc_supply);
+ return ret;
+ }
+
+ usleep_range(2000, 3000);
+
+ renesas_r61307_reset(priv);
+
+ priv->prepared = true;
+ return 0;
+}
+
+static int renesas_r61307_enable(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 80);
+
+ mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_msleep(&ctx, 20);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+
+ /* MACP Off */
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_OFF);
+
+ if (priv->dig_cont_adj)
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_CONTRAST_SET,
+ 0xdc, 0xb4, 0xff);
+
+ if (priv->gamma)
+ mipi_dsi_generic_write_multi(&ctx, gamma_setting[priv->gamma],
+ sizeof(gamma_setting[priv->gamma]));
+
+ if (priv->inversion)
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_INVERSION,
+ 0x00, 0x50, 0x03, 0x22,
+ 0x16, 0x06, 0x60, 0x11);
+ else
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_INVERSION,
+ 0x00, 0x10, 0x03, 0x22,
+ 0x16, 0x06, 0x60, 0x01);
+
+ /* MACP On */
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_ON);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 50);
+
+ return ctx.accum_err;
+}
+
+static int renesas_r61307_disable(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 100);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+
+ return ctx.accum_err;
+}
+
+static int renesas_r61307_unprepare(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+
+ if (!priv->prepared)
+ return 0;
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ regulator_disable(priv->iovcc_supply);
+ usleep_range(2000, 3000);
+ regulator_disable(priv->vcc_supply);
+
+ priv->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode renesas_r61307_mode = {
+ .clock = (768 + 116 + 81 + 5) * (1024 + 24 + 8 + 2) * 60 / 1000,
+ .hdisplay = 768,
+ .hsync_start = 768 + 116,
+ .hsync_end = 768 + 116 + 81,
+ .htotal = 768 + 116 + 81 + 5,
+ .vdisplay = 1024,
+ .vsync_start = 1024 + 24,
+ .vsync_end = 1024 + 24 + 8,
+ .vtotal = 1024 + 24 + 8 + 2,
+ .width_mm = 76,
+ .height_mm = 101,
+};
+
+static int renesas_r61307_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &renesas_r61307_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs renesas_r61307_panel_funcs = {
+ .prepare = renesas_r61307_prepare,
+ .enable = renesas_r61307_enable,
+ .disable = renesas_r61307_disable,
+ .unprepare = renesas_r61307_unprepare,
+ .get_modes = renesas_r61307_get_modes,
+};
+
+static int renesas_r61307_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct renesas_r61307 *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct renesas_r61307, panel,
+ &renesas_r61307_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->vcc_supply = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(priv->vcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vcc_supply),
+ "Failed to get vcc-supply\n");
+
+ priv->iovcc_supply = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(priv->iovcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->iovcc_supply),
+ "Failed to get iovcc-supply\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset gpios\n");
+
+ if (device_property_read_bool(dev, "renesas,inversion"))
+ priv->inversion = true;
+
+ if (device_property_read_bool(dev, "renesas,contrast"))
+ priv->dig_cont_adj = true;
+
+ priv->gamma = 0;
+ device_property_read_u32(dev, "renesas,gamma", &priv->gamma);
+ /* guard against an out-of-range value from DT */
+ if (priv->gamma >= ARRAY_SIZE(gamma_setting))
+ priv->gamma = 0;
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void renesas_r61307_remove(struct mipi_dsi_device *dsi)
+{
+ struct renesas_r61307 *priv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id renesas_r61307_of_match[] = {
+ { .compatible = "hit,tx13d100vm0eaa" },
+ { .compatible = "koe,tx13d100vm0eaa" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_r61307_of_match);
+
+static struct mipi_dsi_driver renesas_r61307_driver = {
+ .probe = renesas_r61307_probe,
+ .remove = renesas_r61307_remove,
+ .driver = {
+ .name = "panel-renesas-r61307",
+ .of_match_table = renesas_r61307_of_match,
+ },
+};
+module_mipi_dsi_driver(renesas_r61307_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Renesas R61307-based panel driver");
+MODULE_LICENSE("GPL");
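
A pattern worth calling out in this driver (and in the r69328 below): the manufacturer registers are fenced by MACP, the manufacturer command protect register. Writing 0x04 unlocks the vendor command set, the gamma/contrast/inversion registers are programmed, and writing 0x03 locks it again so stray writes cannot reach them. A sketch of the bracket as a reusable helper (the helper itself is illustrative, not part of the driver):

    #include <drm/drm_mipi_dsi.h>

    #define R61307_MACP     0xb0
    #define R61307_MACP_ON  0x03
    #define R61307_MACP_OFF 0x04

    /* Run @body between a MACP unlock and re-lock; sketch only. */
    static int r61307_with_macp_unlocked(struct mipi_dsi_device *dsi,
                                         void (*body)(struct mipi_dsi_multi_context *))
    {
        struct mipi_dsi_multi_context ctx = { .dsi = dsi };

        mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_OFF);
        body(&ctx);     /* vendor registers: gamma, contrast, inversion */
        mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_ON);

        return ctx.accum_err;
    }
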
diff --git a/drivers/gpu/drm/panel/panel-renesas-r69328.c b/drivers/gpu/drm/panel/panel-renesas-r69328.c
new file mode 100644
index 000000000000..46287ab04c30
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-renesas-r69328.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define R69328_MACP 0xb0 /* Manufacturer Access CMD Protect */
+#define R69328_MACP_ON 0x03
+#define R69328_MACP_OFF 0x04
+
+#define R69328_GAMMA_SET_A 0xc8 /* Gamma Setting A */
+#define R69328_GAMMA_SET_B 0xc9 /* Gamma Setting B */
+#define R69328_GAMMA_SET_C 0xca /* Gamma Setting C */
+
+#define R69328_POWER_SET 0xd1
+
+struct renesas_r69328 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *vdd_supply;
+ struct regulator *vddio_supply;
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct renesas_r69328 *to_renesas_r69328(struct drm_panel *panel)
+{
+ return container_of(panel, struct renesas_r69328, panel);
+}
+
+static void renesas_r69328_reset(struct renesas_r69328 *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int renesas_r69328_prepare(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct device *dev = &priv->dsi->dev;
+ int ret;
+
+ if (priv->prepared)
+ return 0;
+
+ ret = regulator_enable(priv->vdd_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vdd power supply\n");
+ return ret;
+ }
+
+ usleep_range(10000, 11000);
+
+ ret = regulator_enable(priv->vddio_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vddio power supply\n");
+ regulator_disable(priv->vdd_supply);
+ return ret;
+ }
+
+ usleep_range(10000, 11000);
+
+ renesas_r69328_reset(priv);
+
+ priv->prepared = true;
+ return 0;
+}
+
+static int renesas_r69328_enable(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ /* Set address mode */
+ mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+
+ mipi_dsi_msleep(&ctx, 100);
+
+ /* MACP Off */
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_OFF);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_POWER_SET, 0x14, 0x1d,
+ 0x21, 0x67, 0x11, 0x9a);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_A, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_B, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_C, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ /* MACP On */
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_ON);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 50);
+
+ return ctx.accum_err;
+}
+
+static int renesas_r69328_disable(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 60);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+
+ return ctx.accum_err;
+}
+
+static int renesas_r69328_unprepare(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+
+ if (!priv->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+
+ usleep_range(5000, 6000);
+
+ regulator_disable(priv->vddio_supply);
+ regulator_disable(priv->vdd_supply);
+
+ priv->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode renesas_r69328_mode = {
+ .clock = (720 + 92 + 62 + 4) * (1280 + 6 + 3 + 1) * 60 / 1000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 92,
+ .hsync_end = 720 + 92 + 62,
+ .htotal = 720 + 92 + 62 + 4,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 6,
+ .vsync_end = 1280 + 6 + 3,
+ .vtotal = 1280 + 6 + 3 + 1,
+ .width_mm = 59,
+ .height_mm = 105,
+};
+
+static int renesas_r69328_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &renesas_r69328_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs renesas_r69328_panel_funcs = {
+ .prepare = renesas_r69328_prepare,
+ .enable = renesas_r69328_enable,
+ .disable = renesas_r69328_disable,
+ .unprepare = renesas_r69328_unprepare,
+ .get_modes = renesas_r69328_get_modes,
+};
+
+static int renesas_r69328_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct renesas_r69328 *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct renesas_r69328, panel,
+ &renesas_r69328_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(priv->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vdd_supply),
+ "Failed to get vdd-supply\n");
+
+ priv->vddio_supply = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(priv->vddio_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vddio_supply),
+ "Failed to get vddio-supply\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void renesas_r69328_remove(struct mipi_dsi_device *dsi)
+{
+ struct renesas_r69328 *priv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id renesas_r69328_of_match[] = {
+ { .compatible = "jdi,dx12d100vm0eaa" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_r69328_of_match);
+
+static struct mipi_dsi_driver renesas_r69328_driver = {
+ .probe = renesas_r69328_probe,
+ .remove = renesas_r69328_remove,
+ .driver = {
+ .name = "panel-renesas-r69328",
+ .of_match_table = renesas_r69328_of_match,
+ },
+};
+module_mipi_dsi_driver(renesas_r69328_driver);
+
+MODULE_AUTHOR("Maxim Schwalm <maxim.schwalm@gmail.com>");
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Renesas R69328-based panel driver");
+MODULE_LICENSE("GPL");
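
Both Renesas drivers also split their transfers by packet type: standard DCS commands (exit_sleep_mode, set_address_mode, pixel format, display on/off) go through mipi_dsi_dcs_* helpers, which emit DCS write packets, while the manufacturer registers (MACP, power set, gamma) use mipi_dsi_generic_write_seq_multi(), which emits generic writes where the register is simply the first payload byte. Many panels tolerate either, but the packet data types differ on the wire, so the split follows what the panel documentation expects. A minimal contrast of the two, with 0xd1 standing in for a vendor register:

    #include <video/mipi_display.h>

    #include <drm/drm_mipi_dsi.h>

    static int example_writes(struct mipi_dsi_device *dsi)
    {
        struct mipi_dsi_multi_context ctx = { .dsi = dsi };

        /* DCS write: sent as a DCS short/long write packet */
        mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);

        /* Generic write: 0xd1 is simply the first payload byte */
        mipi_dsi_generic_write_seq_multi(&ctx, 0xd1, 0x14, 0x1d, 0x21);

        return ctx.accum_err;
    }
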
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 2ef5ea5eaeeb..ad35d0fb0a16 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -143,9 +143,11 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
struct rb070d30_panel *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct rb070d30_panel, panel,
+ &rb070d30_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd");
if (IS_ERR(ctx->supply))
@@ -154,9 +156,6 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
index cf6186312252..188dd7cf0297 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
@@ -211,9 +211,11 @@ static int ams581vf01_probe(struct mipi_dsi_device *dsi)
struct ams581vf01 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ams581vf01, panel,
+ &ams581vf01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(ams581vf01_supplies),
@@ -235,8 +237,6 @@ static int ams581vf01_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &ams581vf01_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = ams581vf01_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
index 817365cb5e46..f8ebbd4a530b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
@@ -257,9 +257,11 @@ static int ams639rq08_probe(struct mipi_dsi_device *dsi)
struct ams639rq08 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ams639rq08, panel,
+ &ams639rq08_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(ams639rq08_supplies),
@@ -281,8 +283,6 @@ static int ams639rq08_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &ams639rq08_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = ams639rq08_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 9a482a744b8c..20ec27d2d6c2 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -266,9 +266,12 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
struct device *dev = &aux_ep->dev;
int ret;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct atana33xc20_panel, base,
+ &atana33xc20_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
dev_set_drvdata(dev, panel);
panel->aux = aux_ep->aux;
@@ -301,8 +304,6 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
if (ret)
return ret;
- drm_panel_init(&panel->base, dev, &atana33xc20_funcs, DRM_MODE_CONNECTOR_eDP);
-
pm_runtime_get_sync(dev);
ret = drm_panel_dp_aux_backlight(&panel->base, aux_ep->aux);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c
index 14c6700e37b3..a97182f3c990 100644
--- a/drivers/gpu/drm/panel/panel-samsung-db7430.c
+++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -267,9 +267,11 @@ static int db7430_probe(struct spi_device *spi)
struct db7430 *db;
int ret;
- db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
+ db = devm_drm_panel_alloc(dev, struct db7430, panel, &db7430_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(db))
+ return PTR_ERR(db);
+
db->dev = dev;
/*
@@ -294,9 +296,6 @@ static int db7430_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
- drm_panel_init(&db->panel, dev, &db7430_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
/* FIXME: if no external backlight, use internal backlight */
ret = drm_panel_of_backlight(&db->panel);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 9f438683a6f6..c7f2241523a0 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -339,9 +339,11 @@ static int ld9040_probe(struct spi_device *spi)
struct ld9040 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ld9040, panel,
+ &ld9040_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
spi_set_drvdata(spi, ctx);
@@ -373,9 +375,6 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
bldev = devm_backlight_device_register(dev, dev_name(dev), dev,
ctx, &ld9040_bl_ops,
&ld9040_bl_props);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 79f611963c61..ba1a02000bb9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -166,9 +166,11 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
struct s6d16d0 *s6;
int ret;
- s6 = devm_kzalloc(dev, sizeof(struct s6d16d0), GFP_KERNEL);
- if (!s6)
- return -ENOMEM;
+ s6 = devm_drm_panel_alloc(dev, struct s6d16d0, panel,
+ &s6d16d0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(s6))
+ return PTR_ERR(s6);
mipi_dsi_set_drvdata(dsi, s6);
s6->dev = dev;
@@ -200,9 +202,6 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&s6->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
index 2adb223a895c..300dc19bd9d1 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
@@ -247,9 +247,11 @@ static int s6d27a1_probe(struct spi_device *spi)
struct s6d27a1 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6d27a1, panel,
+ &s6d27a1_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -277,9 +279,6 @@ static int s6d27a1_probe(struct spi_device *spi)
ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
- drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "failed to add backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index f23d8832a1ad..692020081524 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -34,8 +34,8 @@ struct s6d7aa0 {
struct s6d7aa0_panel_desc {
unsigned int panel_type;
- int (*init_func)(struct s6d7aa0 *ctx);
- int (*off_func)(struct s6d7aa0 *ctx);
+ void (*init_func)(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx);
+ void (*off_func)(struct mipi_dsi_multi_context *dsi_ctx);
const struct drm_display_mode *drm_mode;
unsigned long mode_flags;
u32 bus_flags;
@@ -62,93 +62,61 @@ static void s6d7aa0_reset(struct s6d7aa0 *ctx)
msleep(50);
}
-static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
+static void s6d7aa0_lock(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx, bool lock)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
if (lock) {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0x5a, 0x5a);
} else {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0xa5, 0xa5);
}
-
- return 0;
}
static int s6d7aa0_on(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->init_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- return ret;
- }
+ ctx->desc->init_func(ctx, &dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
-static int s6d7aa0_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_off(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->off_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Panel-specific off function failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->off_func(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(64);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 64);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int s6d7aa0_prepare(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
s6d7aa0_reset(ctx);
ret = s6d7aa0_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -159,12 +127,8 @@ static int s6d7aa0_prepare(struct drm_panel *panel)
static int s6d7aa0_disable(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = s6d7aa0_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ s6d7aa0_off(ctx);
return 0;
}
@@ -185,13 +149,11 @@ static int s6d7aa0_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
- return 0;
+ return dsi_ctx.accum_err;
}
static int s6d7aa0_bl_get_brightness(struct backlight_device *bl)
@@ -228,65 +190,39 @@ s6d7aa0_create_backlight(struct mipi_dsi_device *dsi)
/* Initialization code and structures for LSL080AL02 panel */
-static int s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- usleep_range(20000, 25000);
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, false);
- mipi_dsi_dcs_write_seq(dsi, MCS_OTP_RELOAD, 0x00, 0x10);
- usleep_range(1000, 1500);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_OTP_RELOAD, 0x00, 0x10);
+ mipi_dsi_usleep_range(dsi_ctx, 1000, 1500);
/* SEQ_B6_PARAM_8_R01 */
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x10);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0x10);
/* BL_CTL_ON */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x28);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x28);
- usleep_range(5000, 6000);
+ mipi_dsi_usleep_range(dsi_ctx, 5000, 6000);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
- msleep(120);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_msleep(dsi_ctx, 120);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, true);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
-
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al02_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
/* BL_CTL_OFF */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x20);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x20);
}
static const struct drm_display_mode s6d7aa0_lsl080al02_mode = {
@@ -308,7 +244,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
.init_func = s6d7aa0_lsl080al02_init,
.off_func = s6d7aa0_lsl080al02_off,
.drm_mode = &s6d7aa0_lsl080al02_mode,
- .mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_NO_HFP,
.bus_flags = 0,
.has_backlight = false,
@@ -317,79 +253,51 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
/* Initialization code and structures for LSL080AL03 panel */
-static int s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- usleep_range(20000, 25000);
-
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, false);
if (ctx->desc->panel_type == S6D7AA0_PANEL_LSL080AL03) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0xc7, 0x00, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x78);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0xc7, 0x00, 0x29);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x78);
} else if (ctx->desc->panel_type == S6D7AA0_PANEL_LTL101AT01) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0x0b);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x68);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x68);
}
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x51);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x02, 0x08, 0x08);
-
- usleep_range(10000, 11000);
-
- mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x80, 0x80, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xcd,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
- mipi_dsi_dcs_write_seq(dsi, 0xce,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x03);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x51);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xf2, 0x02, 0x08, 0x08);
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ mipi_dsi_usleep_range(dsi_ctx, 10000, 11000);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x80, 0x80, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcd,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x03);
- return 0;
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
+ s6d7aa0_lock(ctx, dsi_ctx, true);
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al03_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x22, 0x00);
}
static const struct drm_display_mode s6d7aa0_lsl080al03_mode = {
@@ -484,9 +392,11 @@ static int s6d7aa0_probe(struct mipi_dsi_device *dsi)
struct s6d7aa0 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6d7aa0, panel,
+ &s6d7aa0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = of_device_get_match_data(dev);
if (!ctx->desc)
@@ -512,8 +422,6 @@ static int s6d7aa0_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
| ctx->desc->mode_flags;
- drm_panel_init(&ctx->panel, dev, &s6d7aa0_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
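
The s6d7aa0 rework above is the clearest instance of the second recurring pattern in this series, mipi_dsi_multi_context: each *_multi() helper records the first failure in accum_err and becomes a no-op once it is set, so a long command sequence needs a single error check at the end instead of an "if (ret < 0)" ladder after every write. A sketch with a hypothetical power-on sequence; the helpers are the same ones used in the hunks above:

        static int my_panel_on(struct mipi_dsi_device *dsi)
        {
                struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

                /* Every call below short-circuits once dsi_ctx.accum_err != 0. */
                mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
                mipi_dsi_msleep(&dsi_ctx, 120);
                mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x10);
                mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

                /* First error encountered, or 0 if the whole sequence ran. */
                return dsi_ctx.accum_err;
        }
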
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
index 27a059b55ae5..f4d75eca3cdf 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
@@ -185,9 +185,11 @@ static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
struct s6e3fa7_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e3fa7_panel, panel,
+ &s6e3fa7_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -202,8 +204,6 @@ static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &s6e3fa7_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = s6e3fa7_panel_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index ab8b58545284..1db0c63b1131 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -681,9 +681,11 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
struct s6e3ha2 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e3ha2, panel,
+ &s6e3ha2_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -731,8 +733,6 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
- drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
index 64c6f7d45bed..550e9ef9bb71 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
@@ -253,9 +253,11 @@ static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi)
struct s6e3ha8 *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct s6e3ha8, panel,
+ &s6e3ha8_amb577px01_wqhd_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(s6e3ha8_supplies),
s6e3ha8_supplies,
@@ -279,8 +281,6 @@ static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&priv->panel, dev, &s6e3ha8_amb577px01_wqhd_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
priv->panel.prepare_prev_first = true;
drm_panel_add(&priv->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index 364f1c9a16d9..6f3d39556f92 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -437,9 +437,11 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
struct s6e63j0x03 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct s6e63j0x03), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e63j0x03, panel,
+ &s6e63j0x03_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -462,8 +464,6 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"cannot get reset-gpio\n");
- drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 6917ffda5b2b..ea241c89593b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -13,6 +13,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
index e92e95158d1f..e91f50662997 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
@@ -687,9 +687,11 @@ static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi)
struct s6e88a0_ams427ap24 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e88a0_ams427ap24, panel,
+ &s6e88a0_ams427ap24_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(dev,
ARRAY_SIZE(s6e88a0_ams427ap24_supplies),
@@ -711,8 +713,6 @@ static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_NO_HFP;
- drm_panel_init(&ctx->panel, dev, &s6e88a0_ams427ap24_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->flip_horizontal = device_property_read_bool(dev, "flip-horizontal");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 57b1a899bbdc..ca5cad41ff1d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -165,9 +165,11 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
struct s6e88a0_ams452ef01 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e88a0_ams452ef01, panel,
+ &s6e88a0_ams452ef01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd3";
ctx->supplies[1].supply = "vci";
@@ -192,9 +194,6 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
- drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index c51d07ec1529..1b5c500d4f4e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -979,9 +979,11 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
struct s6e8aa0 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e8aa0, panel,
+ &s6e8aa0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -990,7 +992,7 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
- | MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
+ | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
ret = s6e8aa0_parse_dt(ctx);
if (ret < 0)
@@ -1014,8 +1016,6 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
ctx->brightness = GAMMA_LEVEL_NUM - 1;
- drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 04ce925b3d9d..064258217d50 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2020 Caleb Connolly <caleb@connolly.tech>
+/* Copyright (c) 2020 Casey Connolly <casey.connolly@linaro.org>
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
@@ -22,7 +22,6 @@ struct sofef00_panel {
struct mipi_dsi_device *dsi;
struct regulator *supply;
struct gpio_desc *reset_gpio;
- const struct drm_display_mode *mode;
};
static inline
@@ -44,66 +43,44 @@ static void sofef00_panel_reset(struct sofef00_panel *ctx)
static int sofef00_panel_on(struct sofef00_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- usleep_range(10000, 11000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x12);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sofef00_panel_off(struct sofef00_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(40);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 40);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(160);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 160);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sofef00_panel_prepare(struct drm_panel *panel)
@@ -122,7 +99,6 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
ret = sofef00_panel_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -133,13 +109,8 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
static int sofef00_panel_unprepare(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
-
- ret = sofef00_panel_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ sofef00_panel_off(ctx);
regulator_disable(ctx->supply);
return 0;
@@ -159,26 +130,11 @@ static const struct drm_display_mode enchilada_panel_mode = {
.height_mm = 145,
};
-static const struct drm_display_mode fajita_panel_mode = {
- .clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000,
- .hdisplay = 1080,
- .hsync_start = 1080 + 72,
- .hsync_end = 1080 + 72 + 16,
- .htotal = 1080 + 72 + 16 + 36,
- .vdisplay = 2340,
- .vsync_start = 2340 + 32,
- .vsync_end = 2340 + 32 + 4,
- .vtotal = 2340 + 32 + 4 + 18,
- .width_mm = 68,
- .height_mm = 145,
-};
-
static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
{
struct drm_display_mode *mode;
- struct sofef00_panel *ctx = to_sofef00_panel(panel);
- mode = drm_mode_duplicate(connector->dev, ctx->mode);
+ mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode);
if (!mode)
return -ENOMEM;
@@ -235,16 +191,11 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
struct sofef00_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->mode = of_device_get_match_data(dev);
-
- if (!ctx->mode) {
- dev_err(dev, "Missing device mode\n");
- return -ENODEV;
- }
+ ctx = devm_drm_panel_alloc(dev, struct sofef00_panel, panel,
+ &sofef00_panel_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(ctx->supply))
@@ -262,9 +213,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
- drm_panel_init(&ctx->panel, dev, &sofef00_panel_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = sofef00_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
@@ -295,14 +243,7 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id sofef00_panel_of_match[] = {
- { // OnePlus 6 / enchilada
- .compatible = "samsung,sofef00",
- .data = &enchilada_panel_mode,
- },
- { // OnePlus 6T / fajita
- .compatible = "samsung,s6e3fc2x01",
- .data = &fajita_panel_mode,
- },
+ { .compatible = "samsung,sofef00" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sofef00_panel_of_match);
@@ -318,6 +259,6 @@ static struct mipi_dsi_driver sofef00_panel_driver = {
module_mipi_dsi_driver(sofef00_panel_driver);
-MODULE_AUTHOR("Caleb Connolly <caleb@connolly.tech>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 7d1b421ea9dd..0935d83ee2db 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -204,9 +204,11 @@ static int seiko_panel_probe(struct device *dev,
struct seiko_panel *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct seiko_panel, base,
+ &seiko_panel_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->desc = desc;
@@ -224,9 +226,6 @@ static int seiko_panel_probe(struct device *dev,
return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
"failed to request GPIO\n");
- drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->base);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index a0d76d588da1..d159b0e4fdb6 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -279,9 +279,6 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->supply))
return PTR_ERR(sharp->supply);
- drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&sharp->base);
if (ret)
return ret;
@@ -323,10 +320,12 @@ static int sharp_panel_probe(struct mipi_dsi_device *dsi)
/* register a panel for only the DSI-LINK1 interface */
if (secondary) {
- sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL);
- if (!sharp) {
+ sharp = devm_drm_panel_alloc(&dsi->dev, __typeof(*sharp), base,
+ &sharp_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(sharp)) {
put_device(&secondary->dev);
- return -ENOMEM;
+ return PTR_ERR(sharp);
}
mipi_dsi_set_drvdata(dsi, sharp);
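
One subtlety in the sharp-lq101r1sx01 hunk above: the reference on the secondary DSI link is taken before the allocation (this driver looks it up with of_find_mipi_dsi_device_by_node()), so the error path must still drop it by hand; devm cleanup only covers the allocation itself. Roughly, with np standing in for the remote link node:

        secondary = of_find_mipi_dsi_device_by_node(np); /* takes a reference */
        /* ... */
        sharp = devm_drm_panel_alloc(&dsi->dev, __typeof(*sharp), base,
                                     &sharp_panel_funcs, DRM_MODE_CONNECTOR_DSI);
        if (IS_ERR(sharp)) {
                put_device(&secondary->dev); /* balance the lookup reference */
                return PTR_ERR(sharp);
        }
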
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index a9673a52b861..938beac4655d 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -138,9 +138,10 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
{
struct ls037v7dw01_panel *lcd;
- lcd = devm_kzalloc(&pdev->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&pdev->dev, struct ls037v7dw01_panel, panel,
+ &ls037v7dw01_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
platform_set_drvdata(pdev, lcd);
lcd->pdev = pdev;
@@ -181,9 +182,6 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
return PTR_ERR(lcd->ud_gpio);
}
- drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 729cbb0d8403..36abfa2e65e9 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -36,60 +36,49 @@ static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
/* Novatek two-lane operation */
- ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xae, 0x03);
/* Set both MCU and RGB I/F to 24bpp */
- ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
- (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx,
+ MIPI_DCS_PIXEL_FMT_24BIT |
+ (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index 0b4e0983639b..0456f3d705e7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -193,9 +193,11 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
struct sharp_ls060 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sharp_ls060, panel,
+ &sharp_ls060_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->vddi_supply = devm_regulator_get(dev, "vddi");
if (IS_ERR(ctx->vddi_supply))
@@ -227,9 +229,6 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &sharp_ls060_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
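
Several probes above funnel their error returns through dev_err_probe(), which logs the message, records the deferral reason, and stays silent for -EPROBE_DEFER; it passes the error through, so it can sit directly in a return statement. A sketch with an illustrative supply name:

        /* Logs (except for -EPROBE_DEFER) and returns the error unchanged. */
        ctx->supply = devm_regulator_get(dev, "power");
        if (IS_ERR(ctx->supply))
                return dev_err_probe(dev, PTR_ERR(ctx->supply),
                                     "failed to get power supply\n");
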
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 232b03c1a259..3333d4a07504 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -136,6 +137,14 @@ struct panel_desc {
int connector_type;
};
+struct panel_desc_dsi {
+ struct panel_desc desc;
+
+ unsigned long flags;
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+};
+
struct panel_simple {
struct drm_panel base;
@@ -430,10 +439,7 @@ static const struct drm_panel_funcs panel_simple_funcs = {
.get_timings = panel_simple_get_timings,
};
-static struct panel_desc panel_dpi;
-
-static int panel_dpi_probe(struct device *dev,
- struct panel_simple *panel)
+static struct panel_desc *panel_dpi_probe(struct device *dev)
{
struct display_timing *timing;
const struct device_node *np;
@@ -445,17 +451,17 @@ static int panel_dpi_probe(struct device *dev,
np = dev->of_node;
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
if (!timing)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
ret = of_get_display_timing(np, "panel-timing", timing);
if (ret < 0) {
dev_err(dev, "%pOF: no panel-timing node found for \"panel-dpi\" binding\n",
np);
- return ret;
+ return ERR_PTR(ret);
}
desc->timings = timing;
@@ -473,9 +479,7 @@ static int panel_dpi_probe(struct device *dev,
/* We do not know the connector for the DT node, so guess it */
desc->connector_type = DRM_MODE_CONNECTOR_DPI;
- panel->desc = desc;
-
- return 0;
+ return desc;
}
#define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \
@@ -570,8 +574,44 @@ static int panel_simple_override_nondefault_lvds_datamapping(struct device *dev,
return 0;
}
-static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
+static const struct panel_desc *panel_simple_get_desc(struct device *dev)
{
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI) &&
+ dev_is_mipi_dsi(dev)) {
+ const struct panel_desc_dsi *dsi_desc;
+
+ dsi_desc = of_device_get_match_data(dev);
+ if (!dsi_desc)
+ return ERR_PTR(-ENODEV);
+
+ return &dsi_desc->desc;
+ }
+
+ if (dev_is_platform(dev)) {
+ const struct panel_desc *desc;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc) {
+ /*
+ * panel-dpi probes without a descriptor and
+ * panel_dpi_probe() will initialize one for us
+ * based on the device tree.
+ */
+ if (of_device_is_compatible(dev->of_node, "panel-dpi"))
+ return panel_dpi_probe(dev);
+ else
+ return ERR_PTR(-ENODEV);
+ }
+
+ return desc;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct panel_simple *panel_simple_probe(struct device *dev)
+{
+ const struct panel_desc *desc;
struct panel_simple *panel;
struct display_timing dt;
struct device_node *ddc;
@@ -579,26 +619,31 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
u32 bus_flags;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ desc = panel_simple_get_desc(dev);
+ if (IS_ERR(desc))
+ return ERR_CAST(desc);
+
+ panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
+ &panel_simple_funcs, desc->connector_type);
+ if (IS_ERR(panel))
+ return ERR_CAST(panel);
panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
- return PTR_ERR(panel->supply);
+ return ERR_CAST(panel->supply);
panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(panel->enable_gpio))
- return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
- "failed to request GPIO\n");
+ return dev_err_cast_probe(dev, panel->enable_gpio,
+ "failed to request GPIO\n");
err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
if (err) {
dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
- return err;
+ return ERR_PTR(err);
}
ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
@@ -607,19 +652,12 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
of_node_put(ddc);
if (!panel->ddc)
- return -EPROBE_DEFER;
+ return ERR_PTR(-EPROBE_DEFER);
}
- if (desc == &panel_dpi) {
- /* Handle the generic panel-dpi binding */
- err = panel_dpi_probe(dev, panel);
- if (err)
- goto free_ddc;
- desc = panel->desc;
- } else {
- if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
- panel_simple_parse_panel_timing_node(dev, panel, &dt);
- }
+ if (!of_device_is_compatible(dev->of_node, "panel-dpi") &&
+ !of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) {
/* Optional data-mapping property for overriding bus format */
@@ -694,8 +732,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type);
-
err = drm_panel_of_backlight(&panel->base);
if (err) {
dev_err_probe(dev, err, "Could not find backlight\n");
@@ -704,7 +740,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
drm_panel_add(&panel->base);
- return 0;
+ return panel;
disable_pm_runtime:
pm_runtime_dont_use_autosuspend(dev);
@@ -713,7 +749,7 @@ free_ddc:
if (panel->ddc)
put_device(&panel->ddc->dev);
- return err;
+ return ERR_PTR(err);
}
static void panel_simple_shutdown(struct device *dev)
@@ -1027,27 +1063,28 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
-static const struct drm_display_mode auo_g101evn010_mode = {
- .clock = 68930,
- .hdisplay = 1280,
- .hsync_start = 1280 + 82,
- .hsync_end = 1280 + 82 + 2,
- .htotal = 1280 + 82 + 2 + 84,
- .vdisplay = 800,
- .vsync_start = 800 + 8,
- .vsync_end = 800 + 8 + 2,
- .vtotal = 800 + 8 + 2 + 6,
+static const struct display_timing auo_g101evn010_timing = {
+ .pixelclock = { 64000000, 68930000, 85000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 8, 64, 256 },
+ .hback_porch = { 8, 64, 256 },
+ .hsync_len = { 40, 168, 767 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 8, 100 },
+ .vback_porch = { 4, 8, 100 },
+ .vsync_len = { 8, 16, 223 },
};
static const struct panel_desc auo_g101evn010 = {
- .modes = &auo_g101evn010_mode,
- .num_modes = 1,
+ .timings = &auo_g101evn010_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 216,
.height = 135,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -1268,6 +1305,30 @@ static const struct panel_desc auo_g190ean01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing auo_p238han01_timings = {
+ .pixelclock = { 107400000, 142400000, 180000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 30, 70, 650 },
+ .hback_porch = { 30, 70, 650 },
+ .hsync_len = { 20, 40, 136 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 5, 19, 318 },
+ .vback_porch = { 5, 19, 318 },
+ .vsync_len = { 4, 12, 120 },
+};
+
+static const struct panel_desc auo_p238han01 = {
+ .timings = &auo_p238han01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 527,
+ .height = 296,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_p320hvn03_timings = {
.pixelclock = { 106000000, 148500000, 164000000 },
.hactive = { 1920, 1920, 1920 },
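
Two panel-simple entries here, auo_g101evn010 and the new auo_p238han01, are described with a struct display_timing rather than a fixed drm_display_mode. Each field is a { min, typ, max } triplet: panel-simple builds the advertised mode from the typ column, while the ranges tell the display controller how far porches and pixel clock may legitimately be stretched. Reusing the g101evn010 numbers from the hunk above:

        /* Each field is { min, typ, max }; the typ column builds the mode. */
        static const struct display_timing example_timing = {
                .pixelclock   = { 64000000, 68930000, 85000000 },
                .hactive      = { 1280, 1280, 1280 },
                .hfront_porch = {    8,   64,  256 },
                .hback_porch  = {    8,   64,  256 },
                .hsync_len    = {   40,  168,  767 },
                .vactive      = {  800,  800,  800 },
                .vfront_porch = {    4,    8,  100 },
                .vback_porch  = {    4,    8,  100 },
                .vsync_len    = {    8,   16,  223 },
        };

        /*
         * Typical totals: htotal = 1280 + 64 + 168 + 64 = 1576 and
         * vtotal = 800 + 8 + 16 + 8 = 832.
         */
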
@@ -2198,13 +2259,14 @@ static const struct display_timing evervision_vgg644804_timing = {
static const struct panel_desc evervision_vgg644804 = {
.timings = &evervision_vgg644804_timing,
.num_timings = 1,
- .bpc = 8,
+ .bpc = 6,
.size = {
.width = 115,
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing evervision_vgg804821_timing = {
@@ -3527,6 +3589,30 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode nlt_nl13676bc25_03f_mode = {
+ .clock = 75400,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 14,
+ .hsync_end = 1366 + 14 + 56,
+ .htotal = 1366 + 14 + 56 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 1,
+ .vsync_end = 768 + 1 + 3,
+ .vtotal = 768 + 1 + 3 + 22,
+};
+
+static const struct panel_desc nlt_nl13676bc25_03f = {
+ .modes = &nlt_nl13676bc25_03f_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 363,
+ .height = 215,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing nlt_nl192108ac18_02d_timing = {
.pixelclock = { 130000000, 148350000, 163000000 },
.hactive = { 1920, 1920, 1920 },
@@ -3796,6 +3882,32 @@ static const struct panel_desc pda_91_00156_a0 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode powertip_ph128800t004_zza01_mode = {
+ .clock = 71150,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 9,
+ .vsync_end = 800 + 9 + 8,
+ .vtotal = 800 + 9 + 8 + 6,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc powertip_ph128800t004_zza01 = {
+ .modes = &powertip_ph128800t004_zza01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = {
.clock = 66500,
.hdisplay = 1280,
@@ -4393,10 +4505,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
};
/*
- * The datasheet computes total blanking as back porch + front porch, not
- * including sync pulse width. This is for both H and V. To make the total
- * blanking and period correct, subtract the pulse width from the front
- * porch.
+ * The TM070JDHG34-00 datasheet computes total blanking as back porch +
+ * front porch, not including sync pulse width. This is for both H and
+ * V. To make the total blanking and period correct, subtract the pulse
+ * width from the front porch.
*
* This works well for the Min and Typ values, but for Max values the sync
* pulse width is higher than back porch + front porch, so work around that
@@ -4405,6 +4517,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
*
* Exact datasheet values are added as a comment where they differ from the
* ones implemented for the above reason.
+ *
+ * The P0700WXF1MBAA datasheet is even less detailed, only listing the
+ * period and total blanking time; however, the resulting values are the
+ * same as for the TM070JDHG34-00.
*/
static const struct display_timing tianma_tm070jdhg34_00_timing = {
.pixelclock = { 68400000, 71900000, 78100000 },
@@ -4427,6 +4543,30 @@ static const struct panel_desc tianma_tm070jdhg34_00 = {
.width = 150, /* 149.76 */
.height = 94, /* 93.60 */
},
+ .delay = {
+ .prepare = 15, /* Tp1 */
+ .enable = 150, /* Tp2 */
+ .disable = 150, /* Tp4 */
+ .unprepare = 120, /* Tp3 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct panel_desc tianma_p0700wxf1mbaa = {
+ .timings = &tianma_tm070jdhg34_00_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 150, /* 149.76 */
+ .height = 94, /* 93.60 */
+ },
+ .delay = {
+ .prepare = 18, /* Tr + Tp1 */
+ .enable = 152, /* Tp2 + Tp5 */
+ .disable = 152, /* Tp6 + Tp4 */
+ .unprepare = 120, /* Tp3 */
+ },
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -4860,6 +5000,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g190ean01",
.data = &auo_g190ean01,
}, {
+ .compatible = "auo,p238han01",
+ .data = &auo_p238han01,
+ }, {
.compatible = "auo,p320hvn03",
.data = &auo_p320hvn03,
}, {
@@ -5121,6 +5264,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
+ .compatible = "nlt,nl13676bc25-03f",
+ .data = &nlt_nl13676bc25_03f,
+ }, {
.compatible = "nlt,nl192108ac18-02d",
.data = &nlt_nl192108ac18_02d,
}, {
@@ -5154,6 +5300,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
+ .compatible = "powertip,ph128800t004-zza01",
+ .data = &powertip_ph128800t004_zza01,
+ }, {
.compatible = "powertip,ph128800t006-zhc01",
.data = &powertip_ph128800t006_zhc01,
}, {
@@ -5214,6 +5363,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tfc,s9700rtwv43tr-01b",
.data = &tfc_s9700rtwv43tr_01b,
}, {
+ .compatible = "tianma,p0700wxf1mbaa",
+ .data = &tianma_p0700wxf1mbaa,
+ }, {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
@@ -5279,7 +5431,12 @@ static const struct of_device_id platform_of_match[] = {
}, {
/* Must be the last entry */
.compatible = "panel-dpi",
- .data = &panel_dpi,
+
+ /*
+ * Explicitly NULL; the panel_desc structure will be
+ * allocated by panel_dpi_probe().
+ */
+ .data = NULL,
}, {
/* sentinel */
}
@@ -5288,13 +5445,13 @@ MODULE_DEVICE_TABLE(of, platform_of_match);
static int panel_simple_platform_probe(struct platform_device *pdev)
{
- const struct panel_desc *desc;
+ struct panel_simple *panel;
- desc = of_device_get_match_data(&pdev->dev);
- if (!desc)
- return -ENODEV;
+ panel = panel_simple_probe(&pdev->dev);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
- return panel_simple_probe(&pdev->dev, desc);
+ return 0;
}
static void panel_simple_platform_remove(struct platform_device *pdev)
@@ -5324,14 +5481,6 @@ static struct platform_driver panel_simple_platform_driver = {
.shutdown = panel_simple_platform_shutdown,
};
-struct panel_desc_dsi {
- struct panel_desc desc;
-
- unsigned long flags;
- enum mipi_dsi_pixel_format format;
- unsigned int lanes;
-};
-
static const struct drm_display_mode auo_b080uan01_mode = {
.clock = 154500,
.hdisplay = 1200,
@@ -5565,16 +5714,14 @@ MODULE_DEVICE_TABLE(of, dsi_of_match);
static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
{
const struct panel_desc_dsi *desc;
+ struct panel_simple *panel;
int err;
- desc = of_device_get_match_data(&dsi->dev);
- if (!desc)
- return -ENODEV;
-
- err = panel_simple_probe(&dsi->dev, &desc->desc);
- if (err < 0)
- return err;
+ panel = panel_simple_probe(&dsi->dev);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+ desc = container_of(panel->desc, struct panel_desc_dsi, desc);
dsi->mode_flags = desc->flags;
dsi->format = desc->format;
dsi->lanes = desc->lanes;
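
The net effect of the panel-simple refactor above: panel_simple_probe() now resolves its own descriptor via panel_simple_get_desc() and returns the allocated struct panel_simple * instead of an int, so the DSI front end can recover the enclosing panel_desc_dsi from the embedded desc pointer. That recovery is an ordinary container_of() walk, valid because every DSI match datum embeds a panel_desc as its first member; dsi_desc below is an illustrative rename of the desc variable in the hunk:

        const struct panel_desc_dsi *dsi_desc;

        /* panel->desc points at the desc member inside a panel_desc_dsi. */
        dsi_desc = container_of(panel->desc, struct panel_desc_dsi, desc);
        dsi->mode_flags = dsi_desc->flags;
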
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 1f72ef7ca74c..2f79ec4a2063 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -520,6 +520,28 @@ static void rg28xx_gip_sequence(struct st7701 *st7701)
st7701_switch_cmd_bkx(st7701, false, 0);
}
+static void wf40eswaa6mnn0_gip_sequence(struct st7701 *st7701)
+{
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x28, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x08, 0xA0, 0x00, 0x00, 0x07, 0xA0, 0x00,
+ 0x00, 0x00, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE2, 0x11, 0x11, 0x44, 0x44, 0xED, 0xA0, 0x00,
+ 0x00, 0xEC, 0xA0, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x11, 0x11);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0A, 0xE9, 0xD8, 0xA0, 0x0C, 0xEB, 0xD8,
+ 0xA0, 0x0E, 0xED, 0xD8, 0xA0, 0x10, 0xEF, 0xD8, 0xA0);
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x11, 0x11);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x09, 0xE8, 0xD8, 0xA0, 0x0B, 0xEA, 0xD8,
+ 0xA0, 0x0D, 0xEC, 0xD8, 0xA0, 0x0F, 0xEE, 0xD8, 0xA0);
+ ST7701_WRITE(st7701, 0xEB, 0x00, 0x00, 0xE4, 0xE4, 0x88, 0x00, 0x40);
+ ST7701_WRITE(st7701, 0xEC, 0x3C, 0x00);
+ ST7701_WRITE(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x02, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x20, 0x45, 0x67, 0x98, 0xBA);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0);
+}
+
static int st7701_prepare(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
@@ -1135,6 +1157,107 @@ static const struct st7701_panel_desc rg28xx_desc = {
.gip_sequence = rg28xx_gip_sequence,
};
+static const struct drm_display_mode wf40eswaa6mnn0_mode = {
+ .clock = 18306,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 45,
+ .htotal = 480 + 2 + 45 + 13,
+
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 70,
+ .vtotal = 480 + 2 + 70 + 13,
+
+ .width_mm = 72,
+ .height_mm = 70,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc wf40eswaa6mnn0_desc = {
+ .mode = &wf40eswaa6mnn0_mode,
+ .lanes = 2,
+ .format = MIPI_DSI_FMT_RGB888,
+ .panel_sleep_delay = 0,
+
+ .pv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0x1),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0c),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0c),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x04),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x14),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0xb3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3a),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x19),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0f),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x02),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0xf),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x0f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0xa3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x0d)
+ },
+ .nlinv = 3,
+ .vop_uv = 4737500,
+ .vcom_uv = 662500,
+ .vgh_mv = 15000,
+ .vgl_mv = -10170,
+ .avdd_mv = 6600,
+ .avcl_mv = -4600,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIDDLE,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = wf40eswaa6mnn0_gip_sequence,
+};
+
static void st7701_cleanup(void *data)
{
struct st7701 *st7701 = (struct st7701 *)data;
@@ -1150,9 +1273,10 @@ static int st7701_probe(struct device *dev, int connector_type)
struct st7701 *st7701;
int ret;
- st7701 = devm_kzalloc(dev, sizeof(*st7701), GFP_KERNEL);
- if (!st7701)
- return -ENOMEM;
+ st7701 = devm_drm_panel_alloc(dev, struct st7701, panel, &st7701_funcs,
+ connector_type);
+ if (IS_ERR(st7701))
+ return PTR_ERR(st7701);
desc = of_device_get_match_data(dev);
if (!desc)
@@ -1176,7 +1300,6 @@ static int st7701_probe(struct device *dev, int connector_type)
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
- drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
st7701->panel.prepare_prev_first = true;
/**
@@ -1265,6 +1388,7 @@ static const struct of_device_id st7701_dsi_of_match[] = {
{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
+ { .compatible = "winstar,wf40eswaa6mnn0", .data = &wf40eswaa6mnn0_desc },
{ }
};
MODULE_DEVICE_TABLE(of, st7701_dsi_of_match);
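The st7701 hunk above shows the conversion applied to every panel driver in this series: devm_drm_panel_alloc() replaces the devm_kzalloc() + drm_panel_init() pair, allocating the driver structure with its embedded struct drm_panel already initialized and returning an ERR_PTR() (not NULL) on failure. A minimal sketch of the converted probe shape, with hypothetical driver names:

	struct my_panel {
		struct drm_panel panel;	/* embedded member named in the macro call */
	};

	static int my_probe(struct mipi_dsi_device *dsi)
	{
		struct my_panel *ctx;

		ctx = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
					   &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);
		if (IS_ERR(ctx))	/* ERR_PTR-based, unlike devm_kzalloc() */
			return PTR_ERR(ctx);

		/* ... acquire resources; no separate drm_panel_init() needed ... */
		return 0;
	}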
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 67e8e45498cb..1a007a244d84 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -846,9 +846,11 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
struct st7703 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct st7703, panel,
+ &st7703_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
@@ -876,9 +878,6 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
- drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 28bfc48a9127..04d91929eedd 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -612,9 +612,10 @@ static int st7789v_probe(struct spi_device *spi)
struct st7789v *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct st7789v, panel,
+ &st7789v_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
@@ -626,9 +627,6 @@ static int st7789v_probe(struct spi_device *spi)
ctx->info = device_get_match_data(&spi->dev);
- drm_panel_init(&ctx->panel, dev, &st7789v_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ctx->power = devm_regulator_get(dev, "power");
ret = PTR_ERR_OR_ZERO(ctx->power);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index d437f5c84f5f..fe043de791b0 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -607,9 +607,10 @@ static int acx565akm_probe(struct spi_device *spi)
struct acx565akm_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct acx565akm_panel, panel,
+ &acx565akm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
spi->mode = SPI_MODE_3;
@@ -635,9 +636,6 @@ static int acx565akm_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 97f4bb4e1029..7c989b70ab51 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -175,9 +175,11 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
struct sony_td4353_jdi *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sony_td4353_jdi, panel,
+ &sony_td4353_jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->type = (uintptr_t)of_device_get_match_data(dev);
@@ -206,9 +208,6 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &sony_td4353_jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index 104b2290560e..216a6ad8696e 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -433,9 +433,11 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
struct truly_nt35521 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct truly_nt35521, panel,
+ &truly_nt35521_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "positive5";
ctx->supplies[1].supply = "negative5";
@@ -465,9 +467,6 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &truly_nt35521_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = truly_nt35521_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
index e780faee1857..4854437e2899 100644
--- a/drivers/gpu/drm/panel/panel-summit.c
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -68,9 +68,11 @@ static int summit_probe(struct mipi_dsi_device *dsi)
struct summit_data *s_data;
int ret;
- s_data = devm_kzalloc(dev, sizeof(*s_data), GFP_KERNEL);
- if (!s_data)
- return -ENOMEM;
+ s_data = devm_drm_panel_alloc(dev, struct summit_data, panel,
+ &summit_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(s_data))
+ return PTR_ERR(s_data);
mipi_dsi_set_drvdata(dsi, s_data);
s_data->dsi = dsi;
@@ -85,8 +87,6 @@ static int summit_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(s_data->bl))
return PTR_ERR(s_data->bl);
- drm_panel_init(&s_data->panel, dev, &summit_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&s_data->panel);
return mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index 17349825543f..3a74d48753d9 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -106,53 +106,34 @@ static int r63353_panel_power_off(struct r63353_panel *rpanel)
static int r63353_panel_activate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int i, ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ int i;
- ret = mipi_dsi_dcs_soft_reset(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to do Software Reset (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_soft_reset_multi(&dsi_ctx);
- usleep_range(15000, 17000);
+ mipi_dsi_usleep_range(&dsi_ctx, 15000, 17000);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
for (i = 0; i < rpanel->pdata->init_length; i++) {
const struct r63353_instr *instr = &rpanel->pdata->init[i];
- ret = mipi_dsi_dcs_write_buffer(dsi, instr->data, instr->len);
- if (ret < 0)
- goto fail;
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, instr->data,
+ instr->len);
}
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- usleep_range(5000, 10000);
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display ON (%d)\n", ret);
- goto fail;
- }
-
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
-fail:
- gpiod_set_value(rpanel->reset_gpio, 0);
+ if (dsi_ctx.accum_err)
+ gpiod_set_value(rpanel->reset_gpio, 0);
- return ret;
+ return dsi_ctx.accum_err;
}
static int r63353_panel_prepare(struct drm_panel *panel)
@@ -178,27 +159,16 @@ static int r63353_panel_prepare(struct drm_panel *panel)
return 0;
}
-static int r63353_panel_deactivate(struct r63353_panel *rpanel)
+static void r63353_panel_deactivate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display OFF (%d)\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- usleep_range(5000, 10000);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
}
static int r63353_panel_unprepare(struct drm_panel *panel)
@@ -259,9 +229,11 @@ static int r63353_panel_probe(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
struct r63353_panel *panel;
- panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct r63353_panel, base,
+ &r63353_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
mipi_dsi_set_drvdata(dsi, panel);
panel->dsi = dsi;
@@ -288,9 +260,6 @@ static int r63353_panel_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(panel->reset_gpio);
}
- drm_panel_init(&panel->base, dev, &r63353_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
panel->base.prepare_prev_first = true;
ret = drm_panel_of_backlight(&panel->base);
if (ret)
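The r63353 rework above relies on the multi-context contract used throughout these patches: every mipi_dsi_*_multi() helper, and the mipi_dsi_msleep()/mipi_dsi_usleep_range() wrappers, becomes a no-op once dsi_ctx.accum_err is non-zero, so an init sequence can be written straight-line with a single error check at the end. A condensed sketch:

	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	mipi_dsi_dcs_soft_reset_multi(&dsi_ctx);
	mipi_dsi_usleep_range(&dsi_ctx, 15000, 17000);	/* skipped after an error */
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);

	return dsi_ctx.accum_err;	/* 0 on success, first error code otherwise */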
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 4dbf8b88f264..ee86ff20c1bd 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,11 +86,7 @@ struct td028ttec1_panel {
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
-/*
- * noinline_for_stack so we don't get multiple copies of tx_buf
- * on the stack in case of gcc-plugin-structleak
- */
-static int noinline_for_stack
+static int
jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
{
struct spi_device *spi = lcd->spi;
@@ -322,9 +318,11 @@ static int td028ttec1_probe(struct spi_device *spi)
struct td028ttec1_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct td028ttec1_panel, panel,
+ &td028ttec1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -338,9 +336,6 @@ static int td028ttec1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&lcd->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index cf4609bb9b1d..b18af526b54c 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -421,9 +421,10 @@ static int td043mtea1_probe(struct spi_device *spi)
struct td043mtea1_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (lcd == NULL)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct td043mtea1_panel, panel,
+ &td043mtea1_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -455,9 +456,6 @@ static int td043mtea1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index f6a212e542cb..0beba5c08956 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -405,9 +405,11 @@ static int tpg110_probe(struct spi_device *spi)
struct tpg110 *tpg;
int ret;
- tpg = devm_kzalloc(dev, sizeof(*tpg), GFP_KERNEL);
- if (!tpg)
- return -ENOMEM;
+ tpg = devm_drm_panel_alloc(dev, struct tpg110, panel,
+ &tpg110_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(tpg))
+ return PTR_ERR(tpg);
+
tpg->dev = dev;
/* We get the physical display dimensions from the DT */
@@ -438,9 +440,6 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&tpg->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c
new file mode 100644
index 000000000000..413849f7b4de
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Alexander Baransky <sanyapilot496@gmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct visionox_g2647fb105 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data visionox_g2647fb105_supplies[] = {
+ { .supply = "vdd3p3" },
+ { .supply = "vddio" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline
+struct visionox_g2647fb105 *to_visionox_g2647fb105(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_g2647fb105, panel);
+}
+
+static void visionox_g2647fb105_reset(struct visionox_g2647fb105 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int visionox_g2647fb105_on(struct visionox_g2647fb105 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbe, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbf, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0xdd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_g2647fb105_off(struct visionox_g2647fb105 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_g2647fb105_prepare(struct drm_panel *panel)
+{
+ struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ visionox_g2647fb105_reset(ctx);
+
+ ret = visionox_g2647fb105_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int visionox_g2647fb105_unprepare(struct drm_panel *panel)
+{
+ struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = visionox_g2647fb105_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode visionox_g2647fb105_mode = {
+ .clock = (1080 + 28 + 4 + 36) * (2340 + 8 + 4 + 4) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 28,
+ .hsync_end = 1080 + 28 + 4,
+ .htotal = 1080 + 28 + 4 + 36,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 8,
+ .vsync_end = 2340 + 8 + 4,
+ .vtotal = 2340 + 8 + 4 + 4,
+ .width_mm = 69,
+ .height_mm = 149,
+};
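(Writing .clock as the timing expression keeps it consistent with the porch values by construction; it evaluates to 162281 kHz, roughly 162.3 MHz, for the 1148 x 2356 pixel total at 60 Hz.)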
+
+static int visionox_g2647fb105_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &visionox_g2647fb105_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_g2647fb105_panel_funcs = {
+ .prepare = visionox_g2647fb105_prepare,
+ .unprepare = visionox_g2647fb105_unprepare,
+ .get_modes = visionox_g2647fb105_get_modes,
+};
+
+static int visionox_g2647fb105_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops visionox_g2647fb105_bl_ops = {
+ .update_status = visionox_g2647fb105_bl_update_status,
+};
+
+static struct backlight_device *
+visionox_g2647fb105_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 1023,
+ .max_brightness = 2047,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &visionox_g2647fb105_bl_ops, &props);
+}
+
+static int visionox_g2647fb105_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_g2647fb105 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(visionox_g2647fb105_supplies),
+ visionox_g2647fb105_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &visionox_g2647fb105_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = visionox_g2647fb105_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void visionox_g2647fb105_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_g2647fb105 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_g2647fb105_of_match[] = {
+ { .compatible = "visionox,g2647fb105" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_g2647fb105_of_match);
+
+static struct mipi_dsi_driver visionox_g2647fb105_driver = {
+ .probe = visionox_g2647fb105_probe,
+ .remove = visionox_g2647fb105_remove,
+ .driver = {
+ .name = "panel-visionox-g2647fb105",
+ .of_match_table = visionox_g2647fb105_of_match,
+ },
+};
+module_mipi_dsi_driver(visionox_g2647fb105_driver);
+
+MODULE_AUTHOR("Alexander Baransky <sanyapilot496@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Visionox G2647FB105 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
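Two details of this new driver are worth noting. mipi_dsi_dcs_set_display_brightness_large() is the 16-bit form of the DCS brightness write, which the 0-2047 backlight scale here requires. And drm_panel_add() has no device-managed form, so the probe error path and the .remove callback must both call drm_panel_remove() by hand even though every other resource is devm-managed.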
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
index 3ea0a86f6e69..690cccedd438 100644
--- a/drivers/gpu/drm/panel/panel-visionox-r66451.c
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -255,9 +255,11 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
struct drm_dsc_config *dsc;
int ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_r66451, panel,
+ &visionox_r66451_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
dsc = devm_kzalloc(dev, sizeof(*dsc), GFP_KERNEL);
if (!dsc)
@@ -297,7 +299,6 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &visionox_r66451_funcs, DRM_MODE_CONNECTOR_DSI);
ctx->panel.backlight = visionox_r66451_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index be3a9797fbce..909c280eab1f 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
@@ -15,11 +16,138 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+struct visionox_rm69299_panel_desc {
+ const struct drm_display_mode *mode;
+ const u8 *init_seq;
+ unsigned int init_seq_len;
+};
+
struct visionox_rm69299 {
struct drm_panel panel;
- struct regulator_bulk_data supplies[2];
+ struct regulator_bulk_data *supplies;
struct gpio_desc *reset_gpio;
struct mipi_dsi_device *dsi;
+ const struct visionox_rm69299_panel_desc *desc;
+};
+
+static const struct regulator_bulk_data visionox_rm69299_supplies[] = {
+ { .supply = "vdda", .init_load_uA = 32000 },
+ { .supply = "vdd3p3", .init_load_uA = 13200 },
+};
+
+static const u8 visionox_rm69299_1080x2248_60hz_init_seq[][2] = {
+ { 0xfe, 0x00 }, { 0xc2, 0x08 }, { 0x35, 0x00 }, { 0x51, 0xff },
+};
+
+static const u8 visionox_rm69299_1080x2160_60hz_init_seq[][2] = {
+ { 0xfe, 0x40 }, { 0x05, 0x04 }, { 0x06, 0x08 }, { 0x08, 0x04 },
+ { 0x09, 0x08 }, { 0x0a, 0x07 }, { 0x0b, 0xcc }, { 0x0c, 0x07 },
+ { 0x0d, 0x90 }, { 0x0f, 0x87 }, { 0x20, 0x8d }, { 0x21, 0x8d },
+ { 0x24, 0x05 }, { 0x26, 0x05 }, { 0x28, 0x05 }, { 0x2a, 0x05 },
+ { 0x2d, 0x28 }, { 0x2f, 0x28 }, { 0x30, 0x32 }, { 0x31, 0x32 },
+ { 0x37, 0x80 }, { 0x38, 0x30 }, { 0x39, 0xa8 }, { 0x46, 0x48 },
+ { 0x47, 0x48 }, { 0x6b, 0x10 }, { 0x6f, 0x02 }, { 0x74, 0x2b },
+ { 0x80, 0x1a }, { 0xfe, 0x40 }, { 0x93, 0x10 }, { 0x16, 0x00 },
+ { 0x85, 0x07 }, { 0x84, 0x01 }, { 0x86, 0x0f }, { 0x87, 0x05 },
+ { 0x8c, 0x00 }, { 0x88, 0x2e }, { 0x89, 0x2e }, { 0x8b, 0x09 },
+ { 0x95, 0x00 }, { 0x91, 0x00 }, { 0x90, 0x00 }, { 0x8d, 0xd0 },
+ { 0x8a, 0x03 }, { 0xfe, 0xa0 }, { 0x13, 0x00 }, { 0x33, 0x00 },
+ { 0x0b, 0x33 }, { 0x36, 0x1e }, { 0x31, 0x88 }, { 0x32, 0x88 },
+ { 0x37, 0xf1 }, { 0xfe, 0x50 }, { 0x00, 0x00 }, { 0x01, 0x00 },
+ { 0x02, 0x00 }, { 0x03, 0xe9 }, { 0x04, 0x00 }, { 0x05, 0xf6 },
+ { 0x06, 0x01 }, { 0x07, 0x2c }, { 0x08, 0x01 }, { 0x09, 0x62 },
+ { 0x0a, 0x01 }, { 0x0b, 0x98 }, { 0x0c, 0x01 }, { 0x0d, 0xbf },
+ { 0x0e, 0x01 }, { 0x0f, 0xf6 }, { 0x10, 0x02 }, { 0x11, 0x24 },
+ { 0x12, 0x02 }, { 0x13, 0x4e }, { 0x14, 0x02 }, { 0x15, 0x70 },
+ { 0x16, 0x02 }, { 0x17, 0xaf }, { 0x18, 0x02 }, { 0x19, 0xe2 },
+ { 0x1a, 0x03 }, { 0x1b, 0x1f }, { 0x1c, 0x03 }, { 0x1d, 0x52 },
+ { 0x1e, 0x03 }, { 0x1f, 0x82 }, { 0x20, 0x03 }, { 0x21, 0xb6 },
+ { 0x22, 0x03 }, { 0x23, 0xf0 }, { 0x24, 0x04 }, { 0x25, 0x1f },
+ { 0x26, 0x04 }, { 0x27, 0x37 }, { 0x28, 0x04 }, { 0x29, 0x59 },
+ { 0x2a, 0x04 }, { 0x2b, 0x68 }, { 0x30, 0x04 }, { 0x31, 0x85 },
+ { 0x32, 0x04 }, { 0x33, 0xa2 }, { 0x34, 0x04 }, { 0x35, 0xbc },
+ { 0x36, 0x04 }, { 0x37, 0xd8 }, { 0x38, 0x04 }, { 0x39, 0xf4 },
+ { 0x3a, 0x05 }, { 0x3b, 0x0e }, { 0x40, 0x05 }, { 0x41, 0x13 },
+ { 0x42, 0x05 }, { 0x43, 0x1f }, { 0x44, 0x05 }, { 0x45, 0x1f },
+ { 0x46, 0x00 }, { 0x47, 0x00 }, { 0x48, 0x01 }, { 0x49, 0x43 },
+ { 0x4a, 0x01 }, { 0x4b, 0x4c }, { 0x4c, 0x01 }, { 0x4d, 0x6f },
+ { 0x4e, 0x01 }, { 0x4f, 0x92 }, { 0x50, 0x01 }, { 0x51, 0xb5 },
+ { 0x52, 0x01 }, { 0x53, 0xd4 }, { 0x58, 0x02 }, { 0x59, 0x06 },
+ { 0x5a, 0x02 }, { 0x5b, 0x33 }, { 0x5c, 0x02 }, { 0x5d, 0x59 },
+ { 0x5e, 0x02 }, { 0x5f, 0x7d }, { 0x60, 0x02 }, { 0x61, 0xbd },
+ { 0x62, 0x02 }, { 0x63, 0xf7 }, { 0x64, 0x03 }, { 0x65, 0x31 },
+ { 0x66, 0x03 }, { 0x67, 0x63 }, { 0x68, 0x03 }, { 0x69, 0x9d },
+ { 0x6a, 0x03 }, { 0x6b, 0xd2 }, { 0x6c, 0x04 }, { 0x6d, 0x05 },
+ { 0x6e, 0x04 }, { 0x6f, 0x38 }, { 0x70, 0x04 }, { 0x71, 0x51 },
+ { 0x72, 0x04 }, { 0x73, 0x70 }, { 0x74, 0x04 }, { 0x75, 0x85 },
+ { 0x76, 0x04 }, { 0x77, 0xa1 }, { 0x78, 0x04 }, { 0x79, 0xc0 },
+ { 0x7a, 0x04 }, { 0x7b, 0xd8 }, { 0x7c, 0x04 }, { 0x7d, 0xf2 },
+ { 0x7e, 0x05 }, { 0x7f, 0x10 }, { 0x80, 0x05 }, { 0x81, 0x21 },
+ { 0x82, 0x05 }, { 0x83, 0x2e }, { 0x84, 0x05 }, { 0x85, 0x3a },
+ { 0x86, 0x05 }, { 0x87, 0x3e }, { 0x88, 0x00 }, { 0x89, 0x00 },
+ { 0x8a, 0x01 }, { 0x8b, 0x86 }, { 0x8c, 0x01 }, { 0x8d, 0x8f },
+ { 0x8e, 0x01 }, { 0x8f, 0xb3 }, { 0x90, 0x01 }, { 0x91, 0xd7 },
+ { 0x92, 0x01 }, { 0x93, 0xfb }, { 0x94, 0x02 }, { 0x95, 0x18 },
+ { 0x96, 0x02 }, { 0x97, 0x4f }, { 0x98, 0x02 }, { 0x99, 0x7e },
+ { 0x9a, 0x02 }, { 0x9b, 0xa6 }, { 0x9c, 0x02 }, { 0x9d, 0xcf },
+ { 0x9e, 0x03 }, { 0x9f, 0x14 }, { 0xa4, 0x03 }, { 0xa5, 0x52 },
+ { 0xa6, 0x03 }, { 0xa7, 0x93 }, { 0xac, 0x03 }, { 0xad, 0xcf },
+ { 0xae, 0x04 }, { 0xaf, 0x08 }, { 0xb0, 0x04 }, { 0xb1, 0x42 },
+ { 0xb2, 0x04 }, { 0xb3, 0x7f }, { 0xb4, 0x04 }, { 0xb5, 0xb4 },
+ { 0xb6, 0x04 }, { 0xb7, 0xcc }, { 0xb8, 0x04 }, { 0xb9, 0xf2 },
+ { 0xba, 0x05 }, { 0xbb, 0x0c }, { 0xbc, 0x05 }, { 0xbd, 0x26 },
+ { 0xbe, 0x05 }, { 0xbf, 0x4b }, { 0xc0, 0x05 }, { 0xc1, 0x64 },
+ { 0xc2, 0x05 }, { 0xc3, 0x83 }, { 0xc4, 0x05 }, { 0xc5, 0xa1 },
+ { 0xc6, 0x05 }, { 0xc7, 0xba }, { 0xc8, 0x05 }, { 0xc9, 0xc4 },
+ { 0xca, 0x05 }, { 0xcb, 0xd5 }, { 0xcc, 0x05 }, { 0xcd, 0xd5 },
+ { 0xce, 0x00 }, { 0xcf, 0xce }, { 0xd0, 0x00 }, { 0xd1, 0xdb },
+ { 0xd2, 0x01 }, { 0xd3, 0x32 }, { 0xd4, 0x01 }, { 0xd5, 0x3b },
+ { 0xd6, 0x01 }, { 0xd7, 0x74 }, { 0xd8, 0x01 }, { 0xd9, 0x7d },
+ { 0xfe, 0x60 }, { 0x00, 0xcc }, { 0x01, 0x0f }, { 0x02, 0xff },
+ { 0x03, 0x01 }, { 0x04, 0x00 }, { 0x05, 0x02 }, { 0x06, 0x00 },
+ { 0x07, 0x00 }, { 0x09, 0xc4 }, { 0x0a, 0x00 }, { 0x0b, 0x04 },
+ { 0x0c, 0x01 }, { 0x0d, 0x00 }, { 0x0e, 0x04 }, { 0x0f, 0x00 },
+ { 0x10, 0x71 }, { 0x12, 0xc4 }, { 0x13, 0x00 }, { 0x14, 0x04 },
+ { 0x15, 0x01 }, { 0x16, 0x00 }, { 0x17, 0x06 }, { 0x18, 0x00 },
+ { 0x19, 0x71 }, { 0x1b, 0xc4 }, { 0x1c, 0x00 }, { 0x1d, 0x02 },
+ { 0x1e, 0x00 }, { 0x1f, 0x00 }, { 0x20, 0x08 }, { 0x21, 0x66 },
+ { 0x22, 0xb4 }, { 0x24, 0xc4 }, { 0x25, 0x00 }, { 0x26, 0x02 },
+ { 0x27, 0x00 }, { 0x28, 0x00 }, { 0x29, 0x07 }, { 0x2a, 0x66 },
+ { 0x2b, 0xb4 }, { 0x2f, 0xc4 }, { 0x30, 0x00 }, { 0x31, 0x04 },
+ { 0x32, 0x01 }, { 0x33, 0x00 }, { 0x34, 0x03 }, { 0x35, 0x00 },
+ { 0x36, 0x71 }, { 0x38, 0xc4 }, { 0x39, 0x00 }, { 0x3a, 0x04 },
+ { 0x3b, 0x01 }, { 0x3d, 0x00 }, { 0x3f, 0x05 }, { 0x40, 0x00 },
+ { 0x41, 0x71 }, { 0x83, 0xce }, { 0x84, 0x02 }, { 0x85, 0x20 },
+ { 0x86, 0xdc }, { 0x87, 0x00 }, { 0x88, 0x04 }, { 0x89, 0x00 },
+ { 0x8a, 0xbb }, { 0x8b, 0x80 }, { 0xc7, 0x0e }, { 0xc8, 0x05 },
+ { 0xc9, 0x1f }, { 0xca, 0x06 }, { 0xcb, 0x00 }, { 0xcc, 0x03 },
+ { 0xcd, 0x04 }, { 0xce, 0x1f }, { 0xcf, 0x1f }, { 0xd0, 0x1f },
+ { 0xd1, 0x1f }, { 0xd2, 0x1f }, { 0xd3, 0x1f }, { 0xd4, 0x1f },
+ { 0xd5, 0x1f }, { 0xd6, 0x1f }, { 0xd7, 0x17 }, { 0xd8, 0x1f },
+ { 0xd9, 0x16 }, { 0xda, 0x1f }, { 0xdb, 0x0e }, { 0xdc, 0x01 },
+ { 0xdd, 0x1f }, { 0xde, 0x02 }, { 0xdf, 0x00 }, { 0xe0, 0x03 },
+ { 0xe1, 0x04 }, { 0xe2, 0x1f }, { 0xe3, 0x1f }, { 0xe4, 0x1f },
+ { 0xe5, 0x1f }, { 0xe6, 0x1f }, { 0xe7, 0x1f }, { 0xe8, 0x1f },
+ { 0xe9, 0x1f }, { 0xea, 0x1f }, { 0xeb, 0x17 }, { 0xec, 0x1f },
+ { 0xed, 0x16 }, { 0xee, 0x1f }, { 0xef, 0x03 }, { 0xfe, 0x70 },
+ { 0x5a, 0x0b }, { 0x5b, 0x0b }, { 0x5c, 0x55 }, { 0x5d, 0x24 },
+ { 0xfe, 0x90 }, { 0x12, 0x24 }, { 0x13, 0x49 }, { 0x14, 0x92 },
+ { 0x15, 0x86 }, { 0x16, 0x61 }, { 0x17, 0x18 }, { 0x18, 0x24 },
+ { 0x19, 0x49 }, { 0x1a, 0x92 }, { 0x1b, 0x86 }, { 0x1c, 0x61 },
+ { 0x1d, 0x18 }, { 0x1e, 0x24 }, { 0x1f, 0x49 }, { 0x20, 0x92 },
+ { 0x21, 0x86 }, { 0x22, 0x61 }, { 0x23, 0x18 }, { 0xfe, 0x40 },
+ { 0x0e, 0x10 }, { 0xfe, 0xa0 }, { 0x04, 0x80 }, { 0x16, 0x00 },
+ { 0x26, 0x10 }, { 0x2f, 0x37 }, { 0xfe, 0xd0 }, { 0x06, 0x0f },
+ { 0x4b, 0x00 }, { 0x56, 0x4a }, { 0xfe, 0x00 }, { 0xc2, 0x09 },
+ { 0x35, 0x00 }, { 0xfe, 0x70 }, { 0x7d, 0x61 }, { 0x7f, 0x00 },
+ { 0x7e, 0x4e }, { 0x52, 0x2c }, { 0x49, 0x00 }, { 0x4a, 0x00 },
+ { 0x4b, 0x00 }, { 0x4c, 0x00 }, { 0x4d, 0xe8 }, { 0x4e, 0x25 },
+ { 0x4f, 0x6e }, { 0x50, 0xae }, { 0x51, 0x2f }, { 0xad, 0xf4 },
+ { 0xae, 0x8f }, { 0xaf, 0x00 }, { 0xb0, 0x54 }, { 0xb1, 0x3a },
+ { 0xb2, 0x00 }, { 0xb3, 0x00 }, { 0xb4, 0x00 }, { 0xb5, 0x00 },
+ { 0xb6, 0x18 }, { 0xb7, 0x30 }, { 0xb8, 0x4a }, { 0xb9, 0x98 },
+ { 0xba, 0x30 }, { 0xbb, 0x60 }, { 0xbc, 0x50 }, { 0xbd, 0x00 },
+ { 0xbe, 0x00 }, { 0xbf, 0x39 }, { 0xfe, 0x00 }, { 0x51, 0x66 },
};
static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
@@ -31,7 +159,8 @@ static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
{
int ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_rm69299_supplies),
+ ctx->supplies);
if (ret < 0)
return ret;
@@ -54,37 +183,32 @@ static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
{
gpiod_set_value(ctx->reset_gpio, 0);
- return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return regulator_bulk_disable(ARRAY_SIZE(visionox_rm69299_supplies),
+ ctx->supplies);
}
static int visionox_rm69299_unprepare(struct drm_panel *panel)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
ctx->dsi->mode_flags = 0;
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
- if (ret < 0)
- dev_err(ctx->panel.dev, "set_display_off cmd failed ret = %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
/* 120ms delay required here as per DCS spec */
- msleep(120);
-
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "enter_sleep cmd failed ret = %d\n", ret);
- }
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = visionox_rm69299_power_off(ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return ret;
+ return visionox_rm69299_power_off(ctx);
}
static int visionox_rm69299_prepare(struct drm_panel *panel)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+ int ret, i;
ret = visionox_rm69299_power_on(ctx);
if (ret < 0)
@@ -92,52 +216,20 @@ static int visionox_rm69299_prepare(struct drm_panel *panel)
ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 0 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 1 failed, ret = %d\n", ret);
- goto power_off;
- }
+ for (i = 0; i < ctx->desc->init_seq_len; i++)
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, &ctx->desc->init_seq[i * 2], 2);
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 2 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 3 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "exit_sleep_mode cmd failed ret = %d\n", ret);
- goto power_off;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
/* Per DSI spec wait 120ms after sending exit sleep DCS command */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "set_display_on cmd failed ret = %d\n", ret);
- goto power_off;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
/* Per DSI spec wait 120ms after sending set_display_on DCS command */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
-
-power_off:
- return ret;
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
@@ -154,14 +246,26 @@ static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
.flags = 0,
};
+static const struct drm_display_mode visionox_rm69299_1080x2160_60hz = {
+ .clock = 158695,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 2,
+ .htotal = 1080 + 26 + 2 + 36,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 8,
+ .vsync_end = 2160 + 8 + 4,
+ .vtotal = 2160 + 8 + 4 + 4,
+ .flags = 0,
+};
+
static int visionox_rm69299_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev,
- &visionox_rm69299_1080x2248_60hz);
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
dev_err(ctx->panel.dev, "failed to create a new display mode\n");
return 0;
@@ -187,20 +291,22 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_rm69299, panel,
+ &visionox_rm69299_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->desc = device_get_match_data(dev);
+ if (!ctx->desc)
+ return -EINVAL;
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- ctx->supplies[0].supply = "vdda";
- ctx->supplies[0].init_load_uA = 32000;
- ctx->supplies[1].supply = "vdd3p3";
- ctx->supplies[1].init_load_uA = 13200;
-
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(visionox_rm69299_supplies),
+ visionox_rm69299_supplies, &ctx->supplies);
if (ret < 0)
return ret;
@@ -210,8 +316,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
- drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
dsi->lanes = 4;
@@ -239,8 +343,23 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&ctx->panel);
}
+static const struct visionox_rm69299_panel_desc visionox_rm69299_1080p_display_desc = {
+ .mode = &visionox_rm69299_1080x2248_60hz,
+ .init_seq = (const u8 *)visionox_rm69299_1080x2248_60hz_init_seq,
+ .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2248_60hz_init_seq),
+};
+
+static const struct visionox_rm69299_panel_desc visionox_rm69299_shift_desc = {
+ .mode = &visionox_rm69299_1080x2160_60hz,
+ .init_seq = (const u8 *)visionox_rm69299_1080x2160_60hz_init_seq,
+ .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2160_60hz_init_seq),
+};
+
static const struct of_device_id visionox_rm69299_of_match[] = {
- { .compatible = "visionox,rm69299-1080p-display", },
+ { .compatible = "visionox,rm69299-1080p-display",
+ .data = &visionox_rm69299_1080p_display_desc },
+ { .compatible = "visionox,rm69299-shift",
+ .data = &visionox_rm69299_shift_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);
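Both init tables are u8[][2] arrays of { register, value } pairs, so ARRAY_SIZE() counts pairs; the flattening cast in the descriptors lets prepare() address pair i as &init_seq[i * 2] and send it as one two-byte DCS write, as the loop earlier in this hunk shows. The regulator change follows the same scheme as the other drivers in this series: a shared const table describes the supplies, and devm_regulator_bulk_get_const() returns a device-managed mutable copy through ctx->supplies.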
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
index 4db7fa8d74c4..e53645d59413 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
@@ -360,9 +360,11 @@ static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
struct visionox_rm692e5 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_rm692e5, panel,
+ &visionox_rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(visionox_rm692e5_supplies),
@@ -383,8 +385,6 @@ static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &visionox_rm692e5_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = visionox_rm692e5_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
index 17b8defe79c1..97a79411e1ec 100644
--- a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -248,9 +248,11 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
struct visionox_vtdr6130 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_vtdr6130, panel,
+ &visionox_vtdr6130_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(visionox_vtdr6130_supplies),
@@ -273,9 +275,6 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &visionox_vtdr6130_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = visionox_vtdr6130_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
index 2591ff8f0d4e..dd74610bd2eb 100644
--- a/drivers/gpu/drm/panel/panel-widechips-ws2401.c
+++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
@@ -347,9 +347,11 @@ static int ws2401_probe(struct spi_device *spi)
struct ws2401 *ws;
int ret;
- ws = devm_kzalloc(dev, sizeof(*ws), GFP_KERNEL);
- if (!ws)
- return -ENOMEM;
+ ws = devm_drm_panel_alloc(dev, struct ws2401, panel, &ws2401_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ws))
+ return PTR_ERR(ws);
+
ws->dev = dev;
/*
@@ -379,9 +381,6 @@ static int ws2401_probe(struct spi_device *spi)
ws2401_read_mtp_id(ws);
ws2401_power_off(ws);
- drm_panel_init(&ws->panel, dev, &ws2401_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&ws->panel);
if (ret)
return dev_err_probe(dev, ret,
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 2b91414c2829..fc6516373b5d 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -241,9 +241,10 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
struct xpp055c272 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct xpp055c272, panel,
+ &xpp055c272_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
@@ -269,9 +270,6 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&ctx->panel, &dsi->dev, &xpp055c272_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 3385fd3ef41a..5d0dce10336b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -29,7 +29,7 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *ptdev = dev_get_drvdata(dev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
int err;
@@ -40,7 +40,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
err = dev_pm_opp_set_rate(dev, *freq);
if (!err)
- ptdev->pfdevfreq.current_frequency = *freq;
+ pfdev->pfdevfreq.current_frequency = *freq;
return err;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index a45e4addcc19..04bec27449cb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -209,10 +209,25 @@ int panfrost_device_init(struct panfrost_device *pfdev)
spin_lock_init(&pfdev->cycle_counter.lock);
+#ifdef CONFIG_DEBUG_FS
+ mutex_init(&pfdev->debugfs.gems_lock);
+ INIT_LIST_HEAD(&pfdev->debugfs.gems_list);
+#endif
+
+ err = panfrost_pm_domain_init(pfdev);
+ if (err)
+ return err;
+
+ err = panfrost_reset_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "reset init failed %d\n", err);
+ goto out_pm_domain;
+ }
+
err = panfrost_clk_init(pfdev);
if (err) {
dev_err(pfdev->dev, "clk init failed %d\n", err);
- return err;
+ goto out_reset;
}
err = panfrost_devfreq_init(pfdev);
@@ -229,25 +244,15 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto out_devfreq;
}
- err = panfrost_reset_init(pfdev);
- if (err) {
- dev_err(pfdev->dev, "reset init failed %d\n", err);
- goto out_regulator;
- }
-
- err = panfrost_pm_domain_init(pfdev);
- if (err)
- goto out_reset;
-
pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
- goto out_pm_domain;
+ goto out_regulator;
}
err = panfrost_gpu_init(pfdev);
if (err)
- goto out_pm_domain;
+ goto out_regulator;
err = panfrost_mmu_init(pfdev);
if (err)
@@ -268,16 +273,16 @@ out_mmu:
panfrost_mmu_fini(pfdev);
out_gpu:
panfrost_gpu_fini(pfdev);
-out_pm_domain:
- panfrost_pm_domain_fini(pfdev);
-out_reset:
- panfrost_reset_fini(pfdev);
out_regulator:
panfrost_regulator_fini(pfdev);
out_devfreq:
panfrost_devfreq_fini(pfdev);
out_clk:
panfrost_clk_fini(pfdev);
+out_reset:
+ panfrost_reset_fini(pfdev);
+out_pm_domain:
+ panfrost_pm_domain_fini(pfdev);
return err;
}
@@ -287,11 +292,11 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
panfrost_job_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
- panfrost_pm_domain_fini(pfdev);
- panfrost_reset_fini(pfdev);
panfrost_devfreq_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
+ panfrost_reset_fini(pfdev);
+ panfrost_pm_domain_fini(pfdev);
}
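The reordering restores strict LIFO teardown: panfrost_device_init() now acquires the PM domain and reset control first, then clocks, devfreq and regulators, and both the error labels and panfrost_device_fini() release everything in exact reverse order.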
#define PANFROST_EXCEPTION(id) \
@@ -406,11 +411,36 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
static int panfrost_device_runtime_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+ ret = reset_control_deassert(pfdev->rstc);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(pfdev->clock);
+ if (ret)
+ goto err_clk;
+
+ if (pfdev->bus_clock) {
+ ret = clk_enable(pfdev->bus_clock);
+ if (ret)
+ goto err_bus_clk;
+ }
+ }
panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
return 0;
+
+err_bus_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+ clk_disable(pfdev->clock);
+err_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+ reset_control_assert(pfdev->rstc);
+ return ret;
}
static int panfrost_device_runtime_suspend(struct device *dev)
@@ -426,6 +456,14 @@ static int panfrost_device_runtime_suspend(struct device *dev)
panfrost_gpu_suspend_irq(pfdev);
panfrost_gpu_power_off(pfdev);
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+ if (pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+
+ clk_disable(pfdev->clock);
+ reset_control_assert(pfdev->rstc);
+ }
+
return 0;
}
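For SoCs flagged GPU_PM_RT, runtime PM now also gates the reset line and clocks: resume deasserts the reset before enabling the core and then the bus clock, suspend reverses that order once the GPU has been powered off, and the err_bus_clk/err_clk labels unwind a partial resume in reverse.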
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index cffcb0ac7c11..077525a3ad68 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -36,10 +36,21 @@ enum panfrost_drv_comp_bits {
* enum panfrost_gpu_pm - Supported kernel power management features
* @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
* @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend
+ * @GPU_PM_RT: Allow disabling clocks and asserting the reset control during
+ * runtime suspend
*/
enum panfrost_gpu_pm {
GPU_PM_CLK_DIS,
GPU_PM_VREG_OFF,
+ GPU_PM_RT
+};
+
+/**
+ * enum panfrost_gpu_quirks - GPU optional quirks
+ * @GPU_QUIRK_FORCE_AARCH64_PGTABLE: Use AARCH64_4K page table format
+ */
+enum panfrost_gpu_quirks {
+ GPU_QUIRK_FORCE_AARCH64_PGTABLE,
};
struct panfrost_features {
@@ -95,6 +106,20 @@ struct panfrost_compatible {
/* Allowed PM features */
u8 pm_features;
+
+ /* GPU configuration quirks */
+ u8 gpu_quirks;
+};
+
+/**
+ * struct panfrost_device_debugfs - Device-wide DebugFS tracking structures
+ */
+struct panfrost_device_debugfs {
+ /** @gems_list: Device-wide list of GEM objects owned by at least one file. */
+ struct list_head gems_list;
+
+ /** @gems_lock: Serializes access to the device-wide list of GEM objects. */
+ struct mutex gems_lock;
};
struct panfrost_device {
@@ -150,6 +175,10 @@ struct panfrost_device {
atomic_t use_count;
spinlock_t lock;
} cycle_counter;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panfrost_device_debugfs debugfs;
+#endif
};
struct panfrost_mmu {
@@ -162,6 +191,11 @@ struct panfrost_mmu {
int as;
atomic_t as_count;
struct list_head list;
+ struct {
+ u64 transtab;
+ u64 memattr;
+ u64 transcfg;
+ } cfg;
};
struct panfrost_engine_usage {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 0f3935556ac7..1ea6c509a5d5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
@@ -312,7 +313,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&job->base,
&file_priv->sched_entity[slot],
- 1, NULL);
+ 1, NULL, file->client_id);
if (ret)
goto out_put_job;
@@ -476,7 +477,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
}
}
- args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);
+ args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv);
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
@@ -495,6 +496,46 @@ out_put_object:
return ret;
}
+static int panfrost_ioctl_set_label_bo(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panfrost_set_label_bo *args = data;
+ struct drm_gem_object *obj;
+ const char *label = NULL;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->label) {
+ label = strndup_user(u64_to_user_ptr(args->label),
+ PANFROST_BO_LABEL_MAXLEN);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
+ if (ret == -EINVAL)
+ ret = -E2BIG;
+ goto err_put_obj;
+ }
+ }
+
+ /*
+ * We treat passing a label of length 0 and passing a NULL label
+ * differently, because even though they might seem conceptually
+ * similar, future uses of the BO label might expect a different
+ * behaviour in each case.
+ */
+ panfrost_gem_set_label(obj, label);
+
+err_put_obj:
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
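A hedged sketch of the matching userspace call follows; the field names are inferred from the handler above (args->handle, args->pad, args->label), and include/uapi/drm/panfrost_drm.h has the authoritative layout. Labels longer than PANFROST_BO_LABEL_MAXLEN fail with -E2BIG, and a NULL label pointer clears any existing label:

	#include <stdint.h>
	#include <xf86drm.h>
	#include <drm/panfrost_drm.h>

	static int panfrost_bo_set_label(int fd, uint32_t bo_handle, const char *name)
	{
		struct drm_panfrost_set_label_bo args = {
			.handle = bo_handle,
			.pad = 0,	/* must be zero; the handler rejects non-zero pad */
			.label = (uint64_t)(uintptr_t)name,	/* NULL clears the label */
		};

		return drmIoctl(fd, DRM_IOCTL_PANFROST_SET_LABEL_BO, &args);
	}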
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -561,6 +602,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW),
};
static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
@@ -618,6 +660,37 @@ static const struct file_operations panfrost_drm_driver_fops = {
.show_fdinfo = drm_show_fdinfo,
};
+#ifdef CONFIG_DEBUG_FS
+static int panfrost_gems_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct panfrost_device *pfdev = dev->dev_private;
+
+ panfrost_gem_debugfs_print_bos(pfdev, m);
+
+ return 0;
+}
+
+static struct drm_info_list panfrost_debugfs_list[] = {
+ {"gems", panfrost_gems_show, 0, NULL},
+};
+
+static int panfrost_gems_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panfrost_debugfs_list,
+ ARRAY_SIZE(panfrost_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+
+static void panfrost_debugfs_init(struct drm_minor *minor)
+{
+ panfrost_gems_debugfs_init(minor);
+}
+#endif
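Once registered through drm_debugfs_create_files(), the entry appears as "gems" under the DRM minor's debugfs directory (typically /sys/kernel/debug/dri/<minor>/gems) and dumps the device-wide BO list that panfrost_gem_debugfs_bo_add() below maintains.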
+
/*
* Panfrost driver version:
* - 1.0 - initial interface
@@ -625,6 +698,7 @@ static const struct file_operations panfrost_drm_driver_fops = {
* - 1.2 - adds AFBC_FEATURES query
* - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
* - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
+ * - 1.4 - adds SET_LABEL_BO
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -637,10 +711,13 @@ static const struct drm_driver panfrost_drm_driver = {
.name = "panfrost",
.desc = "panfrost DRM",
.major = 1,
- .minor = 3,
+ .minor = 4,
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = panfrost_debugfs_init,
+#endif
};
static int panfrost_probe(struct platform_device *pdev)
@@ -776,12 +853,21 @@ static const struct panfrost_compatible default_data = {
.pm_domain_names = NULL,
};
+static const struct panfrost_compatible allwinner_h616_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 1,
+ .pm_features = BIT(GPU_PM_RT),
+};
+
static const struct panfrost_compatible amlogic_data = {
.num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
+static const char * const mediatek_pm_domains[] = { "core0", "core1", "core2",
+ "core3", "core4" };
/*
* The old data with two power supplies for MT8183 is here only to
* keep retro-compatibility with older devicetrees, as DVFS will
@@ -790,51 +876,55 @@ static const struct panfrost_compatible amlogic_data = {
* On new devicetrees please use the _b variant with a single and
* coupled regulators instead.
*/
-static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
-static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
+static const char * const legacy_supplies[] = { "mali", "sram", NULL };
static const struct panfrost_compatible mediatek_mt8183_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
- .supply_names = mediatek_mt8183_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
- .pm_domain_names = mediatek_mt8183_pm_domains,
+ .num_supplies = ARRAY_SIZE(legacy_supplies) - 1,
+ .supply_names = legacy_supplies,
+ .num_pm_domains = 3,
+ .pm_domain_names = mediatek_pm_domains,
};
-static const char * const mediatek_mt8183_b_supplies[] = { "mali", NULL };
static const struct panfrost_compatible mediatek_mt8183_b_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
- .supply_names = mediatek_mt8183_b_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
- .pm_domain_names = mediatek_mt8183_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 3,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
-static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" };
static const struct panfrost_compatible mediatek_mt8186_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
- .supply_names = mediatek_mt8183_b_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains),
- .pm_domain_names = mediatek_mt8186_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 2,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
-/* MT8188 uses the same power domains and power supplies as MT8183 */
static const struct panfrost_compatible mediatek_mt8188_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
- .supply_names = mediatek_mt8183_b_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
- .pm_domain_names = mediatek_mt8183_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 3,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
-static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
-static const char * const mediatek_mt8192_pm_domains[] = { "core0", "core1", "core2",
- "core3", "core4" };
static const struct panfrost_compatible mediatek_mt8192_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8192_supplies) - 1,
- .supply_names = mediatek_mt8192_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
- .pm_domain_names = mediatek_mt8192_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 5,
+ .pm_domain_names = mediatek_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
+};
+
+static const struct panfrost_compatible mediatek_mt8370_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 2,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
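
Note: this refactor relies on shared name tables defined outside this hunk. Judging from the per-SoC arrays it removes, they presumably look like the sketch below; each compatible entry then uses a prefix of mediatek_pm_domains via num_pm_domains (3 for MT8183/MT8188, 2 for MT8186/MT8370, 5 for MT8192).

static const char * const default_supplies[] = { "mali", NULL };
static const char * const mediatek_pm_domains[] = { "core0", "core1", "core2",
						    "core3", "core4" };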
static const struct of_device_id dt_match[] = {
@@ -859,6 +949,8 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
{ .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data },
{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
+ { .compatible = "mediatek,mt8370-mali", .data = &mediatek_mt8370_data },
+ { .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index 47751302f1bc..4042afe2fbf4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job)
goto dump_header;
}
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
@@ -228,7 +228,7 @@ void panfrost_core_dump(struct panfrost_job *job)
vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
iter.hdr->bomap.valid = 1;
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
index 7ed0cd3ea2d4..52f9d69f6db9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_features.h
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -54,6 +54,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g72 (\
@@ -64,6 +65,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g51 hw_features_g72
@@ -77,6 +79,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g76 (\
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 8e0ff3efede7..85d6289a6eda 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
@@ -11,6 +12,36 @@
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
+#ifdef CONFIG_DEBUG_FS
+static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
+ struct panfrost_gem_object *bo)
+{
+ bo->debugfs.creator.tgid = current->group_leader->pid;
+ get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
+
+ mutex_lock(&pfdev->debugfs.gems_lock);
+ list_add_tail(&bo->debugfs.node, &pfdev->debugfs.gems_list);
+ mutex_unlock(&pfdev->debugfs.gems_lock);
+}
+
+static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo)
+{
+ struct panfrost_device *pfdev = bo->base.base.dev->dev_private;
+
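+	/* list_del_init() keeps removal idempotent: once the BO is off the
+	 * list, the node reads as empty and we return early.
+	 */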
+ if (list_empty(&bo->debugfs.node))
+ return;
+
+ mutex_lock(&pfdev->debugfs.gems_lock);
+ list_del_init(&bo->debugfs.node);
+ mutex_unlock(&pfdev->debugfs.gems_lock);
+}
+#else
+static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
+ struct panfrost_gem_object *bo)
+{}
+static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {}
+#endif
+
/* Called by DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -35,6 +66,10 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
*/
WARN_ON_ONCE(!list_empty(&bo->mappings.list));
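+	/* The label may have been allocated with kstrdup_const(), so release
+	 * it with the _const variant.
+	 */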
+ kfree_const(bo->label.str);
+ panfrost_gem_debugfs_bo_rm(bo);
+ mutex_destroy(&bo->label.lock);
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -200,7 +235,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.base.import_attach || bo->base.pages)
+ if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
if (bo->base.madv == PANFROST_MADV_DONTNEED)
@@ -260,6 +295,9 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
obj->base.map_wc = !pfdev->coherent;
+ mutex_init(&obj->label.lock);
+
+ panfrost_gem_debugfs_bo_add(pfdev, obj);
return &obj->base.base;
}
@@ -300,5 +338,153 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
bo = to_panfrost_bo(obj);
bo->noexec = true;
+ /*
+ * We assign this generic label because this function cannot
+ * be reached through any of the Panfrost UM driver-specific
+	 * code paths, unless a label is given by explicitly calling the
+	 * SET_LABEL_BO ioctl. It is therefore preferable to have a
+ * blanket BO tag that tells us the object was imported from
+ * another driver than nothing at all.
+ */
+ panfrost_gem_internal_set_label(obj, "GEM PRIME buffer");
+
return obj;
}
+
+void
+panfrost_gem_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ const char *old_label;
+
+ scoped_guard(mutex, &bo->label.lock) {
+ old_label = bo->label.str;
+ bo->label.str = label;
+ }
+
+ kfree_const(old_label);
+}
+
+void
+panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ const char *str;
+
+ /* We should never attempt labelling a UM-exposed GEM object */
+ if (drm_WARN_ON(bo->base.base.dev, bo->base.base.handle_count > 0))
+ return;
+
+ if (!label)
+ return;
+
+ str = kstrdup_const(label, GFP_KERNEL);
+ if (!str) {
+ /* Failing to allocate memory for a label isn't a fatal condition */
+ drm_warn(bo->base.base.dev, "Not enough memory to allocate BO label");
+ return;
+ }
+
+ panfrost_gem_set_label(obj, str);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct gem_size_totals {
+ size_t size;
+ size_t resident;
+ size_t reclaimable;
+};
+
+struct flag_def {
+ u32 flag;
+ const char *name;
+};
+
+static void panfrost_gem_debugfs_print_flag_names(struct seq_file *m)
+{
+ int len;
+ int i;
+
+ static const struct flag_def gem_state_flags_names[] = {
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED, "imported"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED, "exported"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED, "purged"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE, "purgeable"},
+ };
+
+ seq_puts(m, "GEM state flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
+ seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i].name,
+ gem_state_flags_names[i].flag, (i < len - 1) ? ", " : "\n\n");
+ }
+}
+
+static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
+ struct seq_file *m,
+ struct gem_size_totals *totals)
+{
+ unsigned int refcount = kref_read(&bo->base.base.refcount);
+ char creator_info[32] = {};
+ size_t resident_size;
+ u32 gem_state_flags = 0;
+
+ /* Skip BOs being destroyed. */
+ if (!refcount)
+ return;
+
+ resident_size = panfrost_gem_rss(&bo->base.base);
+
+ snprintf(creator_info, sizeof(creator_info),
+ "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+ seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
+ creator_info,
+ bo->base.base.name,
+ refcount,
+ bo->base.base.size,
+ resident_size,
+ drm_vma_node_start(&bo->base.base.vma_node));
+
+ if (bo->base.base.import_attach)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
+ if (bo->base.base.dma_buf)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
+
+ if (bo->base.madv < 0)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED;
+ else if (bo->base.madv > 0)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE;
+
+ seq_printf(m, "0x%-10x", gem_state_flags);
+
+ scoped_guard(mutex, &bo->label.lock) {
+ seq_printf(m, "%s\n", bo->label.str ? : "");
+ }
+
+ totals->size += bo->base.base.size;
+ totals->resident += resident_size;
+ if (bo->base.madv > 0)
+ totals->reclaimable += resident_size;
+}
+
+void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
+ struct seq_file *m)
+{
+ struct gem_size_totals totals = {0};
+ struct panfrost_gem_object *bo;
+
+ panfrost_gem_debugfs_print_flag_names(m);
+
+ seq_puts(m, "created-by global-name refcount size resident-size file-offset state label\n");
+ seq_puts(m, "-----------------------------------------------------------------------------------------------------------------------------------\n");
+
+ scoped_guard(mutex, &pfdev->debugfs.gems_lock) {
+ list_for_each_entry(bo, &pfdev->debugfs.gems_list, debugfs.node) {
+ panfrost_gem_debugfs_bo_print(bo, m, &totals);
+ }
+ }
+
+ seq_puts(m, "===================================================================================================================================\n");
+ seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
+ totals.size, totals.resident, totals.reclaimable);
+}
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 7516b7ecf7fe..8de3e76f2717 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -8,6 +8,46 @@
#include <drm/drm_mm.h>
struct panfrost_mmu;
+struct panfrost_device;
+
+#define PANFROST_BO_LABEL_MAXLEN 4096
+
+enum panfrost_debugfs_gem_state_flags {
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED: GEM BO is PRIME imported. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED = BIT(0),
+
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED: GEM BO is PRIME exported. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED = BIT(1),
+
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED: GEM BO was reclaimed by the shrinker. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED = BIT(2),
+
+ /**
+ * @PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE: GEM BO pages were marked as no longer
+ * needed by UM and can be reclaimed by the shrinker.
+ */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE = BIT(3),
+};
+
+/**
+ * struct panfrost_gem_debugfs - GEM object's DebugFS list information
+ */
+struct panfrost_gem_debugfs {
+ /**
+ * @node: Node used to insert the object in the device-wide list of
+ * GEM objects, to display information about it through a DebugFS file.
+ */
+ struct list_head node;
+
+ /** @creator: Information about the UM process which created the GEM. */
+ struct {
+		/** @creator.process_name: Group leader name of the process that created the GEM */
+ char process_name[TASK_COMM_LEN];
+
+		/** @creator.tgid: PID of the group leader of the process that created the GEM */
+ pid_t tgid;
+ } creator;
+};
struct panfrost_gem_object {
struct drm_gem_shmem_object base;
@@ -41,8 +81,26 @@ struct panfrost_gem_object {
*/
size_t heap_rss_size;
+ /**
+ * @label: BO tagging fields. The label can be assigned within the
+ * driver itself or through a specific IOCTL.
+ */
+ struct {
+ /**
+		 * @label.str: Pointer to a NULL-terminated string holding the label.
+ */
+ const char *str;
+
+		/** @label.lock: Protects access to the @label.str field. */
+ struct mutex lock;
+ } label;
+
bool noexec :1;
bool is_heap :1;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panfrost_gem_debugfs debugfs;
+#endif
};
struct panfrost_gem_mapping {
@@ -89,4 +147,12 @@ void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
int panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+void panfrost_gem_set_label(struct drm_gem_object *obj, const char *label);
+void panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label);
+
+#ifdef CONFIG_DEBUG_FS
+void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
+ struct seq_file *m);
+#endif
+
#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 3d9f51bd48b6..02b60ea1433a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -51,7 +51,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo);
- drm_gem_shmem_purge(&bo->base);
+ drm_gem_shmem_purge_locked(&bo->base);
ret = true;
dma_resv_unlock(shmem->base.resv);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 5657106c2f7d..82acabb21b27 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -751,11 +751,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
int js = panfrost_job_get_slot(job);
/*
- * If the GPU managed to complete this jobs fence, the timeout is
- * spurious. Bail out.
+	 * If the GPU managed to complete this job's fence, the timeout has
+	 * fired before the free-job worker ran. The timeout is spurious, so bail out.
*/
if (dma_fence_is_signaled(job->done_fence))
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/*
* Panfrost IRQ handler may take a long time to process an interrupt
@@ -770,7 +770,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
if (dma_fence_is_signaled(job->done_fence)) {
dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
@@ -786,7 +786,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
atomic_set(&pfdev->reset.pending, 1);
panfrost_reset(pfdev, sched_job);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void panfrost_reset_work(struct work_struct *work)
@@ -841,7 +841,6 @@ int panfrost_job_init(struct panfrost_device *pfdev)
.num_rqs = DRM_SCHED_PRIORITY_COUNT,
.credit_limit = 2,
.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
- .timeout_wq = pfdev->reset.wq,
.name = "pan_js",
.dev = pfdev->dev,
};
@@ -879,6 +878,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
if (!pfdev->reset.wq)
return -ENOMEM;
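+	/* The reset workqueue has only just been allocated, so it could not
+	 * be set in args' designated initializer above.
+	 */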
+ args.timeout_wq = pfdev->reset.wq;
for (j = 0; j < NUM_JOB_SLOTS; j++) {
js->queue[j].fence_context = dma_fence_context_alloc(1);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index b91019cd5acb..f6b91c052cfb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -26,6 +26,48 @@
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
+static u64 mair_to_memattr(u64 mair, bool coherent)
+{
+ u64 memattr = 0;
+ u32 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 in_attr = mair >> (8 * i), out_attr;
+ u8 outer = in_attr >> 4, inner = in_attr & 0xf;
+
+		/* For caching to be enabled, the inner and outer caching
+		 * policies must both be write-back; if either of them is
+		 * write-through or non-cacheable, we just choose non-cacheable.
+		 * Device memory is also translated to non-cacheable.
+ */
+ if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
+ AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
+ } else {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ /* Use SH_MIDGARD_INNER mode when device isn't coherent,
+ * so SH_IS, which is used when IOMMU_CACHE is set, maps
+ * to Mali's internal-shareable mode. As per the Mali
+ * Spec, inner and outer-shareable modes aren't allowed
+ * for WB memory when coherency is disabled.
+ * Use SH_CPU_INNER mode when coherency is enabled, so
+ * that SH_IS actually maps to the standard definition of
+ * inner-shareable.
+ */
+ if (!coherent)
+ out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
+ else
+ out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
+ }
+
+ memattr |= (u64)out_attr << (8 * i);
+ }
+
+ return memattr;
+}
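
As a worked illustration (not part of the patch): for a Normal write-back, read/write-allocate MAIR attribute byte of 0xff, the translation above reduces to the following, using the AS_MEMATTR_AARCH64_* macros added to panfrost_regs.h further down.

/* Illustration only: what mair_to_memattr() computes for one attribute byte. */
static u8 example_wb_rwalloc_attr(bool coherent)
{
	u8 in_attr = 0xff;	/* inner/outer WB, RW-allocate */
	u8 inner = in_attr & 0xf;
	u8 out_attr;

	/* WB path: (2 << 6) | (3 << 2) | BIT(0) | BIT(1) = 0x8f */
	out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
		   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);

	/* 0x8f when non-coherent (SH_MIDGARD_INNER is 0 << 4), 0x9f when coherent */
	out_attr |= coherent ? AS_MEMATTR_AARCH64_SH_CPU_INNER
			     : AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;

	return out_attr;
}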
+
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
int ret;
@@ -124,9 +166,9 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev,
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as_nr = mmu->as;
- struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
- u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
- u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
+ u64 transtab = mmu->cfg.transtab;
+ u64 memattr = mmu->cfg.memattr;
+ u64 transcfg = mmu->cfg.transcfg;
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
@@ -139,6 +181,9 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -152,9 +197,67 @@ static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0);
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
+static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+
+ /* TODO: The following fields are duplicated between the MMU and Page
+	 * Table config structs. Ideally, they should be kept in one place.
+ */
+ mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
+ mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
+ mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
+
+ return 0;
+}
+
+static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+ ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK))
+ return -EINVAL;
+
+ mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+
+ mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair,
+ pgtbl_cfg->coherent_walk);
+
+ mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
+ AS_TRANSCFG_PTW_RA |
+ AS_TRANSCFG_ADRMODE_AARCH64_4K |
+ AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias);
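+	/* A cache-coherent page-table walker additionally gets
+	 * outer-shareable table walks (PTW_SH_OS).
+	 */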
+ if (pgtbl_cfg->coherent_walk)
+ mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;
+
+ return 0;
+}
+
+static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
+ enum io_pgtable_fmt fmt)
+{
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ switch (fmt) {
+ case ARM_64_LPAE_S1:
+ return mmu_cfg_init_aarch64_4k(mmu);
+ case ARM_MALI_LPAE:
+ return mmu_cfg_init_mali_lpae(mmu);
+ default:
+ /* This should never happen */
+ drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
+ return -EINVAL;
+ }
+}
+
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as;
@@ -327,7 +430,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
struct drm_gem_object *obj = &shmem->base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
- int prot = IOMMU_READ | IOMMU_WRITE;
+ int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
if (WARN_ON(mapping->active))
return 0;
@@ -489,7 +592,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_unlock;
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
} else {
pages = bo->base.pages;
if (pages[page_offset]) {
@@ -528,7 +631,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
mmu_map_sg(pfdev, bomapping->mmu, addr,
- IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
bomapping->active = true;
bo->heap_rss_size += SZ_2M;
@@ -615,7 +718,22 @@ static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
+ u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
struct panfrost_mmu *mmu;
+ enum io_pgtable_fmt fmt;
+ int ret;
+
+ if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
+ if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
+ dev_err_once(pfdev->dev,
+ "AARCH64_4K page table not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+ fmt = ARM_64_LPAE_S1;
+ } else {
+ fmt = ARM_MALI_LPAE;
+ }
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
@@ -633,23 +751,33 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = SZ_4K | SZ_2M,
- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .ias = va_bits,
+ .oas = pa_bits,
.coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
.iommu_dev = pfdev->dev,
};
- mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
- mmu);
+ mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
if (!mmu->pgtbl_ops) {
- kfree(mmu);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto err_free_mmu;
}
+ ret = panfrost_mmu_cfg_init(mmu, fmt);
+ if (ret)
+ goto err_free_io_pgtable;
+
kref_init(&mmu->refcount);
return mmu;
+
+err_free_io_pgtable:
+ free_io_pgtable_ops(mmu->pgtbl_ops);
+
+err_free_mmu:
+ kfree(mmu);
+ return ERR_PTR(ret);
}
static const char *access_type_name(struct panfrost_device *pfdev,
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index ba9b6e2b2636..563f16bae543 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -106,11 +106,13 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_close_bo;
}
- ret = drm_gem_vmap_unlocked(&bo->base, &map);
+ ret = drm_gem_vmap(&bo->base, &map);
if (ret)
goto err_put_mapping;
perfcnt->buf = map.vaddr;
+ panfrost_gem_internal_set_label(&bo->base, "Perfcnt sample buffer");
+
/*
* Invalidate the cache and clear the counters to start from a fresh
* state.
@@ -165,7 +167,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
return 0;
err_vunmap:
- drm_gem_vunmap_unlocked(&bo->base, &map);
+ drm_gem_vunmap(&bo->base, &map);
err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
@@ -195,7 +197,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
+ drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index c7bba476ab3f..2b8f1617b836 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -16,6 +16,8 @@
#define GROUPS_L2_COHERENT BIT(0) /* Cores groups are l2 coherent */
#define GPU_MMU_FEATURES 0x014 /* (RO) MMU features */
+#define GPU_MMU_FEATURES_VA_BITS(x) ((x) & GENMASK(7, 0))
+#define GPU_MMU_FEATURES_PA_BITS(x) (((x) >> 8) & GENMASK(7, 0))
#define GPU_AS_PRESENT 0x018 /* (RO) Address space slots present */
#define GPU_JS_PRESENT 0x01C /* (RO) Job slots present */
@@ -299,6 +301,17 @@
#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x08) /* (RW) Memory attributes for address space n, low word. */
#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0x0C) /* (RW) Memory attributes for address space n, high word. */
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
+ ((w) ? BIT(0) : 0) | \
+ ((r) ? BIT(1) : 0))
+#define AS_MEMATTR_AARCH64_SH_MIDGARD_INNER (0 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER (1 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER_SHADER_COH (2 << 4)
+#define AS_MEMATTR_AARCH64_SHARED (0 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
+#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10) /* (RW) Lock region address for address space n, low word */
#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14) /* (RW) Lock region address for address space n, high word */
#define AS_COMMAND(as) (MMU_AS(as) + 0x18) /* (WO) MMU command register for address space n */
@@ -309,6 +322,24 @@
/* Additional Bifrost AS registers */
#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30) /* (RW) Translation table configuration for address space n, low word */
#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34) /* (RW) Translation table configuration for address space n, high word */
+#define AS_TRANSCFG_ADRMODE_LEGACY (0 << 0)
+#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
+#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K (8 << 0)
+#define AS_TRANSCFG_INA_BITS(x) ((x) << 6)
+#define AS_TRANSCFG_OUTA_BITS(x) ((x) << 14)
+#define AS_TRANSCFG_SL_CONCAT BIT(22)
+#define AS_TRANSCFG_PTW_MEMATTR_NC (1 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WB (2 << 24)
+#define AS_TRANSCFG_PTW_SH_NS (0 << 28)
+#define AS_TRANSCFG_PTW_SH_OS (2 << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3 << 28)
+#define AS_TRANSCFG_PTW_RA BIT(30)
+#define AS_TRANSCFG_DISABLE_HIER_AP BIT(33)
+#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
+#define AS_TRANSCFG_WXN BIT(35)
+#define AS_TRANSCFG_XREADABLE BIT(36)
#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
@@ -324,6 +355,11 @@
#define AS_TRANSTAB_LPAE_READ_INNER BIT(2)
#define AS_TRANSTAB_LPAE_SHARE_OUTER BIT(4)
+/*
+ * Begin AARCH64_4K MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_AARCH64_4K_ADDR_MASK 0xfffffffffffffff0
+
#define AS_STATUS_AS_ACTIVE 0x01
#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index a9da1d1eeb70..f0b2da5b2b96 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -171,10 +171,6 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
- ret = panthor_gpu_coherency_init(ptdev);
- if (ret)
- return ret;
-
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
if (ret)
@@ -184,6 +180,11 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
return ret;
+#ifdef CONFIG_DEBUG_FS
+ drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
+ INIT_LIST_HEAD(&ptdev->gems.node);
+#endif
+
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
p = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!p)
@@ -247,6 +248,10 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
goto err_rpm_put;
+ ret = panthor_gpu_coherency_init(ptdev);
+ if (ret)
+ goto err_unplug_gpu;
+
ret = panthor_mmu_init(ptdev);
if (ret)
goto err_unplug_gpu;
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index da6574021664..4fc7cf2aeed5 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -205,6 +205,17 @@ struct panthor_device {
/** @fast_rate: Maximum device clock frequency. Set by DVFS */
unsigned long fast_rate;
+
+#ifdef CONFIG_DEBUG_FS
+ /** @gems: Device-wide list of GEM objects owned by at least one file. */
+ struct {
+ /** @gems.lock: Protects the device-wide list of GEM objects. */
+ struct mutex lock;
+
+		/** @gems.node: List head keeping track of all the device's GEM objects. */
+ struct list_head node;
+ } gems;
+#endif
};
struct panthor_gpu_usage {
@@ -219,6 +230,24 @@ struct panthor_file {
/** @ptdev: Device attached to this file. */
struct panthor_device *ptdev;
+ /** @user_mmio: User MMIO related fields. */
+ struct {
+ /**
+ * @offset: Offset used for user MMIO mappings.
+ *
+ * This offset should not be used to check the type of mapping
+ * except in panthor_mmap(). After that point, MMIO mapping
+ * offsets have been adjusted to match
+ * DRM_PANTHOR_USER_MMIO_OFFSET and that macro should be used
+ * instead.
+		 * Make sure this rule is followed at all times: userspace is
+		 * in control of the offset and can change the value behind
+		 * our back, which could otherwise lead to erroneous branching
+		 * in kernel space.
+ */
+ u64 offset;
+ } user_mmio;
+
/** @vms: VM pool attached to this file. */
struct panthor_vm_pool *vms;
@@ -383,8 +412,6 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da
if (!status) \
break; \
\
- gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status); \
- \
__handler(ptdev, status); \
ret = IRQ_HANDLED; \
} \
@@ -428,4 +455,75 @@ static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
extern struct workqueue_struct *panthor_cleanup_wq;
+static inline void gpu_write(struct panthor_device *ptdev, u32 reg, u32 data)
+{
+ writel(data, ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read(struct panthor_device *ptdev, u32 reg)
+{
+ return readl(ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+ return readl_relaxed(ptdev->iomem + reg);
+}
+
+static inline void gpu_write64(struct panthor_device *ptdev, u32 reg, u64 data)
+{
+ gpu_write(ptdev, reg, lower_32_bits(data));
+ gpu_write(ptdev, reg + 4, upper_32_bits(data));
+}
+
+static inline u64 gpu_read64(struct panthor_device *ptdev, u32 reg)
+{
+ return (gpu_read(ptdev, reg) | ((u64)gpu_read(ptdev, reg + 4) << 32));
+}
+
+static inline u64 gpu_read64_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+ return (gpu_read_relaxed(ptdev, reg) |
+ ((u64)gpu_read_relaxed(ptdev, reg + 4) << 32));
+}
+
+static inline u64 gpu_read64_counter(struct panthor_device *ptdev, u32 reg)
+{
+ u32 lo, hi1, hi2;
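+
+	/* The two halves can't be read atomically: sample the high word, then
+	 * the low word, then the high word again, and retry if a carry into
+	 * the high word happened in between.
+	 */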
+ do {
+ hi1 = gpu_read(ptdev, reg + 4);
+ lo = gpu_read(ptdev, reg);
+ hi2 = gpu_read(ptdev, reg + 4);
+ } while (hi1 != hi2);
+ return lo | ((u64)hi2 << 32);
+}
+
+#define gpu_read_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(gpu_read, val, cond, delay_us, timeout_us, false, \
+ dev, reg)
+
+#define gpu_read_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
+#define gpu_read64_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(gpu_read64, val, cond, delay_us, timeout_us, false, \
+ dev, reg)
+
+#define gpu_read64_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read64, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
+#define gpu_read_relaxed_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read_relaxed, val, cond, delay_us, \
+ timeout_us, false, dev, reg)
+
+#define gpu_read64_relaxed_poll_timeout(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout(gpu_read64_relaxed, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 06fe46e32073..1116f2d2826e 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -772,8 +772,8 @@ static int panthor_query_timestamp_info(struct panthor_device *ptdev,
#else
arg->timestamp_frequency = 0;
#endif
- arg->current_timestamp = panthor_gpu_read_timestamp(ptdev);
- arg->timestamp_offset = panthor_gpu_read_timestamp_offset(ptdev);
+ arg->current_timestamp = gpu_read64_counter(ptdev, GPU_TIMESTAMP);
+ arg->timestamp_offset = gpu_read64(ptdev, GPU_TIMESTAMP_OFFSET);
pm_runtime_put(ptdev->base.dev);
return 0;
@@ -940,6 +940,7 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
struct drm_file *file)
{
struct drm_panthor_bo_mmap_offset *args = data;
+ struct panthor_gem_object *bo;
struct drm_gem_object *obj;
int ret;
@@ -950,6 +951,12 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
if (!obj)
return -ENOENT;
+ bo = to_panthor_bo(obj);
+ if (bo->flags & DRM_PANTHOR_BO_NO_MMAP) {
+ ret = -EPERM;
+ goto out;
+ }
+
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
@@ -989,7 +996,8 @@ static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
struct drm_sched_job *job;
- job = panthor_job_create(pfile, args->group_handle, qsubmit);
+ job = panthor_job_create(pfile, args->group_handle, qsubmit,
+ file->client_id);
if (IS_ERR(job)) {
ret = PTR_ERR(job);
goto out_cleanup_submit_ctx;
@@ -1331,6 +1339,60 @@ static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
return 0;
}
+static int panthor_ioctl_bo_set_label(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panthor_bo_set_label *args = data;
+ struct drm_gem_object *obj;
+ const char *label = NULL;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->label) {
+ label = strndup_user((const char __user *)(uintptr_t)args->label,
+ PANTHOR_BO_LABEL_MAXLEN);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
+ if (ret == -EINVAL)
+ ret = -E2BIG;
+ goto err_put_obj;
+ }
+ }
+
+ /*
+ * We treat passing a label of length 0 and passing a NULL label
+ * differently, because even though they might seem conceptually
+ * similar, future uses of the BO label might expect a different
+ * behaviour in each case.
+ */
+ panthor_gem_bo_set_label(obj, label);
+
+err_put_obj:
+ drm_gem_object_put(obj);
+
+ return ret;
+}
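
For illustration, userspace would drive this roughly as in the sketch below (error handling trimmed; the struct fields match those consumed by the handler above, and DRM_IOCTL_PANTHOR_BO_SET_LABEL is the ioctl name recorded in the driver version bump further down).

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

static int panthor_label_bo(int fd, uint32_t handle, const char *name)
{
	struct drm_panthor_bo_set_label args = {
		.handle = handle,
		/* NULL is allowed and distinct from an empty string */
		.label = (uint64_t)(uintptr_t)name,
	};

	return ioctl(fd, DRM_IOCTL_PANTHOR_BO_SET_LABEL, &args);
}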
+
+static int panthor_ioctl_set_user_mmio_offset(struct drm_device *ddev,
+ void *data, struct drm_file *file)
+{
+ struct drm_panthor_set_user_mmio_offset *args = data;
+ struct panthor_file *pfile = file->driver_priv;
+
+ if (args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_32BIT &&
+ args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
+ return -EINVAL;
+
+ WRITE_ONCE(pfile->user_mmio.offset, args->offset);
+ return 0;
+}
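
Similarly, a client can pin the MMIO offset regardless of its own bitness with something like the sketch below (DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET is assumed to be the uapi wrapper wired to this handler; an older kernel will simply reject the ioctl).

struct drm_panthor_set_user_mmio_offset args = {
	.offset = DRM_PANTHOR_USER_MMIO_OFFSET_64BIT,
};

if (ioctl(fd, DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET, &args))
	perror("SET_USER_MMIO_OFFSET");	/* e.g. kernel predates version 1.5 */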
+
static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
@@ -1348,6 +1410,18 @@ panthor_open(struct drm_device *ddev, struct drm_file *file)
}
pfile->ptdev = ptdev;
+ pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;
+
+#ifdef CONFIG_ARM64
+ /*
+ * With 32-bit systems being limited by the 32-bit representation of
+ * mmap2's pgoffset field, we need to make the MMIO offset arch
+ * specific.
+ */
+ if (test_tsk_thread_flag(current, TIF_32BIT))
+ pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+#endif
+
ret = panthor_vm_pool_create(pfile);
if (ret)
@@ -1400,6 +1474,8 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(SET_USER_MMIO_OFFSET, set_user_mmio_offset, DRM_RENDER_ALLOW),
};
static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1408,30 +1484,26 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
struct panthor_file *pfile = file->driver_priv;
struct panthor_device *ptdev = pfile->ptdev;
u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ u64 user_mmio_offset;
int ret, cookie;
if (!drm_dev_enter(file->minor->dev, &cookie))
return -ENODEV;
-#ifdef CONFIG_ARM64
- /*
- * With 32-bit systems being limited by the 32-bit representation of
- * mmap2's pgoffset field, we need to make the MMIO offset arch
- * specific. This converts a user MMIO offset into something the kernel
- * driver understands.
+	/* Adjust the user MMIO offset to match the offset used on the kernel
+	 * side. We use a local variable with a READ_ONCE() here to make sure
+ * the user_mmio_offset we use for the is_user_mmio_mapping() check
+ * hasn't changed when we do the offset adjustment.
*/
- if (test_tsk_thread_flag(current, TIF_32BIT) &&
- offset >= DRM_PANTHOR_USER_MMIO_OFFSET_32BIT) {
- offset += DRM_PANTHOR_USER_MMIO_OFFSET_64BIT -
- DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+ user_mmio_offset = READ_ONCE(pfile->user_mmio.offset);
+ if (offset >= user_mmio_offset) {
+ offset -= user_mmio_offset;
+ offset += DRM_PANTHOR_USER_MMIO_OFFSET;
vma->vm_pgoff = offset >> PAGE_SHIFT;
- }
-#endif
-
- if (offset >= DRM_PANTHOR_USER_MMIO_OFFSET)
ret = panthor_device_mmap_io(ptdev, vma);
- else
+ } else {
ret = drm_gem_mmap(filp, vma);
+ }
drm_dev_exit(cookie);
return ret;
@@ -1496,9 +1568,34 @@ static const struct file_operations panthor_drm_driver_fops = {
};
#ifdef CONFIG_DEBUG_FS
+static int panthor_gems_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
+
+ panthor_gem_debugfs_print_bos(ptdev, m);
+
+ return 0;
+}
+
+static struct drm_info_list panthor_debugfs_list[] = {
+ {"gems", panthor_gems_show, 0, NULL},
+};
+
+static int panthor_gems_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panthor_debugfs_list,
+ ARRAY_SIZE(panthor_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+
static void panthor_debugfs_init(struct drm_minor *minor)
{
panthor_mmu_debugfs_init(minor);
+ panthor_gems_debugfs_init(minor);
}
#endif
@@ -1509,6 +1606,8 @@ static void panthor_debugfs_init(struct drm_minor *minor)
* - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
* - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
* - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
+ * - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl
+ * - 1.5 - adds DRM_PANTHOR_SET_USER_MMIO_OFFSET ioctl
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
@@ -1522,7 +1621,7 @@ static const struct drm_driver panthor_drm_driver = {
.name = "panthor",
.desc = "Panthor DRM driver",
.major = 1,
- .minor = 3,
+ .minor = 5,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 0f52766a3120..36f1034839c2 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -449,7 +449,8 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Queue FW interface");
if (IS_ERR(mem))
return mem;
@@ -481,7 +482,8 @@ panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size)
return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "FW suspend buffer");
}
static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
@@ -601,7 +603,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
section_size,
DRM_PANTHOR_BO_NO_MMAP,
- vm_map_flags, va);
+ vm_map_flags, va, "FW section");
if (IS_ERR(section->mem))
return PTR_ERR(section->mem);
@@ -1008,6 +1010,8 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev)
static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status)
{
+ gpu_write(ptdev, JOB_INT_CLEAR, status);
+
if (!ptdev->fw->booted && (status & JOB_INT_GLOBAL_IF))
ptdev->fw->booted = true;
@@ -1059,8 +1063,8 @@ static void panthor_fw_stop(struct panthor_device *ptdev)
u32 status;
gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_DISABLE);
- if (readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_DISABLED, 10, 100000))
+ if (gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
+ status == MCU_STATUS_DISABLED, 10, 100000))
drm_err(&ptdev->base, "Failed to stop MCU");
}
@@ -1085,8 +1089,9 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
- if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_HALT, 10, 100000)) {
+ if (!gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
+ status == MCU_STATUS_HALT, 10,
+ 100000)) {
ptdev->reset.fast = true;
} else {
drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 8244a4e6c2a2..a123bc740ba1 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -2,6 +2,7 @@
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */
+#include <linux/cleanup.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -10,14 +11,68 @@
#include <drm/panthor_drm.h>
#include "panthor_device.h"
+#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_mmu.h"
+#ifdef CONFIG_DEBUG_FS
+static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo)
+{
+ INIT_LIST_HEAD(&bo->debugfs.node);
+}
+
+static void panthor_gem_debugfs_bo_add(struct panthor_gem_object *bo)
+{
+ struct panthor_device *ptdev = container_of(bo->base.base.dev,
+ struct panthor_device, base);
+
+ bo->debugfs.creator.tgid = current->group_leader->pid;
+ get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
+
+ mutex_lock(&ptdev->gems.lock);
+ list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
+ mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
+{
+ struct panthor_device *ptdev = container_of(bo->base.base.dev,
+ struct panthor_device, base);
+
+ if (list_empty(&bo->debugfs.node))
+ return;
+
+ mutex_lock(&ptdev->gems.lock);
+ list_del_init(&bo->debugfs.node);
+ mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
+{
+ bo->debugfs.flags = usage_flags;
+ panthor_gem_debugfs_bo_add(bo);
+}
+#else
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
+static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) {}
+#endif
+
static void panthor_gem_free_object(struct drm_gem_object *obj)
{
struct panthor_gem_object *bo = to_panthor_bo(obj);
struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;
+ panthor_gem_debugfs_bo_rm(bo);
+
+ /*
+	 * The label might have been allocated with kstrdup_const(), so
+	 * we need to take that into account when freeing the memory.
+ */
+ kfree_const(bo->label.str);
+
+ mutex_destroy(&bo->label.lock);
+
drm_gem_free_mmap_offset(&bo->base.base);
mutex_destroy(&bo->gpuva_list_lock);
drm_gem_shmem_free(&bo->base);
@@ -67,17 +122,19 @@ out_free_bo:
* @gpu_va: GPU address assigned when mapping to the VM.
* If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
* automatically allocated.
+ * @name: Descriptive label of the BO's contents
*
* Return: A valid pointer in case of success, an ERR_PTR() otherwise.
*/
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va)
+ u64 gpu_va, const char *name)
{
struct drm_gem_shmem_object *obj;
struct panthor_kernel_bo *kbo;
struct panthor_gem_object *bo;
+ u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL;
int ret;
if (drm_WARN_ON(&ptdev->base, !vm))
@@ -97,6 +154,12 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
kbo->obj = &obj->base;
bo->flags = bo_flags;
+ if (vm == panthor_fw_vm(ptdev))
+ debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;
+
+ panthor_gem_kernel_bo_set_label(kbo, name);
+ panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags);
+
/* The system and GPU MMU page size might differ, which becomes a
* problem for FW sections that need to be mapped at explicit address
* since our PAGE_SIZE alignment might cover a VA range that's
@@ -129,17 +192,6 @@ err_free_bo:
return ERR_PTR(ret);
}
-static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- struct panthor_gem_object *bo = to_panthor_bo(obj);
-
- /* Don't allow mmap on objects that have the NO_MMAP flag set. */
- if (bo->flags & DRM_PANTHOR_BO_NO_MMAP)
- return -EINVAL;
-
- return drm_gem_shmem_object_mmap(obj, vma);
-}
-
static struct dma_buf *
panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
{
@@ -155,7 +207,7 @@ static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
struct panthor_gem_object *bo = to_panthor_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.base.import_attach || bo->base.pages)
+ if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
return res;
@@ -169,7 +221,7 @@ static const struct drm_gem_object_funcs panthor_gem_funcs = {
.get_sg_table = drm_gem_shmem_object_get_sg_table,
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
- .mmap = panthor_gem_mmap,
+ .mmap = drm_gem_shmem_object_mmap,
.status = panthor_gem_status,
.export = panthor_gem_prime_export,
.vm_ops = &drm_gem_shmem_vm_ops,
@@ -196,6 +248,9 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
obj->base.map_wc = !ptdev->coherent;
mutex_init(&obj->gpuva_list_lock);
drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
+ mutex_init(&obj->label.lock);
+
+ panthor_gem_debugfs_bo_init(obj);
return &obj->base.base;
}
@@ -234,6 +289,8 @@ panthor_gem_create_with_handle(struct drm_file *file,
bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
}
+ panthor_gem_debugfs_set_usage_flags(bo, 0);
+
/*
* Allocate an id of idr table where the obj is registered
* and handle has the id what user can see.
@@ -247,3 +304,144 @@ panthor_gem_create_with_handle(struct drm_file *file,
return ret;
}
+
+void
+panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(obj);
+ const char *old_label;
+
+ scoped_guard(mutex, &bo->label.lock) {
+ old_label = bo->label.str;
+ bo->label.str = label;
+ }
+
+ kfree_const(old_label);
+}
+
+void
+panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label)
+{
+ const char *str;
+
+ /* We should never attempt labelling a UM-exposed GEM object */
+ if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0))
+ return;
+
+ if (!label)
+ return;
+
+ str = kstrdup_const(label, GFP_KERNEL);
+ if (!str) {
+ /* Failing to allocate memory for a label isn't a fatal condition */
+ drm_warn(bo->obj->dev, "Not enough memory to allocate BO label");
+ return;
+ }
+
+ panthor_gem_bo_set_label(bo->obj, str);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct gem_size_totals {
+ size_t size;
+ size_t resident;
+ size_t reclaimable;
+};
+
+static void panthor_gem_debugfs_print_flag_names(struct seq_file *m)
+{
+ int len;
+ int i;
+
+ static const char * const gem_state_flags_names[] = {
+ [PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported",
+ [PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported",
+ };
+
+ static const char * const gem_usage_flags_names[] = {
+ [PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel",
+ [PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped",
+ };
+
+ seq_puts(m, "GEM state flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
+ if (!gem_state_flags_names[i])
+ continue;
+ seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i],
+ (u32)BIT(i), (i < len - 1) ? ", " : "\n");
+ }
+
+ seq_puts(m, "GEM usage flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) {
+ if (!gem_usage_flags_names[i])
+ continue;
+ seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i],
+ (u32)BIT(i), (i < len - 1) ? ", " : "\n\n");
+ }
+}
+
+static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
+ struct seq_file *m,
+ struct gem_size_totals *totals)
+{
+ unsigned int refcount = kref_read(&bo->base.base.refcount);
+ char creator_info[32] = {};
+ size_t resident_size;
+ u32 gem_usage_flags = bo->debugfs.flags;
+ u32 gem_state_flags = 0;
+
+ /* Skip BOs being destroyed. */
+ if (!refcount)
+ return;
+
+ resident_size = bo->base.pages ? bo->base.base.size : 0;
+
+ snprintf(creator_info, sizeof(creator_info),
+ "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+ seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
+ creator_info,
+ bo->base.base.name,
+ refcount,
+ bo->base.base.size,
+ resident_size,
+ drm_vma_node_start(&bo->base.base.vma_node));
+
+ if (bo->base.base.import_attach)
+ gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
+ if (bo->base.base.dma_buf)
+ gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
+
+ seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags);
+
+ scoped_guard(mutex, &bo->label.lock) {
+ seq_printf(m, "%s\n", bo->label.str ? : "");
+ }
+
+ totals->size += bo->base.base.size;
+ totals->resident += resident_size;
+ if (bo->base.madv > 0)
+ totals->reclaimable += resident_size;
+}
+
+void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
+ struct seq_file *m)
+{
+ struct gem_size_totals totals = {0};
+ struct panthor_gem_object *bo;
+
+ panthor_gem_debugfs_print_flag_names(m);
+
+ seq_puts(m, "created-by global-name refcount size resident-size file-offset state usage label\n");
+ seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n");
+
+ scoped_guard(mutex, &ptdev->gems.lock) {
+ list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
+ panthor_gem_debugfs_bo_print(bo, m, &totals);
+ }
+ }
+
+ seq_puts(m, "==============================================================================================================================================\n");
+ seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
+ totals.size, totals.resident, totals.reclaimable);
+}
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 5749ef2ebe03..8fc7215e9b90 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -13,6 +13,53 @@
struct panthor_vm;
+#define PANTHOR_BO_LABEL_MAXLEN 4096
+
+enum panthor_debugfs_gem_state_flags {
+ PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT = 0,
+ PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT = 1,
+
+ /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED: GEM BO is PRIME imported. */
+ PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED: GEM BO is PRIME exported. */
+ PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT),
+};
+
+enum panthor_debugfs_gem_usage_flags {
+ PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT = 0,
+ PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT = 1,
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL: BO is for kernel use only. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL = BIT(PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED: BO is mapped on the FW VM. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED = BIT(PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT),
+};
+
+/**
+ * struct panthor_gem_debugfs - GEM object's DebugFS list information
+ */
+struct panthor_gem_debugfs {
+ /**
+ * @node: Node used to insert the object in the device-wide list of
+ * GEM objects, to display information about it through a DebugFS file.
+ */
+ struct list_head node;
+
+ /** @creator: Information about the UM process which created the GEM. */
+ struct {
+		/** @creator.process_name: Group leader name of the process that created the GEM */
+ char process_name[TASK_COMM_LEN];
+
+		/** @creator.tgid: PID of the group leader of the process that created the GEM */
+ pid_t tgid;
+ } creator;
+
+ /** @flags: Combination of panthor_debugfs_gem_usage_flags flags */
+ u32 flags;
+};
+
/**
* struct panthor_gem_object - Driver specific GEM object.
*/
@@ -46,6 +93,24 @@ struct panthor_gem_object {
/** @flags: Combination of drm_panthor_bo_flags flags. */
u32 flags;
+
+ /**
+ * @label: BO tagging fields. The label can be assigned within the
+ * driver itself or through a specific IOCTL.
+ */
+ struct {
+ /**
+		 * @label.str: Pointer to a NULL-terminated string holding the label.
+ */
+ const char *str;
+
+		/** @label.lock: Protects access to the @label.str field. */
+ struct mutex lock;
+ } label;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panthor_gem_debugfs debugfs;
+#endif
};
/**
@@ -91,6 +156,9 @@ panthor_gem_create_with_handle(struct drm_file *file,
struct panthor_vm *exclusive_vm,
u64 *size, u32 flags, uint32_t *handle);
+void panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label);
+void panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label);
+
static inline u64
panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
{
@@ -112,7 +180,7 @@ panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
if (bo->kmap)
return 0;
- ret = drm_gem_vmap_unlocked(bo->obj, &map);
+ ret = drm_gem_vmap(bo->obj, &map);
if (ret)
return ret;
@@ -126,7 +194,7 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
if (bo->kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
- drm_gem_vunmap_unlocked(bo->obj, &map);
+ drm_gem_vunmap(bo->obj, &map);
bo->kmap = NULL;
}
}
@@ -134,8 +202,13 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va);
+ u64 gpu_va, const char *name);
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
+#ifdef CONFIG_DEBUG_FS
+void panthor_gem_debugfs_print_bos(struct panthor_device *pfdev,
+ struct seq_file *m);
+#endif
+
#endif /* __PANTHOR_GEM_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 671049020afa..cb7a335e07d7 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -108,14 +108,9 @@ static void panthor_gpu_init_info(struct panthor_device *ptdev)
ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
- ptdev->gpu_info.shader_present = gpu_read(ptdev, GPU_SHADER_PRESENT_LO);
- ptdev->gpu_info.shader_present |= (u64)gpu_read(ptdev, GPU_SHADER_PRESENT_HI) << 32;
-
- ptdev->gpu_info.tiler_present = gpu_read(ptdev, GPU_TILER_PRESENT_LO);
- ptdev->gpu_info.tiler_present |= (u64)gpu_read(ptdev, GPU_TILER_PRESENT_HI) << 32;
-
- ptdev->gpu_info.l2_present = gpu_read(ptdev, GPU_L2_PRESENT_LO);
- ptdev->gpu_info.l2_present |= (u64)gpu_read(ptdev, GPU_L2_PRESENT_HI) << 32;
+ ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
+ ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
+ ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
@@ -150,10 +145,11 @@ static void panthor_gpu_init_info(struct panthor_device *ptdev)
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
+ gpu_write(ptdev, GPU_INT_CLEAR, status);
+
if (status & GPU_IRQ_FAULT) {
u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
- u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
- gpu_read(ptdev, GPU_FAULT_ADDR_LO);
+ u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR);
drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
@@ -244,45 +240,27 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
u32 pwroff_reg, u32 pwrtrans_reg,
u64 mask, u32 timeout_us)
{
- u32 val, i;
+ u32 val;
int ret;
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
- if (mask & GENMASK(31, 0))
- gpu_write(ptdev, pwroff_reg, mask);
+ gpu_write64(ptdev, pwroff_reg, mask);
- if (mask >> 32)
- gpu_write(ptdev, pwroff_reg + 4, mask >> 32);
-
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
return 0;
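gpu_read64_relaxed_poll_timeout() collapses the two per-half readl_relaxed_poll_timeout() loops into one 64-bit poll. A sketch of how such a macro can be layered on read_poll_timeout() from <linux/iopoll.h>, assuming a gpu_read64_relaxed() op that mirrors gpu_read64() with readl_relaxed():

	/* Illustrative: poll a 64-bit register until cond holds or timeout_us expires. */
	#define gpu_read64_relaxed_poll_timeout(ptdev, reg, val, cond, delay_us, timeout_us) \
		read_poll_timeout(gpu_read64_relaxed, val, cond, delay_us, timeout_us, \
				  false, ptdev, reg)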
@@ -305,45 +283,27 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev,
u32 pwron_reg, u32 pwrtrans_reg,
u32 rdy_reg, u64 mask, u32 timeout_us)
{
- u32 val, i;
+ u32 val;
int ret;
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
- if (mask & GENMASK(31, 0))
- gpu_write(ptdev, pwron_reg, mask);
-
- if (mask >> 32)
- gpu_write(ptdev, pwron_reg + 4, mask >> 32);
-
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
+ gpu_write64(ptdev, pwron_reg, mask);
- ret = readl_relaxed_poll_timeout(ptdev->iomem + rdy_reg + (i * 4),
- val, (mask32 & val) == mask32,
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, rdy_reg, val,
+ (mask & val) == mask,
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
+ blk_name, mask);
+ return ret;
}
return 0;
@@ -492,49 +452,3 @@ void panthor_gpu_resume(struct panthor_device *ptdev)
panthor_gpu_l2_power_on(ptdev);
}
-/**
- * panthor_gpu_read_64bit_counter() - Read a 64-bit counter at a given offset.
- * @ptdev: Device.
- * @reg: The offset of the register to read.
- *
- * Return: The counter value.
- */
-static u64
-panthor_gpu_read_64bit_counter(struct panthor_device *ptdev, u32 reg)
-{
- u32 hi, lo;
-
- do {
- hi = gpu_read(ptdev, reg + 0x4);
- lo = gpu_read(ptdev, reg);
- } while (hi != gpu_read(ptdev, reg + 0x4));
-
- return ((u64)hi << 32) | lo;
-}
-
-/**
- * panthor_gpu_read_timestamp() - Read the timestamp register.
- * @ptdev: Device.
- *
- * Return: The GPU timestamp value.
- */
-u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev)
-{
- return panthor_gpu_read_64bit_counter(ptdev, GPU_TIMESTAMP_LO);
-}
-
-/**
- * panthor_gpu_read_timestamp_offset() - Read the timestamp offset register.
- * @ptdev: Device.
- *
- * Return: The GPU timestamp offset value.
- */
-u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev)
-{
- u32 hi, lo;
-
- hi = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_HI);
- lo = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_LO);
-
- return ((u64)hi << 32) | lo;
-}
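The deleted hi/lo/hi loop exists to avoid torn reads: if the low word wraps between the two 32-bit reads, the high word changes and the read is retried. Any shared replacement has to keep that property for free-running counters; a sketch, assuming a gpu_read64_counter()-style helper takes over:

	/* Torn-read-safe 64-bit read of a free-running LO/HI counter pair. */
	static inline u64 gpu_read64_counter(struct panthor_device *ptdev, u32 reg)
	{
		u32 lo, hi;

		do {
			hi = gpu_read(ptdev, reg + 4);
			lo = gpu_read(ptdev, reg);
			/* Retry if the high word changed while reading the low word. */
		} while (hi != gpu_read(ptdev, reg + 4));

		return ((u64)hi << 32) | lo;
	}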
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h
index 7f6133a66127..7c17a8c06858 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.h
+++ b/drivers/gpu/drm/panthor/panthor_gpu.h
@@ -30,9 +30,9 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
*/
#define panthor_gpu_power_on(ptdev, type, mask, timeout_us) \
panthor_gpu_block_power_on(ptdev, #type, \
- type ## _PWRON_LO, \
- type ## _PWRTRANS_LO, \
- type ## _READY_LO, \
+ type ## _PWRON, \
+ type ## _PWRTRANS, \
+ type ## _READY, \
mask, timeout_us)
/**
@@ -42,15 +42,13 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
*/
#define panthor_gpu_power_off(ptdev, type, mask, timeout_us) \
panthor_gpu_block_power_off(ptdev, #type, \
- type ## _PWROFF_LO, \
- type ## _PWRTRANS_LO, \
+ type ## _PWROFF, \
+ type ## _PWRTRANS, \
mask, timeout_us)
int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
u32 l2, u32 lsc, u32 other);
int panthor_gpu_soft_reset(struct panthor_device *ptdev);
-u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev);
-u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev);
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index 3bdf61c14264..d236e9ceade4 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -151,7 +151,8 @@ static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool,
chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Tiler heap chunk");
if (IS_ERR(chunk->bo)) {
ret = PTR_ERR(chunk->bo);
goto err_free_chunk;
@@ -555,7 +556,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Heap pool");
if (IS_ERR(pool->gpu_contexts)) {
ret = PTR_ERR(pool->gpu_contexts);
goto err_destroy_pool;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 12a02e28f50f..4140f697ba5a 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -510,9 +510,9 @@ static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
/* Wait for the MMU status to indicate there is no active command, in
* case one is pending.
*/
- ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
- val, !(val & AS_STATUS_AS_ACTIVE),
- 10, 100000);
+ ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val,
+ !(val & AS_STATUS_AS_ACTIVE),
+ 10, 100000);
if (ret) {
panthor_device_schedule_reset(ptdev);
@@ -564,8 +564,7 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr,
region = region_width | region_start;
/* Lock the region that needs to be updated */
- gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
- gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
+ gpu_write64(ptdev, AS_LOCKADDR(as_nr), region);
write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
}
@@ -615,14 +614,9 @@ static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -635,14 +629,9 @@ static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -781,6 +770,7 @@ out_enable_as:
if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
+ ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
}
@@ -895,17 +885,6 @@ static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
return ret;
}
-/**
- * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
- * @vm: VM whose cache to flush
- *
- * Return: 0 on success, a negative error code if flush failed.
- */
-int panthor_vm_flush_all(struct panthor_vm *vm)
-{
- return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range);
-}
-
static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
struct panthor_device *ptdev = vm->ptdev;
@@ -1103,7 +1082,7 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
/* If the vm_bo object was destroyed, release the pin reference that
* was hold by this object.
*/
- if (unpin && !bo->base.base.import_attach)
+ if (unpin && !drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
drm_gpuvm_put(vm);
@@ -1234,7 +1213,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
if (ret)
goto err_cleanup;
- if (!bo->base.base.import_attach) {
+ if (!drm_gem_is_imported(&bo->base.base)) {
/* Pre-reserve the BO pages, so the map operation doesn't have to
* allocate.
*/
@@ -1245,7 +1224,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt)) {
- if (!bo->base.base.import_attach)
+ if (!drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
ret = PTR_ERR(sgt);
@@ -1256,7 +1235,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
if (!preallocated_vm_bo) {
- if (!bo->base.base.import_attach)
+ if (!drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
ret = -ENOMEM;
@@ -1282,7 +1261,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
* which will be released in panthor_vm_bo_put().
*/
if (preallocated_vm_bo != op_ctx->map.vm_bo &&
- !bo->base.base.import_attach)
+ !drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
op_ctx->map.bo_offset = offset;
@@ -1680,8 +1659,7 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
u32 source_id;
fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
- addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
- addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
+ addr = gpu_read64(ptdev, AS_FAULTADDRESS(as));
/* decode the fault status */
exception_type = fault_status & 0xFF;
@@ -1709,11 +1687,17 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
access_type, access_type_name(ptdev, fault_status),
source_id);
+ /* We don't handle VM faults at the moment, so let's just clear the
+ * interrupt and let the writer/reader crash.
+ * Note that COMPLETED irqs are never cleared, but this is fine
+ * because they are always masked.
+ */
+ gpu_write(ptdev, MMU_INT_CLEAR, mask);
+
/* Ignore MMU interrupts on this AS until it's been
* re-enabled.
*/
ptdev->mmu->irq.mask = new_int_mask;
- gpu_write(ptdev, MMU_INT_MASK, new_int_mask);
if (ptdev->mmu->as.slots[as].vm)
ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
@@ -2275,7 +2259,7 @@ static enum drm_gpu_sched_stat
panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
{
WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
@@ -2516,7 +2500,7 @@ panthor_vm_bind_job_create(struct drm_file *file,
kref_init(&job->refcount);
job->vm = panthor_vm_get(vm);
- ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
+ ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm, file->client_id);
if (ret)
goto err_put_job;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index fc274637114e..0e268fdfdb2f 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -33,7 +33,6 @@ int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
u32 panthor_vm_page_size(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);
-int panthor_vm_flush_all(struct panthor_vm *vm);
struct panthor_heap_pool *
panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index b7b3b3add166..48bbfd40138c 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -63,20 +63,16 @@
#define GPU_STATUS_DBG_ENABLED BIT(8)
#define GPU_FAULT_STATUS 0x3C
-#define GPU_FAULT_ADDR_LO 0x40
-#define GPU_FAULT_ADDR_HI 0x44
+#define GPU_FAULT_ADDR 0x40
#define GPU_PWR_KEY 0x50
#define GPU_PWR_KEY_UNLOCK 0x2968A819
#define GPU_PWR_OVERRIDE0 0x54
#define GPU_PWR_OVERRIDE1 0x58
-#define GPU_TIMESTAMP_OFFSET_LO 0x88
-#define GPU_TIMESTAMP_OFFSET_HI 0x8C
-#define GPU_CYCLE_COUNT_LO 0x90
-#define GPU_CYCLE_COUNT_HI 0x94
-#define GPU_TIMESTAMP_LO 0x98
-#define GPU_TIMESTAMP_HI 0x9C
+#define GPU_TIMESTAMP_OFFSET 0x88
+#define GPU_CYCLE_COUNT 0x90
+#define GPU_TIMESTAMP 0x98
#define GPU_THREAD_MAX_THREADS 0xA0
#define GPU_THREAD_MAX_WORKGROUP_SIZE 0xA4
@@ -85,47 +81,29 @@
#define GPU_TEXTURE_FEATURES(n) (0xB0 + ((n) * 4))
-#define GPU_SHADER_PRESENT_LO 0x100
-#define GPU_SHADER_PRESENT_HI 0x104
-#define GPU_TILER_PRESENT_LO 0x110
-#define GPU_TILER_PRESENT_HI 0x114
-#define GPU_L2_PRESENT_LO 0x120
-#define GPU_L2_PRESENT_HI 0x124
-
-#define SHADER_READY_LO 0x140
-#define SHADER_READY_HI 0x144
-#define TILER_READY_LO 0x150
-#define TILER_READY_HI 0x154
-#define L2_READY_LO 0x160
-#define L2_READY_HI 0x164
-
-#define SHADER_PWRON_LO 0x180
-#define SHADER_PWRON_HI 0x184
-#define TILER_PWRON_LO 0x190
-#define TILER_PWRON_HI 0x194
-#define L2_PWRON_LO 0x1A0
-#define L2_PWRON_HI 0x1A4
-
-#define SHADER_PWROFF_LO 0x1C0
-#define SHADER_PWROFF_HI 0x1C4
-#define TILER_PWROFF_LO 0x1D0
-#define TILER_PWROFF_HI 0x1D4
-#define L2_PWROFF_LO 0x1E0
-#define L2_PWROFF_HI 0x1E4
-
-#define SHADER_PWRTRANS_LO 0x200
-#define SHADER_PWRTRANS_HI 0x204
-#define TILER_PWRTRANS_LO 0x210
-#define TILER_PWRTRANS_HI 0x214
-#define L2_PWRTRANS_LO 0x220
-#define L2_PWRTRANS_HI 0x224
-
-#define SHADER_PWRACTIVE_LO 0x240
-#define SHADER_PWRACTIVE_HI 0x244
-#define TILER_PWRACTIVE_LO 0x250
-#define TILER_PWRACTIVE_HI 0x254
-#define L2_PWRACTIVE_LO 0x260
-#define L2_PWRACTIVE_HI 0x264
+#define GPU_SHADER_PRESENT 0x100
+#define GPU_TILER_PRESENT 0x110
+#define GPU_L2_PRESENT 0x120
+
+#define SHADER_READY 0x140
+#define TILER_READY 0x150
+#define L2_READY 0x160
+
+#define SHADER_PWRON 0x180
+#define TILER_PWRON 0x190
+#define L2_PWRON 0x1A0
+
+#define SHADER_PWROFF 0x1C0
+#define TILER_PWROFF 0x1D0
+#define L2_PWROFF 0x1E0
+
+#define SHADER_PWRTRANS 0x200
+#define TILER_PWRTRANS 0x210
+#define L2_PWRTRANS 0x220
+
+#define SHADER_PWRACTIVE 0x240
+#define TILER_PWRACTIVE 0x250
+#define L2_PWRACTIVE 0x260
#define GPU_REVID 0x280
@@ -133,8 +111,8 @@
#define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name)
#define GPU_COHERENCY_PROTOCOL 0x304
-#define GPU_COHERENCY_ACE 0
-#define GPU_COHERENCY_ACE_LITE 1
+#define GPU_COHERENCY_ACE_LITE 0
+#define GPU_COHERENCY_ACE 1
#define GPU_COHERENCY_NONE 31
#define MCU_CONTROL 0x700
@@ -168,10 +146,8 @@
#define MMU_AS_SHIFT 6
#define MMU_AS(as) (MMU_BASE + ((as) << MMU_AS_SHIFT))
-#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x0)
-#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x4)
-#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x8)
-#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0xC)
+#define AS_TRANSTAB(as) (MMU_AS(as) + 0x0)
+#define AS_MEMATTR(as) (MMU_AS(as) + 0x8)
#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
((w) ? BIT(0) : 0) | \
@@ -183,8 +159,7 @@
#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
-#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10)
-#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14)
+#define AS_LOCKADDR(as) (MMU_AS(as) + 0x10)
#define AS_COMMAND(as) (MMU_AS(as) + 0x18)
#define AS_COMMAND_NOP 0
#define AS_COMMAND_UPDATE 1
@@ -199,12 +174,10 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
-#define AS_FAULTADDRESS_LO(as) (MMU_AS(as) + 0x20)
-#define AS_FAULTADDRESS_HI(as) (MMU_AS(as) + 0x24)
+#define AS_FAULTADDRESS(as) (MMU_AS(as) + 0x20)
#define AS_STATUS(as) (MMU_AS(as) + 0x28)
#define AS_STATUS_AS_ACTIVE BIT(0)
-#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30)
-#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34)
+#define AS_TRANSCFG(as) (MMU_AS(as) + 0x30)
#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
@@ -222,18 +195,11 @@
#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
#define AS_TRANSCFG_WXN BIT(35)
#define AS_TRANSCFG_XREADABLE BIT(36)
-#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38)
-#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C)
+#define AS_FAULTEXTRA(as) (MMU_AS(as) + 0x38)
#define CSF_GPU_LATEST_FLUSH_ID 0x10000
#define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000))
#define CSF_GLB_DOORBELL_ID 0
-#define gpu_write(dev, reg, data) \
- writel(data, (dev)->iomem + (reg))
-
-#define gpu_read(dev, reg) \
- readl((dev)->iomem + (reg))
-
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 4d31d1967716..8f17394cc82a 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -840,7 +840,7 @@ panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
if (queue->syncwait.kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
- drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
+ drm_gem_vunmap(queue->syncwait.obj, &map);
queue->syncwait.kmap = NULL;
}
@@ -866,7 +866,7 @@ panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue
goto err_put_syncwait_obj;
queue->syncwait.obj = &bo->base.base;
- ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
+ ret = drm_gem_vmap(queue->syncwait.obj, &map);
if (drm_WARN_ON(&ptdev->base, ret))
goto err_put_syncwait_obj;
@@ -3241,7 +3241,7 @@ queue_timedout_job(struct drm_sched_job *sched_job)
queue_start(queue);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void queue_free_job(struct drm_sched_job *sched_job)
@@ -3332,7 +3332,8 @@ group_create_queue(struct panthor_group *group,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "CS ring buffer");
if (IS_ERR(queue->ringbuf)) {
ret = PTR_ERR(queue->ringbuf);
goto err_free_queue;
@@ -3362,7 +3363,8 @@ group_create_queue(struct panthor_group *group,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group job stats");
if (IS_ERR(queue->profiling.slots)) {
ret = PTR_ERR(queue->profiling.slots);
@@ -3493,7 +3495,8 @@ int panthor_group_create(struct panthor_file *pfile,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group sync objects");
if (IS_ERR(group->syncobjs)) {
ret = PTR_ERR(group->syncobjs);
goto err_put_group;
@@ -3729,7 +3732,8 @@ struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
u16 group_handle,
- const struct drm_panthor_queue_submit *qsubmit)
+ const struct drm_panthor_queue_submit *qsubmit,
+ u64 drm_client_id)
{
struct panthor_group_pool *gpool = pfile->groups;
struct panthor_job *job;
@@ -3801,7 +3805,7 @@ panthor_job_create(struct panthor_file *pfile,
ret = drm_sched_job_init(&job->base,
&job->group->queues[job->queue_idx]->entity,
- credits, job->group);
+ credits, job->group, drm_client_id);
if (ret)
goto err_put_job;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index e650a445cf50..742b0b4ff3a3 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -29,7 +29,8 @@ int panthor_group_get_state(struct panthor_file *pfile,
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
u16 group_handle,
- const struct drm_panthor_queue_submit *qsubmit);
+ const struct drm_panthor_queue_submit *qsubmit,
+ u64 drm_client_id);
struct drm_sched_job *panthor_job_get(struct drm_sched_job *job);
struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job);
void panthor_job_put(struct drm_sched_job *job);
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 1e4b28d03f4d..5f460b296c0c 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -501,7 +501,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
* if we find it, it will take precedence. This is on the Integrator/AP
* which only has this option for PL110 graphics.
*/
- if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+ if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
&clcd_id);
if (np)
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 69427eb8bed2..d8f24bcae34b 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_QXL
tristate "QXL virtual GPU"
- depends on DRM && PCI && MMU && HAS_IOPORT
+ depends on DRM && PCI && HAS_IOPORT
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 70aff64ced87..ae7e572b1b4a 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1176,9 +1176,10 @@ err_drm_connector_cleanup:
static struct drm_framebuffer *
qxl_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file_priv, info, mode_cmd,
&qxl_fb_funcs);
}
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index f51bace9555d..c479f0c0dd5c 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -2,7 +2,7 @@
config DRM_RADEON
tristate "ATI Radeon"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on AGP || !AGP
select FW_LOADER
select DRM_CLIENT_SELECTION
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 2db40789235c..1afa70566985 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -3825,8 +3825,7 @@ typedef struct _ATOM_DPCD_INFO
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
-// (in offset to start of memory address) is KB aligned instead of byte aligend.
-/***********************************************************************************/
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
// Note3:
/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter,
for CAIL, The size of FB access area is known, only thing missing is the Offset of FB Access area, so we can have:
@@ -5072,7 +5071,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
+#define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10
/**********************************************************************************************************************
ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fa78824931cc..3f3c360dce4b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -501,8 +501,8 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -678,7 +678,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -741,7 +741,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 11a492f21157..51a3e0fc2f56 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8548,7 +8548,7 @@ int cik_suspend(struct radeon_device *rdev)
*/
int cik_init(struct radeon_device *rdev)
{
- struct radeon_ring *ring;
+ struct radeon_ring *ring, *ring_cp1, *ring_cp2;
int r;
/* Read BIOS */
@@ -8623,19 +8623,22 @@ int cik_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+ ring_cp1 = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ ring_cp2 = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ ring_cp1->ring_obj = NULL;
+ ring_cp2->ring_obj = NULL;
+ ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS;
+ ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS;
+
+ r600_ring_init(rdev, ring_cp1, 1024 * 1024);
+ r = radeon_doorbell_get(rdev, &ring_cp1->doorbell_index);
if (r)
return r;
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+ r600_ring_init(rdev, ring_cp2, 1024 * 1024);
+ r = radeon_doorbell_get(rdev, &ring_cp2->doorbell_index);
if (r)
- return r;
+ goto out;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring->ring_obj = NULL;
@@ -8653,12 +8656,16 @@ int cik_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
- return r;
+ goto out;
rdev->accel_working = true;
r = cik_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
+ radeon_doorbell_free(rdev, ring_cp1->doorbell_index);
+ radeon_doorbell_free(rdev, ring_cp2->doorbell_index);
+ ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS;
+ ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS;
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_irq_fini(rdev);
@@ -8678,10 +8685,16 @@ int cik_init(struct radeon_device *rdev)
*/
if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
DRM_ERROR("radeon: MC ucode required for NI+.\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
return 0;
+
+out:
+ radeon_doorbell_free(rdev, ring_cp1->doorbell_index);
+ radeon_doorbell_free(rdev, ring_cp2->doorbell_index);
+ return r;
}
/**
@@ -8695,6 +8708,7 @@ int cik_init(struct radeon_device *rdev)
*/
void cik_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring;
radeon_pm_fini(rdev);
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
@@ -8708,6 +8722,10 @@ void cik_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ radeon_doorbell_free(rdev, ring->doorbell_index);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ radeon_doorbell_free(rdev, ring->doorbell_index);
radeon_uvd_fini(rdev);
radeon_vce_fini(rdev);
cik_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index a46613283393..266c57733136 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -211,7 +211,7 @@ static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
surf->base_align = track->group_size;
surf->palign = palign;
surf->halign = 1;
- if (surf->nbx & (palign - 1)) {
+ if ((surf->nbx & (palign - 1)) && !(palign == 64 && surf->nbx == 32)) {
if (prefix) {
dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
__func__, __LINE__, prefix, surf->nbx, palign);
@@ -2661,6 +2661,95 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
break;
}
+ case PACKET3_COND_EXEC:
+ {
+ u64 offset;
+
+ if (pkt->count != 2) {
+ DRM_ERROR("bad COND_EXEC (invalid count)\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COND_EXEC (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 0);
+ offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad COND_EXEC (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COND_EXEC bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->gpu_offset;
+ ib[idx + 0] = offset;
+ ib[idx + 1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
+ case PACKET3_COND_WRITE:
+ if (pkt->count != 7) {
+ DRM_ERROR("bad COND_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ if (idx_value & 0x10) {
+ u64 offset;
+ /* POLL is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COND_WRITE (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 1);
+ offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->gpu_offset;
+ ib[idx + 1] = offset;
+ ib[idx + 2] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* POLL is a reg. */
+ reg = radeon_get_ib_value(p, idx + 1) << 2;
+ if (!evergreen_is_safe_reg(p, reg)) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 1);
+ return -EINVAL;
+ }
+ }
+ if (idx_value & 0x100) {
+ u64 offset;
+ /* WRITE is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COND_WRITE (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 5);
+ offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32;
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->gpu_offset;
+ ib[idx + 5] = offset;
+ ib[idx + 6] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* WRITE is a reg. */
+ reg = radeon_get_ib_value(p, idx + 5) << 2;
+ if (!evergreen_is_safe_reg(p, reg)) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 5);
+ return -EINVAL;
+ }
+ }
+ break;
case PACKET3_NOP:
break;
default:
@@ -3406,7 +3495,12 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
case CAYMAN_PACKET3_DEALLOC_STATE:
break;
case PACKET3_COND_WRITE:
- if (idx_value & 0x100) {
+ if (!(idx_value & 0x10)) {
+ reg = ib[idx + 1] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ if (!(idx_value & 0x100)) {
reg = ib[idx + 5] * 4;
if (!evergreen_vm_reg_valid(reg))
return -EINVAL;
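Both new cases reconstruct a 40-bit GPU address from two ring dwords: the full low word plus the low byte of the high word. A worked sketch of that packing (helper names hypothetical, for illustration only):

	/* Hypothetical helpers showing the 40-bit address split these packets use. */
	static inline u64 cond_packet_addr(u32 lo_dw, u32 hi_dw)
	{
		return (u64)lo_dw | (((u64)hi_dw & 0xff) << 32);
	}

	static inline void cond_packet_addr_split(u64 addr, u32 *lo_dw, u32 *hi_dw)
	{
		*lo_dw = lower_32_bits(addr);
		*hi_dw = upper_32_bits(addr) & 0xff;
	}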
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 55dbf450bd9c..4aa050385284 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2329,7 +2329,7 @@ static int kv_parse_sys_info_table(struct radeon_device *rdev)
le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
}
if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS)
pi->caps_enable_dfs_bypass = true;
sumo_construct_sclk_voltage_mapping_table(rdev,
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 661f374f5f27..9758f3a9df75 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,28 +290,6 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
return result;
}
-/*
- * write the audio workaround status to the hardware
- */
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset = dig->afmt->offset;
- bool hdmi_audio_workaround = false; /* FIXME */
- u32 value;
-
- if (!hdmi_audio_workaround ||
- r600_hdmi_is_audio_buffer_filled(encoder))
- value = 0; /* disable workaround */
- else
- value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
- WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
- value, ~HDMI0_AUDIO_TEST_EN);
-}
-
void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
{
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8605c074d9f7..63c47585afbc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -394,9 +394,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, l
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_any(struct radeon_device *rdev,
- struct radeon_fence **fences,
- bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 8f5e07834fcc..9e697f10f9ca 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -401,7 +401,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
size_t size);
void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
u32 r600_get_xclk(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 64b26bfeafc9..b8e6202f1d5b 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -409,7 +409,6 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
* radeon_cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
- * @backoff: indicator to backoff the reservation
*
* If error is set then unvalidate the buffer, otherwise just free the memory
* used by parsing context.
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bbd39348a7ab..7a3e510327b7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -26,7 +26,6 @@
* Jerome Glisse
*/
-#include <linux/console.h>
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
@@ -1635,11 +1634,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
pci_set_power_state(pdev, PCI_D3hot);
}
- if (notify_clients) {
- console_lock();
- drm_client_dev_suspend(dev, true);
- console_unlock();
- }
+ if (notify_clients)
+ drm_client_dev_suspend(dev, false);
+
return 0;
}
@@ -1661,17 +1658,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (notify_clients) {
- console_lock();
- }
if (resume) {
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (pci_enable_device(pdev)) {
- if (notify_clients)
- console_unlock();
+ if (pci_enable_device(pdev))
return -1;
- }
}
/* resume AGP if in use */
radeon_agp_resume(rdev);
@@ -1747,10 +1738,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev);
- if (notify_clients) {
- drm_client_dev_resume(dev, true);
- console_unlock();
- }
+ if (notify_clients)
+ drm_client_dev_resume(dev, false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8f5f8abcb1b4..4dc77c398617 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1297,12 +1297,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
int
radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
fb->obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
if (ret) {
fb->obj[0] = NULL;
@@ -1314,6 +1315,7 @@ radeon_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
@@ -1340,7 +1342,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
+ ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj);
if (ret) {
kfree(fb);
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 267f082bc430..88e821d67af7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -110,9 +110,10 @@
* 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
* 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
* 2.50.0 - Allows unaligned shader loads on CIK. (needed by OpenGL)
+ * 2.51.0 - Add evergreen/cayman OpenGL 4.6 compatibility
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 50
+#define KMS_DRIVER_MINOR 51
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_no_wb;
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index d4a58bd679db..dc81b0c2dbff 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj)
}
static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
- const struct drm_format_info *info;
struct radeon_device *rdev = fb_helper->dev->dev_private;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
@@ -67,7 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
int height = mode_cmd->height;
u32 cpp;
- info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
@@ -205,6 +204,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_device *rdev = fb_helper->dev->dev_private;
+ const struct drm_format_info *format_info;
struct drm_mode_fb_cmd2 mode_cmd = { };
struct fb_info *info;
struct drm_gem_object *gobj;
@@ -223,7 +223,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj);
+ format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format,
+ mode_cmd.modifier[0]);
+ ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon object %d\n", ret);
return ret;
@@ -235,7 +237,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
ret = -ENOMEM;
goto err_radeon_fbdev_destroy_pinned_object;
}
- ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto err_kfree;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 8ff4f18b51a9..5b5b54e876d4 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -575,48 +575,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
}
/**
- * radeon_fence_wait_any - wait for a fence to signal on any ring
- *
- * @rdev: radeon device pointer
- * @fences: radeon fence object(s)
- * @intr: use interruptable sleep
- *
- * Wait for any requested fence to signal (all asics). Fence
- * array is indexed by ring id. @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
- */
-int radeon_fence_wait_any(struct radeon_device *rdev,
- struct radeon_fence **fences,
- bool intr)
-{
- uint64_t seq[RADEON_NUM_RINGS];
- unsigned int i, num_rings = 0;
- long r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- seq[i] = 0;
-
- if (!fences[i])
- continue;
-
- seq[i] = fences[i]->seq;
- ++num_rings;
- }
-
- /* nothing to wait for ? */
- if (num_rings == 0)
- return -ENOENT;
-
- r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-/**
* radeon_fence_wait_next - wait for the next fence to signal
*
* @rdev: radeon device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3102f6c2d055..9e34da2cacef 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -40,6 +40,7 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
+struct drm_format_info;
struct edid;
struct drm_edid;
@@ -890,6 +891,7 @@ extern void
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
int radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *rfb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 65a911ddd509..f9267b026f8d 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1109,7 +1109,7 @@
#define MIN_POWER_SHIFT 0
#define MAX_POWER(x) ((x) << 16)
#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
+#define MAX_POWER_SHIFT 16
#define SQ_POWER_THROTTLE2 0x8e5c
#define MAX_POWER_DELTA(x) ((x) << 0)
#define MAX_POWER_DELTA_MASK (0x3fff << 0)
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
index 79b67c406bd6..93ba115d654f 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
@@ -32,11 +32,6 @@ struct rcar_cmm {
} lut;
};
-static inline int rcar_cmm_read(struct rcar_cmm *rcmm, u32 reg)
-{
- return ioread32(rcmm->base + reg);
-}
-
static inline void rcar_cmm_write(struct rcar_cmm *rcmm, u32 reg, u32 data)
{
iowrite32(data, rcmm->base + reg);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index 70d8ad065bfa..216219accfd9 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -426,6 +426,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
@@ -490,7 +491,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
/* -----------------------------------------------------------------------------
@@ -705,7 +706,7 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
cells, i, &args);
if (ret < 0)
- goto error;
+ goto done;
/*
* Add the VSP to the list or update the corresponding existing
@@ -743,13 +744,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
vsp->dev = rcdu;
ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
- if (ret < 0)
- goto error;
+ if (ret)
+ goto done;
}
- return 0;
-
-error:
+done:
for (i = 0; i < ARRAY_SIZE(vsps); ++i)
of_node_put(vsps[i].np);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
index f9893d7d6dfc..e9e59c5e70d5 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
@@ -16,7 +16,7 @@ struct rcar_du_format_info;
struct rcar_du_group;
/*
- * The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
+ * The R-Car DU has 8 hardware planes, shared between primary and overlay planes.
* As using overlay planes requires at least one of the CRTCs being enabled, no
* more than 7 overlay planes can be available. We thus create 1 primary plane
* per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index 380a855b832a..af58b814e588 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -634,6 +634,7 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
}
static int rcar_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -641,7 +642,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
if (!lvds->next_bridge)
return 0;
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge, bridge,
flags);
}
@@ -877,9 +878,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
struct rcar_lvds *lvds;
int ret;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (lvds == NULL)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(&pdev->dev, struct rcar_lvds, bridge,
+ &rcar_lvds_bridge_ops);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
platform_set_drvdata(pdev, lvds);
@@ -894,7 +896,6 @@ static int rcar_lvds_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- lvds->bridge.funcs = &rcar_lvds_bridge_ops;
lvds->bridge.of_node = pdev->dev.of_node;
lvds->mmio = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index d1e626068065..1af4c73f7a88 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -799,11 +799,12 @@ static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi)
*/
static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
@@ -917,7 +918,6 @@ static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host,
}
/* Initialize the DRM bridge. */
- dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
drm_bridge_add(&dsi->bridge);
@@ -1003,9 +1003,10 @@ static int rcar_mipi_dsi_probe(struct platform_device *pdev)
struct rcar_mipi_dsi *dsi;
int ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (dsi == NULL)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct rcar_mipi_dsi, bridge,
+ &rcar_mipi_dsi_bridge_ops);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index 7c1817240846..e57536fd6f4d 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -14,10 +14,15 @@ config DRM_RZG2L_DU
Choose this option if you have an RZ/G2L alike chipset.
If M is selected the module will be called rzg2l-du-drm.
-config DRM_RZG2L_MIPI_DSI
- tristate "RZ/G2L MIPI DSI Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select DRM_MIPI_DSI
+config DRM_RZG2L_USE_MIPI_DSI
+ bool "RZ/G2L MIPI DSI Encoder Support"
+ depends on DRM_BRIDGE && OF
+ depends on DRM_RZG2L_DU || COMPILE_TEST
+ default DRM_RZG2L_DU
help
Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders.
+
+config DRM_RZG2L_MIPI_DSI
+ def_tristate DRM_RZG2L_DU
+ depends on DRM_RZG2L_USE_MIPI_DSI
+ select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index cbd9b9841267..e1aa6a719529 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -50,9 +50,20 @@ static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
}
};
+static const struct rzg2l_du_device_info rzg2l_du_r9a09g057_info = {
+ .channels_mask = BIT(0),
+ .routes = {
+ [RZG2L_DU_OUTPUT_DSI0] = {
+ .possible_outputs = BIT(0),
+ .port = 0,
+ },
+ },
+};
+
static const struct of_device_id rzg2l_du_of_table[] = {
{ .compatible = "renesas,r9a07g043u-du", .data = &rzg2l_du_r9a07g043u_info },
{ .compatible = "renesas,r9a07g044-du", .data = &rzg2l_du_r9a07g044_info },
+ { .compatible = "renesas,r9a09g057-du", .data = &rzg2l_du_r9a09g057_info },
{ /* sentinel */ }
};
@@ -79,7 +90,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops);
static const struct drm_driver rzg2l_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .dumb_create = rzg2l_du_dumb_create,
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(rzg2l_du_dumb_create),
DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &rzg2l_du_fops,
.name = "rzg2l-du",
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
index 564ab4cb3d37..5e6dd16705e6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
@@ -22,6 +22,26 @@
* Encoder
*/
+static unsigned int rzg2l_du_encoder_count_ports(struct device_node *node)
+{
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int num_ports = 0;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = of_node_get(node);
+
+ for_each_child_of_node(ports, port) {
+ if (of_node_name_eq(port, "port"))
+ num_ports++;
+ }
+
+ of_node_put(ports);
+
+ return num_ports;
+}
+
static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = {
};
@@ -50,10 +70,26 @@ int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
struct drm_bridge *bridge;
int ret;
- /* Locate the DRM bridge from the DT node. */
- bridge = of_drm_find_bridge(enc_node);
- if (!bridge)
- return -EPROBE_DEFER;
+ /*
+ * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
+ * DT node has a single port, assume that it describes a panel and
+ * create a panel bridge.
+ */
+ if (output == RZG2L_DU_OUTPUT_DPAD0 && rzg2l_du_encoder_count_ports(enc_node) == 1) {
+ struct drm_panel *panel = of_drm_find_panel(enc_node);
+
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
+ bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ } else {
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+ }
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n",
enc_node, rzg2l_du_output_name(output));
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index 90c6269ccd29..87f171145a23 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -36,23 +36,129 @@
static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = {
{
- .fourcc = DRM_FORMAT_XRGB8888,
- .v4l2 = V4L2_PIX_FMT_XBGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_RGB332,
+ .v4l2 = V4L2_PIX_FMT_RGB332,
.planes = 1,
.hsub = 1,
}, {
- .fourcc = DRM_FORMAT_ARGB8888,
- .v4l2 = V4L2_PIX_FMT_ABGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_ARGB4444,
+ .v4l2 = V4L2_PIX_FMT_ARGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB4444,
+ .v4l2 = V4L2_PIX_FMT_XRGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB1555,
+ .v4l2 = V4L2_PIX_FMT_ARGB555,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB1555,
+ .v4l2 = V4L2_PIX_FMT_XRGB555,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGB565,
+ .v4l2 = V4L2_PIX_FMT_RGB565,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGR888,
+ .v4l2 = V4L2_PIX_FMT_RGB24,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
.v4l2 = V4L2_PIX_FMT_BGR24,
- .bpp = 24,
.planes = 1,
.hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRA8888,
+ .v4l2 = V4L2_PIX_FMT_ARGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRX8888,
+ .v4l2 = V4L2_PIX_FMT_XRGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_UYVY,
+ .v4l2 = V4L2_PIX_FMT_UYVY,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUYV,
+ .v4l2 = V4L2_PIX_FMT_YUYV,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVYU,
+ .v4l2 = V4L2_PIX_FMT_YVYU,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV12,
+ .v4l2 = V4L2_PIX_FMT_NV12M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV21,
+ .v4l2 = V4L2_PIX_FMT_NV21M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV16,
+ .v4l2 = V4L2_PIX_FMT_NV16M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV61,
+ .v4l2 = V4L2_PIX_FMT_NV61M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV420,
+ .v4l2 = V4L2_PIX_FMT_YUV420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU420,
+ .v4l2 = V4L2_PIX_FMT_YVU420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV422,
+ .v4l2 = V4L2_PIX_FMT_YUV422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU422,
+ .v4l2 = V4L2_PIX_FMT_YVU422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV444,
+ .v4l2 = V4L2_PIX_FMT_YUV444M,
+ .planes = 3,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_YVU444,
+ .v4l2 = V4L2_PIX_FMT_YVU444M,
+ .planes = 3,
+ .hsub = 1,
}
};
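
A quick sketch of how entries in this table are consumed; the lookup mirrors the driver's existing rzg2l_du_format_info() helper, as used by rzg2l_du_fb_create() below:

/* NV12 now resolves to the two-plane, non-contiguous V4L2 variant that
 * the VSP expects, with 2x horizontal chroma subsampling.
 */
const struct rzg2l_du_format_info *info;

info = rzg2l_du_format_info(DRM_FORMAT_NV12);
if (!info)
	return -EINVAL;	/* format not handled by the DU */

WARN_ON(info->v4l2 != V4L2_PIX_FMT_NV12M);
WARN_ON(info->planes != 2 || info->hsub != 2);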
@@ -85,6 +191,7 @@ int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev,
static struct drm_framebuffer *
rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct rzg2l_du_format_info *format;
@@ -108,7 +215,7 @@ rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
index 876e97cfbf45..e2c599f115c6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
@@ -23,7 +23,6 @@ struct sg_table;
struct rzg2l_du_format_info {
u32 fourcc;
u32 v4l2;
- unsigned int bpp;
unsigned int planes;
unsigned int hsub;
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
index 8643ff2eec46..040d4e4aff00 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
@@ -340,6 +340,15 @@ int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
drm_plane_helper_add(&plane->plane,
&rzg2l_du_vsp_plane_helper_funcs);
+
+ drm_plane_create_alpha_property(&plane->plane);
+ drm_plane_create_zpos_property(&plane->plane, i, 0,
+ num_planes - 1);
+
+ drm_plane_create_blend_mode_property(&plane->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
}
return 0;
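
The three properties registered above surface in the plane's atomic state. A hedged sketch of how a plane-setup path might consume them; the field names come from struct drm_plane_state, the helper itself is illustrative:

static void example_plane_apply_blend(struct drm_plane_state *state)
{
	/* 0 .. DRM_BLEND_ALPHA_OPAQUE, from the "alpha" property. */
	u16 alpha = state->alpha;
	/* Stacking order, normalized by the atomic core. */
	unsigned int zpos = state->normalized_zpos;

	switch (state->pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		/* Ignore per-pixel alpha; plane alpha only. */
		break;
	case DRM_MODE_BLEND_PREMULTI:
		/* Per-pixel alpha, color premultiplied by alpha. */
		break;
	case DRM_MODE_BLEND_COVERAGE:
		/* Per-pixel alpha, straight (non-premultiplied) color. */
		break;
	}

	pr_debug("alpha=%u zpos=%u blend=%u\n", alpha, zpos,
		 state->pixel_blend_mode);
}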
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index 4550c6d84796..f87337c3cbb5 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -4,10 +4,14 @@
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
+
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -15,6 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -23,13 +28,37 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <video/mipi_display.h>
#include "rzg2l_mipi_dsi_regs.h"
+#define RZG2L_DCS_BUF_SIZE 128 /* Maximum DCS buffer size in external memory. */
+
+#define RZ_MIPI_DSI_FEATURE_16BPP BIT(0)
+
+struct rzg2l_mipi_dsi;
+
+struct rzg2l_mipi_dsi_hw_info {
+ int (*dphy_init)(struct rzg2l_mipi_dsi *dsi, u64 hsfreq_millihz);
+ void (*dphy_startup_late_init)(struct rzg2l_mipi_dsi *dsi);
+ void (*dphy_exit)(struct rzg2l_mipi_dsi *dsi);
+ int (*dphy_conf_clks)(struct rzg2l_mipi_dsi *dsi, unsigned long mode_freq,
+ u64 *hsfreq_millihz);
+ unsigned int (*dphy_mode_clk_check)(struct rzg2l_mipi_dsi *dsi,
+ unsigned long mode_freq);
+ u32 phy_reg_offset;
+ u32 link_reg_offset;
+ unsigned long min_dclk;
+ unsigned long max_dclk;
+ u8 features;
+};
+
struct rzg2l_mipi_dsi {
struct device *dev;
void __iomem *mmio;
+ const struct rzg2l_mipi_dsi_hw_info *info;
+
struct reset_control *rstc;
struct reset_control *arstc;
struct reset_control *prstc;
@@ -44,6 +73,10 @@ struct rzg2l_mipi_dsi {
unsigned int num_data_lanes;
unsigned int lanes;
unsigned long mode_flags;
+
+ /* DCS buffer pointers when using external memory. */
+ dma_addr_t dcs_buf_phys;
+ u8 *dcs_buf_virt;
};
static inline struct rzg2l_mipi_dsi *
@@ -75,7 +108,7 @@ struct rzg2l_mipi_dsi_timings {
static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
{
- .hsfreq_max = 80000,
+ .hsfreq_max = 80000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 13,
@@ -89,7 +122,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 125000,
+ .hsfreq_max = 125000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
@@ -103,7 +136,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 250000,
+ .hsfreq_max = 250000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
@@ -117,7 +150,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 360000,
+ .hsfreq_max = 360000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 10,
@@ -131,7 +164,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 720000,
+ .hsfreq_max = 720000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
@@ -145,7 +178,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 1500000,
+ .hsfreq_max = 1500000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
@@ -162,22 +195,22 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
static void rzg2l_mipi_dsi_phy_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
- iowrite32(data, dsi->mmio + reg);
+ iowrite32(data, dsi->mmio + dsi->info->phy_reg_offset + reg);
}
static void rzg2l_mipi_dsi_link_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
- iowrite32(data, dsi->mmio + LINK_REG_OFFSET + reg);
+ iowrite32(data, dsi->mmio + dsi->info->link_reg_offset + reg);
}
static u32 rzg2l_mipi_dsi_phy_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
- return ioread32(dsi->mmio + reg);
+ return ioread32(dsi->mmio + dsi->info->phy_reg_offset + reg);
}
static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
- return ioread32(dsi->mmio + LINK_REG_OFFSET + reg);
+ return ioread32(dsi->mmio + dsi->info->link_reg_offset + reg);
}
/* -----------------------------------------------------------------------------
@@ -185,8 +218,9 @@ static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
*/
static int rzg2l_mipi_dsi_dphy_init(struct rzg2l_mipi_dsi *dsi,
- unsigned long hsfreq)
+ u64 hsfreq_millihz)
{
+ unsigned long hsfreq = DIV_ROUND_CLOSEST_ULL(hsfreq_millihz, MILLI);
const struct rzg2l_mipi_dsi_timings *dphy_timings;
unsigned int i;
u32 dphyctrl0;
@@ -255,20 +289,17 @@ static void rzg2l_mipi_dsi_dphy_exit(struct rzg2l_mipi_dsi *dsi)
reset_control_assert(dsi->rstc);
}
-static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
- const struct drm_display_mode *mode)
+static int rzg2l_dphy_conf_clks(struct rzg2l_mipi_dsi *dsi, unsigned long mode_freq,
+ u64 *hsfreq_millihz)
{
- unsigned long hsfreq;
+ unsigned long vclk_rate;
unsigned int bpp;
- u32 txsetr;
- u32 clstptsetr;
- u32 lptrnstsetr;
- u32 clkkpt;
- u32 clkbfht;
- u32 clkstpt;
- u32 golpbkt;
- int ret;
+ clk_set_rate(dsi->vclk, mode_freq * KILO);
+ vclk_rate = clk_get_rate(dsi->vclk);
+ if (vclk_rate != mode_freq * KILO)
+ dev_dbg(dsi->dev, "Requested vclk rate %lu, actual %lu mismatch\n",
+ mode_freq * KILO, vclk_rate);
/*
* Relationship between hsclk and vclk must follow
* vclk * bpp = hsclk * 8 * lanes
@@ -277,18 +308,39 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
* hsclk: DSI HS Byte clock frequency (Hz)
* lanes: number of data lanes
*
- * hsclk(bit) = hsclk(byte) * 8
+ * hsclk(bit) = hsclk(byte) * 8 = hsfreq
*/
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- hsfreq = (mode->clock * bpp * 8) / (8 * dsi->lanes);
+ *hsfreq_millihz = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(vclk_rate, bpp * MILLI),
+ dsi->lanes);
+
+ return 0;
+}
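
A worked instance of the relation in the comment above; the numbers are illustrative, matching the FHD@60 case the driver's own comment cites later:

/* 1080p60: vclk = 148,500,000 Hz, RGB888 => bpp = 24, 4 data lanes.
 *
 *   hsfreq(bit) = vclk * bpp / lanes
 *               = 148500000 * 24 / 4
 *               = 891000000 Hz (891 Mbps per lane)
 *
 * which is exactly the "maximum hsclk will be 891 Mbps" bound quoted in
 * rzg2l_mipi_dsi_startup(). In millihertz, as the code computes it:
 */
u64 hsfreq_millihz = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(148500000, 24 * MILLI), 4);
/* hsfreq_millihz == 891000000000 */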
+
+static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
+ const struct drm_display_mode *mode)
+{
+ unsigned long hsfreq;
+ u64 hsfreq_millihz;
+ u32 txsetr;
+ u32 clstptsetr;
+ u32 lptrnstsetr;
+ u32 clkkpt;
+ u32 clkbfht;
+ u32 clkstpt;
+ u32 golpbkt;
+ u32 dsisetr;
+ int ret;
ret = pm_runtime_resume_and_get(dsi->dev);
if (ret < 0)
return ret;
- clk_set_rate(dsi->vclk, mode->clock * 1000);
+ ret = dsi->info->dphy_conf_clks(dsi, mode->clock, &hsfreq_millihz);
+ if (ret < 0)
+ goto err_phy;
- ret = rzg2l_mipi_dsi_dphy_init(dsi, hsfreq);
+ ret = dsi->info->dphy_init(dsi, hsfreq_millihz);
if (ret < 0)
goto err_phy;
@@ -296,6 +348,10 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
txsetr = TXSETR_DLEN | TXSETR_NUMLANEUSE(dsi->lanes - 1) | TXSETR_CLEN;
rzg2l_mipi_dsi_link_write(dsi, TXSETR, txsetr);
+ if (dsi->info->dphy_startup_late_init)
+ dsi->info->dphy_startup_late_init(dsi);
+
+ hsfreq = DIV_ROUND_CLOSEST_ULL(hsfreq_millihz, MILLI);
/*
* Global timings characteristic depends on high speed Clock Frequency
* Currently MIPI DSI-IF just supports maximum FHD@60 with:
@@ -304,12 +360,12 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
* - data lanes: maximum 4 lanes
* Therefore maximum hsclk will be 891 Mbps.
*/
- if (hsfreq > 445500) {
+ if (hsfreq > 445500000) {
clkkpt = 12;
clkbfht = 15;
clkstpt = 48;
golpbkt = 75;
- } else if (hsfreq > 250000) {
+ } else if (hsfreq > 250000000) {
clkkpt = 7;
clkbfht = 8;
clkstpt = 27;
@@ -328,10 +384,19 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
lptrnstsetr = LPTRNSTSETR_GOLPBKT(golpbkt);
rzg2l_mipi_dsi_link_write(dsi, LPTRNSTSETR, lptrnstsetr);
+ /*
+ * Increase MRPSZ as the default value of 1 will result in long read
+ * commands payload not being saved to memory.
+ */
+ dsisetr = rzg2l_mipi_dsi_link_read(dsi, DSISETR);
+ dsisetr &= ~DSISETR_MRPSZ;
+ dsisetr |= FIELD_PREP(DSISETR_MRPSZ, RZG2L_DCS_BUF_SIZE);
+ rzg2l_mipi_dsi_link_write(dsi, DSISETR, dsisetr);
+
return 0;
err_phy:
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
return ret;
@@ -339,7 +404,7 @@ err_phy:
static void rzg2l_mipi_dsi_stop(struct rzg2l_mipi_dsi *dsi)
{
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
}
@@ -479,7 +544,7 @@ static int rzg2l_mipi_dsi_start_video(struct rzg2l_mipi_dsi *dsi)
u32 status;
int ret;
- /* Configuration for Blanking sequence and start video input*/
+ /* Configuration for Blanking sequence and start video input */
vich1set0r = VICH1SET0R_HFPNOLP | VICH1SET0R_HBPNOLP |
VICH1SET0R_HSANOLP | VICH1SET0R_VSTART;
rzg2l_mipi_dsi_link_write(dsi, VICH1SET0R, vich1set0r);
@@ -523,16 +588,17 @@ err:
*/
static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
-static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void rzg2l_mipi_dsi_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
const struct drm_display_mode *mode;
@@ -549,6 +615,13 @@ static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
return;
rzg2l_mipi_dsi_set_display_timing(dsi, mode);
+}
+
+static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
+ int ret;
ret = rzg2l_mipi_dsi_start_hs_clock(dsi);
if (ret < 0)
@@ -581,9 +654,22 @@ rzg2l_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
- if (mode->clock > 148500)
+ struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
+
+ if (mode->clock > dsi->info->max_dclk)
return MODE_CLOCK_HIGH;
+ if (mode->clock < dsi->info->min_dclk)
+ return MODE_CLOCK_LOW;
+
+ if (dsi->info->dphy_mode_clk_check) {
+ enum drm_mode_status status;
+
+ status = dsi->info->dphy_mode_clk_check(dsi, mode->clock);
+ if (status != MODE_OK)
+ return status;
+ }
+
return MODE_OK;
}
@@ -592,6 +678,7 @@ static const struct drm_bridge_funcs rzg2l_mipi_dsi_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = rzg2l_mipi_dsi_atomic_pre_enable,
.atomic_enable = rzg2l_mipi_dsi_atomic_enable,
.atomic_disable = rzg2l_mipi_dsi_atomic_disable,
.mode_valid = rzg2l_mipi_dsi_bridge_mode_valid,
@@ -616,8 +703,16 @@ static int rzg2l_mipi_dsi_host_attach(struct mipi_dsi_host *host,
switch (mipi_dsi_pixel_format_to_bpp(device->format)) {
case 24:
+ break;
case 18:
break;
+ case 16:
+ if (!(dsi->info->features & RZ_MIPI_DSI_FEATURE_16BPP)) {
+ dev_err(dsi->dev, "Unsupported format 0x%04x\n",
+ device->format);
+ return -EINVAL;
+ }
+ break;
default:
dev_err(dsi->dev, "Unsupported format 0x%04x\n", device->format);
return -EINVAL;
@@ -650,9 +745,168 @@ static int rzg2l_mipi_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
+static ssize_t rzg2l_mipi_dsi_read_response(struct rzg2l_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ u8 *msg_rx = msg->rx_buf;
+ u8 datatype;
+ u32 result;
+ u16 size;
+
+ result = rzg2l_mipi_dsi_link_read(dsi, RXRSS0R);
+ if (result & RXRSS0R_RXPKTDFAIL) {
+ dev_err(dsi->dev, "packet rx data did not save correctly\n");
+ return -EPROTO;
+ }
+
+ if (result & RXRSS0R_RXFAIL) {
+ dev_err(dsi->dev, "packet rx failure\n");
+ return -EPROTO;
+ }
+
+ if (!(result & RXRSS0R_RXSUC))
+ return -EPROTO;
+
+ datatype = FIELD_GET(RXRSS0R_DT, result);
+
+ switch (datatype) {
+ case 0:
+ dev_dbg(dsi->dev, "ACK\n");
+ return 0;
+ case MIPI_DSI_RX_END_OF_TRANSMISSION:
+ dev_dbg(dsi->dev, "EoTp\n");
+ return 0;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ dev_dbg(dsi->dev, "Acknowledge and error report: $%02x%02x\n",
+ (u8)FIELD_GET(RXRSS0R_DATA1, result),
+ (u8)FIELD_GET(RXRSS0R_DATA0, result));
+ return 0;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ msg_rx[0] = FIELD_GET(RXRSS0R_DATA0, result);
+ return 1;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ msg_rx[0] = FIELD_GET(RXRSS0R_DATA0, result);
+ msg_rx[1] = FIELD_GET(RXRSS0R_DATA1, result);
+ return 2;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ size = FIELD_GET(RXRSS0R_WC, result);
+
+ if (size > msg->rx_len) {
+ dev_err(dsi->dev, "rx buffer too small");
+ return -ENOSPC;
+ }
+
+ memcpy(msg_rx, dsi->dcs_buf_virt, size);
+ return size;
+ default:
+ dev_err(dsi->dev, "unhandled response type: %02x\n", datatype);
+ return -EPROTO;
+ }
+}
+
+static ssize_t rzg2l_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct rzg2l_mipi_dsi *dsi = host_to_rzg2l_mipi_dsi(host);
+ struct mipi_dsi_packet packet;
+ bool need_bta;
+ u32 value;
+ int ret;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret < 0)
+ return ret;
+
+ /* Terminate operation after this descriptor is finished */
+ value = SQCH0DSC0AR_NXACT_TERM;
+
+ if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
+ need_bta = true; /* Message with explicitly requested ACK */
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_NON_READ);
+ } else if (msg->rx_buf && msg->rx_len > 0) {
+ need_bta = true; /* Read request */
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_READ);
+ } else {
+ need_bta = false;
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_NONE);
+ }
+
+ /* Set transmission speed */
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ value |= SQCH0DSC0AR_SPD_LOW;
+ else
+ value |= SQCH0DSC0AR_SPD_HIGH;
+
+ /* Write TX packet header */
+ value |= FIELD_PREP(SQCH0DSC0AR_DT, packet.header[0]) |
+ FIELD_PREP(SQCH0DSC0AR_DATA0, packet.header[1]) |
+ FIELD_PREP(SQCH0DSC0AR_DATA1, packet.header[2]);
+
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ value |= SQCH0DSC0AR_FMT_LONG;
+
+ if (packet.payload_length > RZG2L_DCS_BUF_SIZE) {
+ dev_err(dsi->dev, "Packet Tx payload size (%d) too large",
+ (unsigned int)packet.payload_length);
+ return -ENOSPC;
+ }
+
+ /* Copy TX packet payload data to memory space */
+ memcpy(dsi->dcs_buf_virt, packet.payload, packet.payload_length);
+ } else {
+ value |= SQCH0DSC0AR_FMT_SHORT;
+ }
+
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0AR, value);
+
+ /*
+ * Write: specify payload data source location, only used for
+ * long packet.
+ * Read: specify payload data storage location of response
+ * packet. Note: a read packet is always a short packet.
+ * If the response packet is a short packet or a long packet
+ * with WC = 0 (no payload), DTSEL is meaningless.
+ */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0BR, SQCH0DSC0BR_DTSEL_MEM_SPACE);
+
+ /*
+ * Set SQCHxSR.AACTFIN bit when descriptor actions are finished.
+ * Read: set Rx result save slot number to 0 (ACTCODE).
+ */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0CR, SQCH0DSC0CR_FINACT);
+
+ /* Set rx/tx payload data address, only relevant for long packet. */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0DR, (u32)dsi->dcs_buf_phys);
+
+ /* Start sequence 0 operation */
+ value = rzg2l_mipi_dsi_link_read(dsi, SQCH0SET0R);
+ value |= SQCH0SET0R_START;
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0SET0R, value);
+
+ /* Wait for operation to finish */
+ ret = read_poll_timeout(rzg2l_mipi_dsi_link_read,
+ value, value & SQCH0SR_ADESFIN,
+ 2000, 20000, false, dsi, SQCH0SR);
+ if (ret == 0) {
+ /* Success: clear status bit */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0SCR, SQCH0SCR_ADESFIN);
+
+ if (need_bta)
+ ret = rzg2l_mipi_dsi_read_response(dsi, msg);
+ else
+ ret = packet.payload_length;
+ }
+
+ return ret;
+}
+
static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = {
.attach = rzg2l_mipi_dsi_host_attach,
.detach = rzg2l_mipi_dsi_host_detach,
+ .transfer = rzg2l_mipi_dsi_host_transfer,
};
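
With a .transfer hook in place, the standard DCS helpers become usable on this host. A hedged sketch from a panel driver's point of view; the register address and flow are illustrative:

static int example_panel_init(struct mipi_dsi_device *dsi)
{
	u8 id[3];
	ssize_t len;

	/* Long read: the response payload is fetched from the host's
	 * DCS DMA buffer by rzg2l_mipi_dsi_read_response().
	 */
	len = mipi_dsi_dcs_read(dsi, 0x04 /* hypothetical ID register */,
				id, sizeof(id));
	if (len < 0)
		return len;

	/* Short DCS write, routed through the same sequence channel. */
	return mipi_dsi_dcs_set_display_on(dsi);
}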
/* -----------------------------------------------------------------------------
@@ -700,13 +954,16 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
u32 txsetr;
int ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct rzg2l_mipi_dsi, bridge,
+ &rzg2l_mipi_dsi_bridge_ops);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
dsi->dev = &pdev->dev;
+ dsi->info = of_device_get_match_data(&pdev->dev);
+
ret = drm_of_get_data_lanes_count_ep(dsi->dev->of_node, 1, 0, 1, 4);
if (ret < 0)
return dev_err_probe(dsi->dev, ret,
@@ -722,7 +979,7 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->vclk))
return PTR_ERR(dsi->vclk);
- dsi->rstc = devm_reset_control_get_exclusive(dsi->dev, "rst");
+ dsi->rstc = devm_reset_control_get_optional_exclusive(dsi->dev, "rst");
if (IS_ERR(dsi->rstc))
return dev_err_probe(dsi->dev, PTR_ERR(dsi->rstc),
"failed to get rst\n");
@@ -750,17 +1007,16 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
* mode->clock and format are not available. So initialize DPHY with
* timing parameters for 80Mbps.
*/
- ret = rzg2l_mipi_dsi_dphy_init(dsi, 80000);
+ ret = dsi->info->dphy_init(dsi, 80000000ULL * MILLI);
if (ret < 0)
goto err_phy;
txsetr = rzg2l_mipi_dsi_link_read(dsi, TXSETR);
dsi->num_data_lanes = min(((txsetr >> 16) & 3) + 1, num_data_lanes);
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
/* Initialize the DRM bridge. */
- dsi->bridge.funcs = &rzg2l_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
/* Init host device */
@@ -770,10 +1026,15 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_disable;
+ dsi->dcs_buf_virt = dma_alloc_coherent(dsi->host.dev, RZG2L_DCS_BUF_SIZE,
+ &dsi->dcs_buf_phys, GFP_KERNEL);
+ if (!dsi->dcs_buf_virt)
+ return -ENOMEM;
+
return 0;
err_phy:
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
err_pm_disable:
pm_runtime_disable(dsi->dev);
@@ -784,12 +1045,23 @@ static void rzg2l_mipi_dsi_remove(struct platform_device *pdev)
{
struct rzg2l_mipi_dsi *dsi = platform_get_drvdata(pdev);
+ dma_free_coherent(dsi->host.dev, RZG2L_DCS_BUF_SIZE, dsi->dcs_buf_virt,
+ dsi->dcs_buf_phys);
mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
}
+static const struct rzg2l_mipi_dsi_hw_info rzg2l_mipi_dsi_info = {
+ .dphy_init = rzg2l_mipi_dsi_dphy_init,
+ .dphy_exit = rzg2l_mipi_dsi_dphy_exit,
+ .dphy_conf_clks = rzg2l_dphy_conf_clks,
+ .link_reg_offset = 0x10000,
+ .min_dclk = 5803,
+ .max_dclk = 148500,
+};
+
static const struct of_device_id rzg2l_mipi_dsi_of_table[] = {
- { .compatible = "renesas,rzg2l-mipi-dsi" },
+ { .compatible = "renesas,rzg2l-mipi-dsi", .data = &rzg2l_mipi_dsi_info, },
{ /* sentinel */ }
};
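
The match-data plumbing above is what lets additional SoCs slot in. A sketch of what a hypothetical second entry could look like; every name and number here is an assumption, only the RZ/G2L entry above is real:

/* All values hypothetical, for illustration only. */
static const struct rzg2l_mipi_dsi_hw_info rzv2h_mipi_dsi_info = {
	.dphy_init = rzv2h_mipi_dsi_dphy_init,		/* hypothetical */
	.dphy_exit = rzv2h_mipi_dsi_dphy_exit,		/* hypothetical */
	.dphy_conf_clks = rzv2h_dphy_conf_clks,		/* hypothetical */
	.phy_reg_offset = 0x10000,
	.link_reg_offset = 0x20000,
	.min_dclk = 5440,
	.max_dclk = 187500,
	.features = RZ_MIPI_DSI_FEATURE_16BPP,
};

static const struct of_device_id example_of_table[] = {
	{ .compatible = "renesas,rzg2l-mipi-dsi", .data = &rzg2l_mipi_dsi_info, },
	{ .compatible = "renesas,r9a09g057-mipi-dsi", .data = &rzv2h_mipi_dsi_info, },	/* hypothetical */
	{ /* sentinel */ }
};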
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
index 1dbc16ec64a4..d8082a87d874 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
@@ -41,8 +41,6 @@
#define DSIDPHYTIM3_THS_ZERO(x) ((x) << 0)
/* --------------------------------------------------------*/
-/* Link Registers */
-#define LINK_REG_OFFSET 0x10000
/* Link Status Register */
#define LINKSR 0x10
@@ -81,6 +79,20 @@
#define RSTSR_SWRSTLP (1 << 1)
#define RSTSR_SWRSTHS (1 << 0)
+/* DSI Set Register */
+#define DSISETR 0x120
+#define DSISETR_MRPSZ GENMASK(15, 0)
+
+/* Rx Result Save Slot 0 Register */
+#define RXRSS0R 0x240
+#define RXRSS0R_RXPKTDFAIL BIT(28)
+#define RXRSS0R_RXFAIL BIT(27)
+#define RXRSS0R_RXSUC BIT(25)
+#define RXRSS0R_DT GENMASK(21, 16)
+#define RXRSS0R_DATA1 GENMASK(15, 8)
+#define RXRSS0R_DATA0 GENMASK(7, 0)
+#define RXRSS0R_WC GENMASK(15, 0) /* Word count for long packet. */
+
/* Clock Lane Stop Time Set Register */
#define CLSTPTSETR 0x314
#define CLSTPTSETR_CLKKPT(x) ((x) << 24)
@@ -148,4 +160,44 @@
#define VICH1HPSETR_HFP(x) (((x) & 0x1fff) << 16)
#define VICH1HPSETR_HBP(x) (((x) & 0x1fff) << 0)
+/* Sequence Channel 0 Set 0 Register */
+#define SQCH0SET0R 0x5c0
+#define SQCH0SET0R_START BIT(0)
+
+/* Sequence Channel 0 Status Register */
+#define SQCH0SR 0x5d0
+#define SQCH0SR_ADESFIN BIT(8)
+
+/* Sequence Channel 0 Status Clear Register */
+#define SQCH0SCR 0x5d4
+#define SQCH0SCR_ADESFIN BIT(8)
+
+/* Sequence Channel 0 Descriptor 0-A Register */
+#define SQCH0DSC0AR 0x780
+#define SQCH0DSC0AR_NXACT_TERM 0 /* Bit 28 */
+#define SQCH0DSC0AR_BTA GENMASK(27, 26)
+#define SQCH0DSC0AR_BTA_NONE 0
+#define SQCH0DSC0AR_BTA_NON_READ 1
+#define SQCH0DSC0AR_BTA_READ 2
+#define SQCH0DSC0AR_BTA_ONLY 3
+#define SQCH0DSC0AR_SPD_HIGH 0
+#define SQCH0DSC0AR_SPD_LOW BIT(25)
+#define SQCH0DSC0AR_FMT_SHORT 0
+#define SQCH0DSC0AR_FMT_LONG BIT(24)
+#define SQCH0DSC0AR_DT GENMASK(21, 16)
+#define SQCH0DSC0AR_DATA1 GENMASK(15, 8)
+#define SQCH0DSC0AR_DATA0 GENMASK(7, 0)
+
+/* Sequence Channel 0 Descriptor 0-B Register */
+#define SQCH0DSC0BR 0x784
+#define SQCH0DSC0BR_DTSEL_MEM_SPACE BIT(24) /* Use external memory */
+
+/* Sequence Channel 0 Descriptor 0-C Register */
+#define SQCH0DSC0CR 0x788
+#define SQCH0DSC0CR_FINACT BIT(0)
+#define SQCH0DSC0CR_AUXOP BIT(22)
+
+/* Sequence Channel 0 Descriptor 0-D Register */
+#define SQCH0DSC0DR 0x78c
+
#endif /* __RZG2L_MIPI_DSI_REGS_H__ */
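
For orientation, a compact map of how rzg2l_mipi_dsi_host_transfer() uses the sequence-channel registers defined above:

/*
 * One command transfer programs a single descriptor:
 *
 *   SQCH0DSC0AR - packet header bytes, short/long format, LP/HS speed,
 *                 BTA mode (none / non-read / read), NXACT termination
 *   SQCH0DSC0BR - payload location select (external memory space)
 *   SQCH0DSC0CR - completion behaviour (raise ADESFIN when done)
 *   SQCH0DSC0DR - DMA address of the rx/tx payload buffer
 *
 * SQCH0SET0R.START then kicks the sequence; SQCH0SR.ADESFIN is polled
 * for completion and acknowledged through SQCH0SCR.
 */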
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
index 4202ab00fb0c..fd9460da1789 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
@@ -117,6 +117,7 @@ const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
static struct drm_framebuffer *
shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct shmob_drm_format_info *format;
@@ -144,7 +145,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 26c4410b2407..ab525668939a 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,12 +2,14 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
+ depends on OF
select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index a8265a1bf9ff..d30f0983a53a 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -21,6 +21,7 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -51,11 +52,13 @@ struct rockchip_grf_reg_field {
/**
* struct rockchip_dp_chip_data - split the GRF settings of different chips
* @lcdc_sel: grf register field of lcdc_sel
+ * @edp_mode: grf register field of edp_mode
* @chip_type: specific chip type
* @reg: register base address
*/
struct rockchip_dp_chip_data {
const struct rockchip_grf_reg_field lcdc_sel;
+ const struct rockchip_grf_reg_field edp_mode;
u32 chip_type;
u32 reg;
};
@@ -70,6 +73,7 @@ struct rockchip_dp_device {
struct clk *grfclk;
struct regmap *grf;
struct reset_control *rst;
+ struct reset_control *apbrst;
const struct rockchip_dp_chip_data *data;
@@ -115,6 +119,10 @@ static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
usleep_range(10, 20);
reset_control_deassert(dp->rst);
+ reset_control_assert(dp->apbrst);
+ usleep_range(10, 20);
+ reset_control_deassert(dp->apbrst);
+
return 0;
}
@@ -136,12 +144,21 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
return ret;
}
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 1);
+ if (ret != 0)
+ DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret);
+
return ret;
}
static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
{
struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);
+ int ret;
+
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 0);
+ if (ret != 0)
+ DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret);
clk_disable_unprepare(dp->pclk);
@@ -205,6 +222,10 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
struct rockchip_dp_device *dp = encoder_to_dp(encoder);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
+ struct of_endpoint endpoint;
+ struct device_node *remote_port, *remote_port_parent;
+ char name[32];
+ u32 port_id;
int ret;
crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
@@ -222,13 +243,27 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
return;
}
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ ret = drm_of_encoder_active_endpoint(dp->dev->of_node, encoder, &endpoint);
if (ret < 0)
return;
- DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+ remote_port_parent = of_graph_get_remote_port_parent(endpoint.local_node);
+ if (remote_port_parent) {
+ if (of_get_child_by_name(remote_port_parent, "ports")) {
+ remote_port = of_graph_get_remote_port(endpoint.local_node);
+ of_property_read_u32(remote_port, "reg", &port_id);
+ of_node_put(remote_port);
+ sprintf(name, "%s vp%d", remote_port_parent->full_name, port_id);
+ } else {
+ sprintf(name, "%s %s",
+ remote_port_parent->full_name, endpoint.id ? "vopl" : "vopb");
+ }
+ of_node_put(remote_port_parent);
+
+ DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+ }
- ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret);
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, endpoint.id);
if (ret != 0)
DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
@@ -322,6 +357,12 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
return PTR_ERR(dp->rst);
}
+ dp->apbrst = devm_reset_control_get_optional(dev, "apb");
+ if (IS_ERR(dp->apbrst)) {
+ DRM_DEV_ERROR(dev, "failed to get apb reset control\n");
+ return PTR_ERR(dp->apbrst);
+ }
+
return 0;
}
@@ -392,11 +433,28 @@ static const struct component_ops rockchip_dp_component_ops = {
.unbind = rockchip_dp_unbind,
};
+static int rockchip_dp_link_panel(struct drm_dp_aux *aux)
+{
+ struct analogix_dp_plat_data *plat_data = analogix_dp_aux_to_plat_data(aux);
+ struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);
+ int ret;
+
+ /*
+ * If drm_of_find_panel_or_bridge() returns -ENODEV, there is no valid panel
+ * or bridge node. Carry on anyway, to support driver-free bridges and pure
+ * DP mode applications.
+ */
+ ret = drm_of_find_panel_or_bridge(dp->dev->of_node, 1, 0, &plat_data->panel, NULL);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ return component_add(dp->dev, &rockchip_dp_component_ops);
+}
+
static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct rockchip_dp_chip_data *dp_data;
- struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
struct resource *res;
int i;
@@ -406,10 +464,6 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (!dp_data)
return -ENODEV;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret < 0 && ret != -ENODEV)
- return ret;
-
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@@ -432,7 +486,6 @@ static int rockchip_dp_probe(struct platform_device *pdev)
dp->dev = dev;
dp->adp = ERR_PTR(-ENODEV);
- dp->plat_data.panel = panel;
dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on = rockchip_dp_poweron;
dp->plat_data.power_off = rockchip_dp_powerdown;
@@ -448,9 +501,20 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (IS_ERR(dp->adp))
return PTR_ERR(dp->adp);
- ret = component_add(dev, &rockchip_dp_component_ops);
- if (ret)
- return ret;
+ ret = devm_of_dp_aux_populate_bus(analogix_dp_get_aux(dp->adp), rockchip_dp_link_panel);
+ if (ret) {
+ /*
+ * If devm_of_dp_aux_populate_bus() returns -ENODEV, done_probing() will not
+ * be called because there are no EP devices. In that case call
+ * rockchip_dp_link_panel() directly to support the other valid DT
+ * configurations.
+ *
+ * NOTE: devm_of_dp_aux_populate_bus() is also allowed to return -EPROBE_DEFER.
+ */
+ if (ret != -ENODEV)
+ return dev_err_probe(dp->dev, ret, "failed to populate aux bus\n");
+
+ return rockchip_dp_link_panel(analogix_dp_get_aux(dp->adp));
+ }
return 0;
}
@@ -501,9 +565,24 @@ static const struct rockchip_dp_chip_data rk3288_dp[] = {
{ /* sentinel */ }
};
+static const struct rockchip_dp_chip_data rk3588_edp[] = {
+ {
+ .edp_mode = GRF_REG_FIELD(0x0000, 0, 0),
+ .chip_type = RK3588_EDP,
+ .reg = 0xfdec0000,
+ },
+ {
+ .edp_mode = GRF_REG_FIELD(0x0004, 0, 0),
+ .chip_type = RK3588_EDP,
+ .reg = 0xfded0000,
+ },
+ { /* sentinel */ }
+};
+
static const struct of_device_id rockchip_dp_dt_ids[] = {
{.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
{.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
+ {.compatible = "rockchip,rk3588-edp", .data = &rk3588_edp },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
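
A condensed sketch of the devm_of_dp_aux_populate_bus() contract the probe path above relies on; semantics follow the helper's documentation, the callback name is illustrative:

int ret = devm_of_dp_aux_populate_bus(aux, done_probing_cb);

switch (ret) {
case 0:
	/* Devices under the aux-bus DT node were populated;
	 * done_probing_cb() runs once they have probed.
	 */
	break;
case -ENODEV:
	/* No aux-bus child nodes: invoke the callback by hand,
	 * as rockchip_dp_probe() does above.
	 */
	ret = done_probing_cb(aux);
	break;
default:
	/* Propagate; -EPROBE_DEFER is an expected outcome. */
	break;
}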
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 292c31de18f1..b7e3f5dcf8d5 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -16,7 +16,9 @@
#include <sound/hdmi-codec.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -25,9 +27,9 @@
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
-static inline struct cdn_dp_device *connector_to_dp(struct drm_connector *connector)
+static inline struct cdn_dp_device *bridge_to_dp(struct drm_bridge *bridge)
{
- return container_of(connector, struct cdn_dp_device, connector);
+ return container_of(bridge, struct cdn_dp_device, bridge);
}
static inline struct cdn_dp_device *encoder_to_dp(struct drm_encoder *encoder)
@@ -231,9 +233,9 @@ static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
}
static enum drm_connector_status
-cdn_dp_connector_detect(struct drm_connector *connector, bool force)
+cdn_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct cdn_dp_device *dp = connector_to_dp(connector);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
enum drm_connector_status status = connector_status_disconnected;
mutex_lock(&dp->lock);
@@ -244,41 +246,25 @@ cdn_dp_connector_detect(struct drm_connector *connector, bool force)
return status;
}
-static void cdn_dp_connector_destroy(struct drm_connector *connector)
+static const struct drm_edid *
+cdn_dp_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
- .detect = cdn_dp_connector_detect,
- .destroy = cdn_dp_connector_destroy,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int cdn_dp_connector_get_modes(struct drm_connector *connector)
-{
- struct cdn_dp_device *dp = connector_to_dp(connector);
- int ret = 0;
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
+ const struct drm_edid *drm_edid;
mutex_lock(&dp->lock);
-
- ret = drm_edid_connector_add_modes(connector);
-
+ drm_edid = drm_edid_read_custom(connector, cdn_dp_get_edid_block, dp);
mutex_unlock(&dp->lock);
- return ret;
+ return drm_edid;
}
static enum drm_mode_status
-cdn_dp_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+cdn_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *display_info,
+ const struct drm_display_mode *mode)
{
- struct cdn_dp_device *dp = connector_to_dp(connector);
- struct drm_display_info *display_info = &dp->connector.display_info;
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
u32 requested, actual, rate, sink_max, source_max = 0;
u8 lanes, bpc;
@@ -323,11 +309,6 @@ cdn_dp_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
- .get_modes = cdn_dp_connector_get_modes,
- .mode_valid = cdn_dp_connector_mode_valid,
-};
-
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
int ret;
@@ -360,7 +341,6 @@ static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
- const struct drm_display_info *info = &dp->connector.display_info;
int ret;
if (!cdn_dp_check_sink_connection(dp))
@@ -373,17 +353,6 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
return ret;
}
- drm_edid_free(dp->drm_edid);
- dp->drm_edid = drm_edid_read_custom(&dp->connector,
- cdn_dp_get_edid_block, dp);
- drm_edid_connector_update(&dp->connector, dp->drm_edid);
-
- dp->sink_has_audio = info->has_audio;
-
- if (dp->drm_edid)
- DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
- info->width_mm / 10, info->height_mm / 10);
-
return 0;
}
@@ -488,10 +457,6 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
dp->active = false;
dp->max_lanes = 0;
dp->max_rate = 0;
- if (!dp->connected) {
- drm_edid_free(dp->drm_edid);
- dp->drm_edid = NULL;
- }
return 0;
}
@@ -546,26 +511,13 @@ err_clk_disable:
return ret;
}
-static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void cdn_dp_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted)
{
- struct cdn_dp_device *dp = encoder_to_dp(encoder);
- struct drm_display_info *display_info = &dp->connector.display_info;
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
struct video_info *video = &dp->video_info;
- switch (display_info->bpc) {
- case 10:
- video->color_depth = 10;
- break;
- case 6:
- video->color_depth = 6;
- break;
- default:
- video->color_depth = 8;
- break;
- }
-
video->color_fmt = PXL_RGB;
video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
@@ -592,19 +544,37 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}
-static void cdn_dp_audio_handle_plugged_change(struct cdn_dp_device *dp,
- bool plugged)
+static void cdn_dp_display_info_update(struct cdn_dp_device *dp,
+ struct drm_display_info *display_info)
{
- if (dp->codec_dev)
- dp->plugged_cb(dp->codec_dev, plugged);
+ struct video_info *video = &dp->video_info;
+
+ switch (display_info->bpc) {
+ case 10:
+ video->color_depth = 10;
+ break;
+ case 6:
+ video->color_depth = 6;
+ break;
+ default:
+ video->color_depth = 8;
+ break;
+ }
}
-static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
+static void cdn_dp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *state)
{
- struct cdn_dp_device *dp = encoder_to_dp(encoder);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
+ struct drm_connector *connector;
int ret, val;
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (!connector)
+ return;
+
+ cdn_dp_display_info_update(dp, &connector->display_info);
+
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, &dp->encoder.encoder);
if (ret < 0) {
DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
return;
@@ -625,7 +595,7 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
ret = cdn_dp_enable(dp);
if (ret) {
- DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
+ DRM_DEV_ERROR(dp->dev, "Failed to enable bridge %d\n",
ret);
goto out;
}
@@ -655,24 +625,21 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
goto out;
}
- cdn_dp_audio_handle_plugged_change(dp, true);
-
out:
mutex_unlock(&dp->lock);
}
-static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
+static void cdn_dp_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_atomic_state *state)
{
- struct cdn_dp_device *dp = encoder_to_dp(encoder);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
int ret;
mutex_lock(&dp->lock);
- cdn_dp_audio_handle_plugged_change(dp, false);
if (dp->active) {
ret = cdn_dp_disable(dp);
if (ret) {
- DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
+ DRM_DEV_ERROR(dp->dev, "Failed to disable bridge %d\n",
ret);
}
}
@@ -704,9 +671,6 @@ static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
- .mode_set = cdn_dp_encoder_mode_set,
- .enable = cdn_dp_encoder_enable,
- .disable = cdn_dp_encoder_disable,
.atomic_check = cdn_dp_encoder_atomic_check,
};
@@ -779,11 +743,12 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
return 0;
}
-static int cdn_dp_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+static int cdn_dp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
struct audio_info audio = {
.sample_width = params->sample_width,
.sample_rate = params->sample_rate,
@@ -805,7 +770,7 @@ static int cdn_dp_audio_hw_params(struct device *dev, void *data,
audio.format = AFMT_SPDIF;
break;
default:
- DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
+ drm_err(bridge->dev, "Invalid format %d\n", daifmt->fmt);
ret = -EINVAL;
goto out;
}
@@ -819,9 +784,10 @@ out:
return ret;
}
-static void cdn_dp_audio_shutdown(struct device *dev, void *data)
+static void cdn_dp_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
int ret;
mutex_lock(&dp->lock);
@@ -835,10 +801,11 @@ out:
mutex_unlock(&dp->lock);
}
-static int cdn_dp_audio_mute_stream(struct device *dev, void *data,
+static int cdn_dp_audio_mute_stream(struct drm_bridge *bridge,
+ struct drm_connector *connector,
bool enable, int direction)
{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
int ret;
mutex_lock(&dp->lock);
@@ -854,57 +821,22 @@ out:
return ret;
}
-static int cdn_dp_audio_get_eld(struct device *dev, void *data,
- u8 *buf, size_t len)
-{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
-
- memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));
-
- return 0;
-}
-
-static int cdn_dp_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
- struct device *codec_dev)
-{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
-
- mutex_lock(&dp->lock);
- dp->plugged_cb = fn;
- dp->codec_dev = codec_dev;
- cdn_dp_audio_handle_plugged_change(dp, dp->connected);
- mutex_unlock(&dp->lock);
-
- return 0;
-}
-
-static const struct hdmi_codec_ops audio_codec_ops = {
- .hw_params = cdn_dp_audio_hw_params,
- .audio_shutdown = cdn_dp_audio_shutdown,
- .mute_stream = cdn_dp_audio_mute_stream,
- .get_eld = cdn_dp_audio_get_eld,
- .hook_plugged_cb = cdn_dp_audio_hook_plugged_cb,
+static const struct drm_bridge_funcs cdn_dp_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .detect = cdn_dp_bridge_detect,
+ .edid_read = cdn_dp_bridge_edid_read,
+ .atomic_enable = cdn_dp_bridge_atomic_enable,
+ .atomic_disable = cdn_dp_bridge_atomic_disable,
+ .mode_valid = cdn_dp_bridge_mode_valid,
+ .mode_set = cdn_dp_bridge_mode_set,
+
+ .dp_audio_prepare = cdn_dp_audio_prepare,
+ .dp_audio_mute_stream = cdn_dp_audio_mute_stream,
+ .dp_audio_shutdown = cdn_dp_audio_shutdown,
};
-static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
- struct device *dev)
-{
- struct hdmi_codec_pdata codec_data = {
- .i2s = 1,
- .spdif = 1,
- .ops = &audio_codec_ops,
- .max_i2s_channels = 8,
- .no_capture_mute = 1,
- };
-
- dp->audio_pdev = platform_device_register_data(
- dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(dp->audio_pdev);
-}
-
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
int ret;
@@ -1006,7 +938,9 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
out:
mutex_unlock(&dp->lock);
- drm_connector_helper_hpd_irq_event(&dp->connector);
+ drm_bridge_hpd_notify(&dp->bridge,
+ dp->connected ? connector_status_connected
+ : connector_status_disconnected);
}
static int cdn_dp_pd_event(struct notifier_block *nb,
@@ -1062,26 +996,35 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);
- connector = &dp->connector;
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- connector->dpms = DRM_MODE_DPMS_OFF;
-
- ret = drm_connector_init(drm_dev, connector,
- &cdn_dp_atomic_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret) {
- DRM_ERROR("failed to initialize connector with drm\n");
- goto err_free_encoder;
- }
+ dp->bridge.ops =
+ DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_DP_AUDIO;
+ dp->bridge.of_node = dp->dev->of_node;
+ dp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ dp->bridge.hdmi_audio_dev = dp->dev;
+ dp->bridge.hdmi_audio_max_i2s_playback_channels = 8;
+ dp->bridge.hdmi_audio_spdif_playback = 1;
+ dp->bridge.hdmi_audio_dai_port = -1;
+
+ ret = devm_drm_bridge_add(dev, &dp->bridge);
+ if (ret)
+ return ret;
- drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
+ ret = drm_bridge_attach(encoder, &dp->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_ERROR("failed to attach connector and encoder\n");
- goto err_free_connector;
+ connector = drm_bridge_connector_init(drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dp->dev, "failed to init bridge connector: %d\n", ret);
+ return ret;
}
+ drm_connector_attach_encoder(connector, encoder);
+
for (i = 0; i < dp->ports; i++) {
port = dp->port[i];
@@ -1092,7 +1035,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
if (ret) {
DRM_DEV_ERROR(dev,
"register EXTCON_DISP_DP notifier err\n");
- goto err_free_connector;
+ return ret;
}
}
@@ -1101,30 +1044,19 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
schedule_work(&dp->event_work);
return 0;
-
-err_free_connector:
- drm_connector_cleanup(connector);
-err_free_encoder:
- drm_encoder_cleanup(encoder);
- return ret;
}
static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dp->encoder.encoder;
- struct drm_connector *connector = &dp->connector;
cancel_work_sync(&dp->event_work);
- cdn_dp_encoder_disable(encoder);
encoder->funcs->destroy(encoder);
- connector->funcs->destroy(connector);
pm_runtime_disable(dev);
if (dp->fw_loaded)
release_firmware(dp->fw);
- drm_edid_free(dp->drm_edid);
- dp->drm_edid = NULL;
}
static const struct component_ops cdn_dp_component_ops = {
@@ -1171,9 +1103,10 @@ static int cdn_dp_probe(struct platform_device *pdev)
int ret;
int i;
- dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
+ dp = devm_drm_bridge_alloc(dev, struct cdn_dp_device, bridge,
+ &cdn_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return PTR_ERR(dp);
dp->dev = dev;
match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
@@ -1209,19 +1142,11 @@ static int cdn_dp_probe(struct platform_device *pdev)
mutex_init(&dp->lock);
dev_set_drvdata(dev, dp);
- ret = cdn_dp_audio_codec_init(dp, dev);
- if (ret)
- return ret;
-
ret = component_add(dev, &cdn_dp_component_ops);
if (ret)
- goto err_audio_deinit;
+ return ret;
return 0;
-
-err_audio_deinit:
- platform_device_unregister(dp->audio_pdev);
- return ret;
}
static void cdn_dp_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 17498f576ce7..e9c30b9fd543 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -8,6 +8,7 @@
#define _CDN_DP_CORE_H
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <sound/hdmi-codec.h>
@@ -65,12 +66,11 @@ struct cdn_dp_port {
struct cdn_dp_device {
struct device *dev;
struct drm_device *drm_dev;
- struct drm_connector connector;
+ struct drm_bridge bridge;
struct rockchip_encoder encoder;
struct drm_display_mode mode;
struct platform_device *audio_pdev;
struct work_struct event_work;
- const struct drm_edid *drm_edid;
struct mutex lock;
bool connected;
@@ -101,9 +101,5 @@ struct cdn_dp_device {
int active_port;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
- bool sink_has_audio;
-
- hdmi_codec_plugged_cb plugged_cb;
- struct device *codec_dev;
};
#endif /* _CDN_DP_CORE_H */
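
The net effect of the conversion above: hotplug and EDID now flow through the bridge core instead of a driver-owned connector. A hedged sketch of the reporting path, mirroring cdn_dp_pd_event_work() in the .c file:

static void example_report_hpd(struct cdn_dp_device *dp, bool connected)
{
	/* With DRM_BRIDGE_OP_HPD set, the bridge-connector created by
	 * drm_bridge_connector_init() fans this out to userspace.
	 */
	drm_bridge_hpd_notify(&dp->bridge,
			      connected ? connector_status_connected
					: connector_status_disconnected);
}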
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index f737e7d46e66..acb59b25d928 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -213,17 +213,13 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
if (IS_ERR(hdmi->ref_clk)) {
ret = PTR_ERR(hdmi->ref_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get reference clock\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "failed to get reference clock\n");
}
hdmi->grf_clk = devm_clk_get_optional(hdmi->dev, "grf");
if (IS_ERR(hdmi->grf_clk)) {
ret = PTR_ERR(hdmi->grf_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get grf clock\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "failed to get grf clock\n");
}
ret = devm_regulator_get_enable(hdmi->dev, "avdd-0v9");
@@ -573,17 +569,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "Unable to parse OF data\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "Unable to parse OF data\n");
}
hdmi->phy = devm_phy_optional_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get phy\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "failed to get phy\n");
}
if (hdmi->phy) {
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 483ecfeaebb0..1ab3ad4bde9e 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -10,10 +10,12 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
+#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -27,10 +29,370 @@
#include "rockchip_drm_drv.h"
-#include "inno_hdmi.h"
-
#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
+#define DDC_SEGMENT_ADDR 0x30
+
+#define HDMI_SCL_RATE (100 * 1000)
+
+#define DDC_BUS_FREQ_L 0x4b
+#define DDC_BUS_FREQ_H 0x4c
+
+#define HDMI_SYS_CTRL 0x00
+#define m_RST_ANALOG BIT(6)
+#define v_RST_ANALOG (0 << 6)
+#define v_NOT_RST_ANALOG BIT(6)
+#define m_RST_DIGITAL BIT(5)
+#define v_RST_DIGITAL (0 << 5)
+#define v_NOT_RST_DIGITAL BIT(5)
+#define m_REG_CLK_INV BIT(4)
+#define v_REG_CLK_NOT_INV (0 << 4)
+#define v_REG_CLK_INV BIT(4)
+#define m_VCLK_INV BIT(3)
+#define v_VCLK_NOT_INV (0 << 3)
+#define v_VCLK_INV BIT(3)
+#define m_REG_CLK_SOURCE BIT(2)
+#define v_REG_CLK_SOURCE_TMDS (0 << 2)
+#define v_REG_CLK_SOURCE_SYS BIT(2)
+#define m_POWER BIT(1)
+#define v_PWR_ON (0 << 1)
+#define v_PWR_OFF BIT(1)
+#define m_INT_POL BIT(0)
+#define v_INT_POL_HIGH 1
+#define v_INT_POL_LOW 0
+
+#define HDMI_VIDEO_CONTRL1 0x01
+#define m_VIDEO_INPUT_FORMAT (7 << 1)
+#define m_DE_SOURCE BIT(0)
+#define v_VIDEO_INPUT_FORMAT(n) ((n) << 1)
+#define v_DE_EXTERNAL 1
+#define v_DE_INTERNAL 0
+enum {
+ VIDEO_INPUT_SDR_RGB444 = 0,
+ VIDEO_INPUT_DDR_RGB444 = 5,
+ VIDEO_INPUT_DDR_YCBCR422 = 6
+};
+
+#define HDMI_VIDEO_CONTRL2 0x02
+#define m_VIDEO_OUTPUT_COLOR (3 << 6)
+#define m_VIDEO_INPUT_BITS (3 << 4)
+#define m_VIDEO_INPUT_CSP BIT(0)
+#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6)
+#define v_VIDEO_INPUT_BITS(n) ((n) << 4)
+#define v_VIDEO_INPUT_CSP(n) ((n) << 0)
+enum {
+ VIDEO_INPUT_12BITS = 0,
+ VIDEO_INPUT_10BITS = 1,
+ VIDEO_INPUT_REVERT = 2,
+ VIDEO_INPUT_8BITS = 3,
+};
+
+#define HDMI_VIDEO_CONTRL 0x03
+#define m_VIDEO_AUTO_CSC BIT(7)
+#define v_VIDEO_AUTO_CSC(n) ((n) << 7)
+#define m_VIDEO_C0_C2_SWAP BIT(0)
+#define v_VIDEO_C0_C2_SWAP(n) ((n) << 0)
+enum {
+ C0_C2_CHANGE_ENABLE = 0,
+ C0_C2_CHANGE_DISABLE = 1,
+ AUTO_CSC_DISABLE = 0,
+ AUTO_CSC_ENABLE = 1,
+};
+
+#define HDMI_VIDEO_CONTRL3 0x04
+#define m_COLOR_DEPTH_NOT_INDICATED BIT(4)
+#define m_SOF BIT(3)
+#define m_COLOR_RANGE BIT(2)
+#define m_CSC BIT(0)
+#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4)
+#define v_SOF_ENABLE (0 << 3)
+#define v_SOF_DISABLE BIT(3)
+#define v_COLOR_RANGE_FULL BIT(2)
+#define v_COLOR_RANGE_LIMITED (0 << 2)
+#define v_CSC_ENABLE 1
+#define v_CSC_DISABLE 0
+
+#define HDMI_AV_MUTE 0x05
+#define m_AVMUTE_CLEAR BIT(7)
+#define m_AVMUTE_ENABLE BIT(6)
+#define m_AUDIO_MUTE BIT(1)
+#define m_VIDEO_BLACK BIT(0)
+#define v_AVMUTE_CLEAR(n) ((n) << 7)
+#define v_AVMUTE_ENABLE(n) ((n) << 6)
+#define v_AUDIO_MUTE(n) ((n) << 1)
+#define v_VIDEO_MUTE(n) ((n) << 0)
+
+#define HDMI_VIDEO_TIMING_CTL 0x08
+#define v_HSYNC_POLARITY(n) ((n) << 3)
+#define v_VSYNC_POLARITY(n) ((n) << 2)
+#define v_INETLACE(n) ((n) << 1)
+#define v_EXTERANL_VIDEO(n) ((n) << 0)
+
+#define HDMI_VIDEO_EXT_HTOTAL_L 0x09
+#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a
+#define HDMI_VIDEO_EXT_HBLANK_L 0x0b
+#define HDMI_VIDEO_EXT_HBLANK_H 0x0c
+#define HDMI_VIDEO_EXT_HDELAY_L 0x0d
+#define HDMI_VIDEO_EXT_HDELAY_H 0x0e
+#define HDMI_VIDEO_EXT_HDURATION_L 0x0f
+#define HDMI_VIDEO_EXT_HDURATION_H 0x10
+#define HDMI_VIDEO_EXT_VTOTAL_L 0x11
+#define HDMI_VIDEO_EXT_VTOTAL_H 0x12
+#define HDMI_VIDEO_EXT_VBLANK 0x13
+#define HDMI_VIDEO_EXT_VDELAY 0x14
+#define HDMI_VIDEO_EXT_VDURATION 0x15
+
+#define HDMI_VIDEO_CSC_COEF 0x18
+
+#define HDMI_AUDIO_CTRL1 0x35
+enum {
+ CTS_SOURCE_INTERNAL = 0,
+ CTS_SOURCE_EXTERNAL = 1,
+};
+
+#define v_CTS_SOURCE(n) ((n) << 7)
+
+enum {
+ DOWNSAMPLE_DISABLE = 0,
+ DOWNSAMPLE_1_2 = 1,
+ DOWNSAMPLE_1_4 = 2,
+};
+
+#define v_DOWN_SAMPLE(n) ((n) << 5)
+
+enum {
+ AUDIO_SOURCE_IIS = 0,
+ AUDIO_SOURCE_SPDIF = 1,
+};
+
+#define v_AUDIO_SOURCE(n) ((n) << 3)
+
+#define v_MCLK_ENABLE(n) ((n) << 2)
+
+enum {
+ MCLK_128FS = 0,
+ MCLK_256FS = 1,
+ MCLK_384FS = 2,
+ MCLK_512FS = 3,
+};
+
+#define v_MCLK_RATIO(n) (n)
+
+#define AUDIO_SAMPLE_RATE 0x37
+
+enum {
+ AUDIO_32K = 0x3,
+ AUDIO_441K = 0x0,
+ AUDIO_48K = 0x2,
+ AUDIO_882K = 0x8,
+ AUDIO_96K = 0xa,
+ AUDIO_1764K = 0xc,
+ AUDIO_192K = 0xe,
+};
+
+#define AUDIO_I2S_MODE 0x38
+
+enum {
+ I2S_CHANNEL_1_2 = 1,
+ I2S_CHANNEL_3_4 = 3,
+ I2S_CHANNEL_5_6 = 7,
+ I2S_CHANNEL_7_8 = 0xf
+};
+
+#define v_I2S_CHANNEL(n) ((n) << 2)
+
+enum {
+ I2S_STANDARD = 0,
+ I2S_LEFT_JUSTIFIED = 1,
+ I2S_RIGHT_JUSTIFIED = 2,
+};
+
+#define v_I2S_MODE(n) (n)
+
+#define AUDIO_I2S_MAP 0x39
+#define AUDIO_I2S_SWAPS_SPDIF 0x3a
+#define v_SPIDF_FREQ(n) (n)
+
+#define N_32K 0x1000
+#define N_441K 0x1880
+#define N_882K 0x3100
+#define N_1764K 0x6200
+#define N_48K 0x1800
+#define N_96K 0x3000
+#define N_192K 0x6000
+
+#define HDMI_AUDIO_CHANNEL_STATUS 0x3e
+#define m_AUDIO_STATUS_NLPCM BIT(7)
+#define m_AUDIO_STATUS_USE BIT(6)
+#define m_AUDIO_STATUS_COPYRIGHT BIT(5)
+#define m_AUDIO_STATUS_ADDITION (3 << 2)
+#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0)
+#define v_AUDIO_STATUS_NLPCM(n) (((n) & 1) << 7)
+#define AUDIO_N_H 0x3f
+#define AUDIO_N_M 0x40
+#define AUDIO_N_L 0x41
+
+#define HDMI_AUDIO_CTS_H 0x45
+#define HDMI_AUDIO_CTS_M 0x46
+#define HDMI_AUDIO_CTS_L 0x47
+
+#define HDMI_DDC_CLK_L 0x4b
+#define HDMI_DDC_CLK_H 0x4c
+
+#define HDMI_EDID_SEGMENT_POINTER 0x4d
+#define HDMI_EDID_WORD_ADDR 0x4e
+#define HDMI_EDID_FIFO_OFFSET 0x4f
+#define HDMI_EDID_FIFO_ADDR 0x50
+
+#define HDMI_PACKET_SEND_MANUAL 0x9c
+#define HDMI_PACKET_SEND_AUTO 0x9d
+#define m_PACKET_GCP_EN BIT(7)
+#define m_PACKET_MSI_EN BIT(6)
+#define m_PACKET_SDI_EN BIT(5)
+#define m_PACKET_VSI_EN BIT(4)
+#define v_PACKET_GCP_EN(n) (((n) & 1) << 7)
+#define v_PACKET_MSI_EN(n) (((n) & 1) << 6)
+#define v_PACKET_SDI_EN(n) (((n) & 1) << 5)
+#define v_PACKET_VSI_EN(n) (((n) & 1) << 4)
+
+#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f
+
+enum {
+ INFOFRAME_VSI = 0x05,
+ INFOFRAME_AVI = 0x06,
+ INFOFRAME_AAI = 0x08,
+};
+
+#define HDMI_CONTROL_PACKET_ADDR 0xa0
+#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
+
+enum {
+ AVI_COLOR_MODE_RGB = 0,
+ AVI_COLOR_MODE_YCBCR422 = 1,
+ AVI_COLOR_MODE_YCBCR444 = 2,
+ AVI_COLORIMETRY_NO_DATA = 0,
+
+ AVI_COLORIMETRY_SMPTE_170M = 1,
+ AVI_COLORIMETRY_ITU709 = 2,
+ AVI_COLORIMETRY_EXTENDED = 3,
+
+ AVI_CODED_FRAME_ASPECT_NO_DATA = 0,
+ AVI_CODED_FRAME_ASPECT_4_3 = 1,
+ AVI_CODED_FRAME_ASPECT_16_9 = 2,
+
+ ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08,
+ ACTIVE_ASPECT_RATE_4_3 = 0x09,
+ ACTIVE_ASPECT_RATE_16_9 = 0x0A,
+ ACTIVE_ASPECT_RATE_14_9 = 0x0B,
+};
+
+#define HDMI_HDCP_CTRL 0x52
+#define m_HDMI_DVI BIT(1)
+#define v_HDMI_DVI(n) ((n) << 1)
+
+#define HDMI_INTERRUPT_MASK1 0xc0
+#define HDMI_INTERRUPT_STATUS1 0xc1
+#define m_INT_ACTIVE_VSYNC BIT(5)
+#define m_INT_EDID_READY BIT(2)
+
+#define HDMI_INTERRUPT_MASK2 0xc2
+#define HDMI_INTERRUPT_STATUS2 0xc3
+#define m_INT_HDCP_ERR BIT(7)
+#define m_INT_BKSV_FLAG BIT(6)
+#define m_INT_HDCP_OK BIT(4)
+
+#define HDMI_STATUS 0xc8
+#define m_HOTPLUG BIT(7)
+#define m_MASK_INT_HOTPLUG BIT(5)
+#define m_INT_HOTPLUG BIT(1)
+#define v_MASK_INT_HOTPLUG(n) (((n) & 0x1) << 5)
+
+#define HDMI_COLORBAR 0xc9
+
+#define HDMI_PHY_SYNC 0xce
+#define HDMI_PHY_SYS_CTL 0xe0
+#define m_TMDS_CLK_SOURCE BIT(5)
+#define v_TMDS_FROM_PLL (0 << 5)
+#define v_TMDS_FROM_GEN BIT(5)
+#define m_PHASE_CLK BIT(4)
+#define v_DEFAULT_PHASE (0 << 4)
+#define v_SYNC_PHASE BIT(4)
+#define m_TMDS_CURRENT_PWR BIT(3)
+#define v_TURN_ON_CURRENT (0 << 3)
+#define v_CAT_OFF_CURRENT BIT(3)
+#define m_BANDGAP_PWR BIT(2)
+#define v_BANDGAP_PWR_UP (0 << 2)
+#define v_BANDGAP_PWR_DOWN BIT(2)
+#define m_PLL_PWR BIT(1)
+#define v_PLL_PWR_UP (0 << 1)
+#define v_PLL_PWR_DOWN BIT(1)
+#define m_TMDS_CHG_PWR BIT(0)
+#define v_TMDS_CHG_PWR_UP (0 << 0)
+#define v_TMDS_CHG_PWR_DOWN BIT(0)
+
+#define HDMI_PHY_CHG_PWR 0xe1
+#define v_CLK_CHG_PWR(n) (((n) & 1) << 3)
+#define v_DATA_CHG_PWR(n) (((n) & 7) << 0)
+
+#define HDMI_PHY_DRIVER 0xe2
+#define v_CLK_MAIN_DRIVER(n) ((n) << 4)
+#define v_DATA_MAIN_DRIVER(n) ((n) << 0)
+
+#define HDMI_PHY_PRE_EMPHASIS 0xe3
+#define v_PRE_EMPHASIS(n) (((n) & 7) << 4)
+#define v_CLK_PRE_DRIVER(n) (((n) & 3) << 2)
+#define v_DATA_PRE_DRIVER(n) (((n) & 3) << 0)
+
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7
+#define v_FEEDBACK_DIV_LOW(n) ((n) & 0xff)
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8
+#define v_FEEDBACK_DIV_HIGH(n) ((n) & 1)
+
+#define HDMI_PHY_PRE_DIV_RATIO 0xed
+#define v_PRE_DIV_RATIO(n) ((n) & 0x1f)
+
+#define HDMI_CEC_CTRL 0xd0
+#define m_ADJUST_FOR_HISENSE BIT(6)
+#define m_REJECT_RX_BROADCAST BIT(5)
+#define m_BUSFREETIME_ENABLE BIT(2)
+#define m_REJECT_RX BIT(1)
+#define m_START_TX BIT(0)
+
+#define HDMI_CEC_DATA 0xd1
+#define HDMI_CEC_TX_OFFSET 0xd2
+#define HDMI_CEC_RX_OFFSET 0xd3
+#define HDMI_CEC_CLK_H 0xd4
+#define HDMI_CEC_CLK_L 0xd5
+#define HDMI_CEC_TX_LENGTH 0xd6
+#define HDMI_CEC_RX_LENGTH 0xd7
+#define HDMI_CEC_TX_INT_MASK 0xd8
+#define m_TX_DONE BIT(3)
+#define m_TX_NOACK BIT(2)
+#define m_TX_BROADCAST_REJ BIT(1)
+#define m_TX_BUSNOTFREE BIT(0)
+
+#define HDMI_CEC_RX_INT_MASK 0xd9
+#define m_RX_LA_ERR BIT(4)
+#define m_RX_GLITCH BIT(3)
+#define m_RX_DONE BIT(0)
+
+#define HDMI_CEC_TX_INT 0xda
+#define HDMI_CEC_RX_INT 0xdb
+#define HDMI_CEC_BUSFREETIME_L 0xdc
+#define HDMI_CEC_BUSFREETIME_H 0xdd
+#define HDMI_CEC_LOGICADDR 0xde
+
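+/*
+ * GRF registers take a write mask in the upper 16 bits: a bit is only
+ * updated when its corresponding mask bit is also set.
+ */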
+#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
+
+#define RK3036_GRF_SOC_CON2 0x148
+#define RK3036_HDMI_PHSYNC BIT(4)
+#define RK3036_HDMI_PVSYNC BIT(5)
+
+enum inno_hdmi_dev_type {
+ RK3036_HDMI,
+ RK3128_HDMI,
+};
+
struct inno_hdmi_phy_config {
unsigned long pixelclock;
u8 pre_emphasis;
@@ -38,6 +400,7 @@ struct inno_hdmi_phy_config {
};
struct inno_hdmi_variant {
+ enum inno_hdmi_dev_type dev_type;
struct inno_hdmi_phy_config *phy_configs;
struct inno_hdmi_phy_config *default_phy_config;
};
@@ -58,6 +421,7 @@ struct inno_hdmi {
struct clk *pclk;
struct clk *refclk;
void __iomem *regs;
+ struct regmap *grf;
struct drm_connector connector;
struct rockchip_encoder encoder;
@@ -240,22 +604,37 @@ static void inno_hdmi_power_up(struct inno_hdmi *hdmi,
inno_hdmi_sys_power(hdmi, true);
};
-static void inno_hdmi_reset(struct inno_hdmi *hdmi)
+static void inno_hdmi_init_hw(struct inno_hdmi *hdmi)
{
u32 val;
u32 msk;
hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_DIGITAL, v_NOT_RST_DIGITAL);
- udelay(100);
+ usleep_range(100, 150);
hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_ANALOG, v_NOT_RST_ANALOG);
- udelay(100);
+ usleep_range(100, 150);
msk = m_REG_CLK_INV | m_REG_CLK_SOURCE | m_POWER | m_INT_POL;
val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH;
hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val);
inno_hdmi_standby(hdmi);
+
+ /*
+	 * When the controller isn't configured with an accurate video
+	 * timing and no reference clock is available, the TMDS clock
+	 * source is switched to PCLK_HDMI, so we need to initialize the
+	 * TMDS rate to the PCLK rate and reconfigure the DDC clock.
+ */
+ if (hdmi->refclk)
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk));
+ else
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk));
+
+ /* Unmute hotplug interrupt */
+ hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1));
}
static int inno_hdmi_disable_frame(struct drm_connector *connector,
@@ -374,7 +753,15 @@ static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
struct drm_display_mode *mode)
{
- int value;
+ int value, psync;
+
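+	/* on RK3036, the HDMI sync polarities are set through the GRF */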
+ if (hdmi->variant->dev_type == RK3036_HDMI) {
+ psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? RK3036_HDMI_PHSYNC : 0;
+ value = HIWORD_UPDATE(psync, RK3036_HDMI_PHSYNC);
+ psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? RK3036_HDMI_PVSYNC : 0;
+ value |= HIWORD_UPDATE(psync, RK3036_HDMI_PVSYNC);
+ regmap_write(hdmi->grf, RK3036_GRF_SOC_CON2, value);
+ }
/* Set detail external video timing polarity and interlace mode */
value = v_EXTERANL_VIDEO(1);
@@ -752,8 +1139,7 @@ static int inno_hdmi_i2c_write(struct inno_hdmi *hdmi, struct i2c_msg *msgs)
* we assume that each word write to this i2c adapter
* should be the offset of EDID word address.
*/
- if ((msgs->len != 1) ||
- ((msgs->addr != DDC_ADDR) && (msgs->addr != DDC_SEGMENT_ADDR)))
+ if (msgs->len != 1 || (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR))
return -EINVAL;
reinit_completion(&hdmi->i2c->cmp);
@@ -844,10 +1230,9 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
strscpy(adap->name, "Inno HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
- ret = i2c_add_adapter(adap);
+ ret = devm_i2c_add_adapter(hdmi->dev, adap);
if (ret) {
dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
- devm_kfree(hdmi->dev, i2c);
return ERR_PTR(ret);
}
@@ -884,69 +1269,37 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
- hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
- if (IS_ERR(hdmi->pclk)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI pclk clk\n");
- return PTR_ERR(hdmi->pclk);
- }
+ hdmi->pclk = devm_clk_get_enabled(hdmi->dev, "pclk");
+ if (IS_ERR(hdmi->pclk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->pclk), "Unable to get HDMI pclk\n");
- ret = clk_prepare_enable(hdmi->pclk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev,
- "Cannot enable HDMI pclk clock: %d\n", ret);
- return ret;
- }
+ hdmi->refclk = devm_clk_get_optional_enabled(hdmi->dev, "ref");
+ if (IS_ERR(hdmi->refclk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n");
- hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref");
- if (IS_ERR(hdmi->refclk)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI reference clock\n");
- ret = PTR_ERR(hdmi->refclk);
- goto err_disable_pclk;
- }
-
- ret = clk_prepare_enable(hdmi->refclk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev,
- "Cannot enable HDMI reference clock: %d\n", ret);
- goto err_disable_pclk;
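+	/* the GRF is only needed on RK3036, to set the HDMI sync polarities */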
+ if (hdmi->variant->dev_type == RK3036_HDMI) {
+ hdmi->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(hdmi->grf))
+ return dev_err_probe(dev,
+ PTR_ERR(hdmi->grf), "Unable to get rockchip,grf\n");
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_disable_clk;
- }
+ if (irq < 0)
+ return irq;
- inno_hdmi_reset(hdmi);
+ inno_hdmi_init_hw(hdmi);
hdmi->ddc = inno_hdmi_i2c_adapter(hdmi);
- if (IS_ERR(hdmi->ddc)) {
- ret = PTR_ERR(hdmi->ddc);
- hdmi->ddc = NULL;
- goto err_disable_clk;
- }
-
- /*
- * When the controller isn't configured to an accurate
- * video timing and there is no reference clock available,
- * then the TMDS clock source would be switched to PCLK_HDMI,
- * so we need to init the TMDS rate to PCLK rate, and
- * reconfigure the DDC clock.
- */
- if (hdmi->refclk)
- inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk));
- else
- inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk));
+ if (IS_ERR(hdmi->ddc))
+ return PTR_ERR(hdmi->ddc);
ret = inno_hdmi_register(drm, hdmi);
if (ret)
- goto err_put_adapter;
+ return ret;
dev_set_drvdata(dev, hdmi);
- /* Unmute hotplug interrupt */
- hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1));
-
ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq,
inno_hdmi_irq, IRQF_SHARED,
dev_name(dev), hdmi);
@@ -957,12 +1310,6 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
err_cleanup_hdmi:
hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-err_put_adapter:
- i2c_put_adapter(hdmi->ddc);
-err_disable_clk:
- clk_disable_unprepare(hdmi->refclk);
-err_disable_pclk:
- clk_disable_unprepare(hdmi->pclk);
return ret;
}
@@ -973,10 +1320,6 @@ static void inno_hdmi_unbind(struct device *dev, struct device *master,
hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-
- i2c_put_adapter(hdmi->ddc);
- clk_disable_unprepare(hdmi->refclk);
- clk_disable_unprepare(hdmi->pclk);
}
static const struct component_ops inno_hdmi_ops = {
@@ -995,11 +1338,13 @@ static void inno_hdmi_remove(struct platform_device *pdev)
}
static const struct inno_hdmi_variant rk3036_inno_hdmi_variant = {
+ .dev_type = RK3036_HDMI,
.phy_configs = rk3036_hdmi_phy_configs,
.default_phy_config = &rk3036_hdmi_phy_configs[1],
};
static const struct inno_hdmi_variant rk3128_inno_hdmi_variant = {
+ .dev_type = RK3128_HDMI,
.phy_configs = rk3128_hdmi_phy_configs,
.default_phy_config = &rk3128_hdmi_phy_configs[1],
};
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
deleted file mode 100644
index 8b7ef3fac485..000000000000
--- a/drivers/gpu/drm/rockchip/inno_hdmi.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Rockchip Electronics Co., Ltd.
- * Zheng Yang <zhengyang@rock-chips.com>
- * Yakir Yang <ykk@rock-chips.com>
- */
-
-#ifndef __INNO_HDMI_H__
-#define __INNO_HDMI_H__
-
-#define DDC_SEGMENT_ADDR 0x30
-
-#define HDMI_SCL_RATE (100*1000)
-#define DDC_BUS_FREQ_L 0x4b
-#define DDC_BUS_FREQ_H 0x4c
-
-#define HDMI_SYS_CTRL 0x00
-#define m_RST_ANALOG (1 << 6)
-#define v_RST_ANALOG (0 << 6)
-#define v_NOT_RST_ANALOG (1 << 6)
-#define m_RST_DIGITAL (1 << 5)
-#define v_RST_DIGITAL (0 << 5)
-#define v_NOT_RST_DIGITAL (1 << 5)
-#define m_REG_CLK_INV (1 << 4)
-#define v_REG_CLK_NOT_INV (0 << 4)
-#define v_REG_CLK_INV (1 << 4)
-#define m_VCLK_INV (1 << 3)
-#define v_VCLK_NOT_INV (0 << 3)
-#define v_VCLK_INV (1 << 3)
-#define m_REG_CLK_SOURCE (1 << 2)
-#define v_REG_CLK_SOURCE_TMDS (0 << 2)
-#define v_REG_CLK_SOURCE_SYS (1 << 2)
-#define m_POWER (1 << 1)
-#define v_PWR_ON (0 << 1)
-#define v_PWR_OFF (1 << 1)
-#define m_INT_POL (1 << 0)
-#define v_INT_POL_HIGH 1
-#define v_INT_POL_LOW 0
-
-#define HDMI_VIDEO_CONTRL1 0x01
-#define m_VIDEO_INPUT_FORMAT (7 << 1)
-#define m_DE_SOURCE (1 << 0)
-#define v_VIDEO_INPUT_FORMAT(n) (n << 1)
-#define v_DE_EXTERNAL 1
-#define v_DE_INTERNAL 0
-enum {
- VIDEO_INPUT_SDR_RGB444 = 0,
- VIDEO_INPUT_DDR_RGB444 = 5,
- VIDEO_INPUT_DDR_YCBCR422 = 6
-};
-
-#define HDMI_VIDEO_CONTRL2 0x02
-#define m_VIDEO_OUTPUT_COLOR (3 << 6)
-#define m_VIDEO_INPUT_BITS (3 << 4)
-#define m_VIDEO_INPUT_CSP (1 << 0)
-#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6)
-#define v_VIDEO_INPUT_BITS(n) (n << 4)
-#define v_VIDEO_INPUT_CSP(n) (n << 0)
-enum {
- VIDEO_INPUT_12BITS = 0,
- VIDEO_INPUT_10BITS = 1,
- VIDEO_INPUT_REVERT = 2,
- VIDEO_INPUT_8BITS = 3,
-};
-
-#define HDMI_VIDEO_CONTRL 0x03
-#define m_VIDEO_AUTO_CSC (1 << 7)
-#define v_VIDEO_AUTO_CSC(n) (n << 7)
-#define m_VIDEO_C0_C2_SWAP (1 << 0)
-#define v_VIDEO_C0_C2_SWAP(n) (n << 0)
-enum {
- C0_C2_CHANGE_ENABLE = 0,
- C0_C2_CHANGE_DISABLE = 1,
- AUTO_CSC_DISABLE = 0,
- AUTO_CSC_ENABLE = 1,
-};
-
-#define HDMI_VIDEO_CONTRL3 0x04
-#define m_COLOR_DEPTH_NOT_INDICATED (1 << 4)
-#define m_SOF (1 << 3)
-#define m_COLOR_RANGE (1 << 2)
-#define m_CSC (1 << 0)
-#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4)
-#define v_SOF_ENABLE (0 << 3)
-#define v_SOF_DISABLE (1 << 3)
-#define v_COLOR_RANGE_FULL (1 << 2)
-#define v_COLOR_RANGE_LIMITED (0 << 2)
-#define v_CSC_ENABLE 1
-#define v_CSC_DISABLE 0
-
-#define HDMI_AV_MUTE 0x05
-#define m_AVMUTE_CLEAR (1 << 7)
-#define m_AVMUTE_ENABLE (1 << 6)
-#define m_AUDIO_MUTE (1 << 1)
-#define m_VIDEO_BLACK (1 << 0)
-#define v_AVMUTE_CLEAR(n) (n << 7)
-#define v_AVMUTE_ENABLE(n) (n << 6)
-#define v_AUDIO_MUTE(n) (n << 1)
-#define v_VIDEO_MUTE(n) (n << 0)
-
-#define HDMI_VIDEO_TIMING_CTL 0x08
-#define v_HSYNC_POLARITY(n) (n << 3)
-#define v_VSYNC_POLARITY(n) (n << 2)
-#define v_INETLACE(n) (n << 1)
-#define v_EXTERANL_VIDEO(n) (n << 0)
-
-#define HDMI_VIDEO_EXT_HTOTAL_L 0x09
-#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a
-#define HDMI_VIDEO_EXT_HBLANK_L 0x0b
-#define HDMI_VIDEO_EXT_HBLANK_H 0x0c
-#define HDMI_VIDEO_EXT_HDELAY_L 0x0d
-#define HDMI_VIDEO_EXT_HDELAY_H 0x0e
-#define HDMI_VIDEO_EXT_HDURATION_L 0x0f
-#define HDMI_VIDEO_EXT_HDURATION_H 0x10
-#define HDMI_VIDEO_EXT_VTOTAL_L 0x11
-#define HDMI_VIDEO_EXT_VTOTAL_H 0x12
-#define HDMI_VIDEO_EXT_VBLANK 0x13
-#define HDMI_VIDEO_EXT_VDELAY 0x14
-#define HDMI_VIDEO_EXT_VDURATION 0x15
-
-#define HDMI_VIDEO_CSC_COEF 0x18
-
-#define HDMI_AUDIO_CTRL1 0x35
-enum {
- CTS_SOURCE_INTERNAL = 0,
- CTS_SOURCE_EXTERNAL = 1,
-};
-#define v_CTS_SOURCE(n) (n << 7)
-
-enum {
- DOWNSAMPLE_DISABLE = 0,
- DOWNSAMPLE_1_2 = 1,
- DOWNSAMPLE_1_4 = 2,
-};
-#define v_DOWN_SAMPLE(n) (n << 5)
-
-enum {
- AUDIO_SOURCE_IIS = 0,
- AUDIO_SOURCE_SPDIF = 1,
-};
-#define v_AUDIO_SOURCE(n) (n << 3)
-
-#define v_MCLK_ENABLE(n) (n << 2)
-enum {
- MCLK_128FS = 0,
- MCLK_256FS = 1,
- MCLK_384FS = 2,
- MCLK_512FS = 3,
-};
-#define v_MCLK_RATIO(n) (n)
-
-#define AUDIO_SAMPLE_RATE 0x37
-enum {
- AUDIO_32K = 0x3,
- AUDIO_441K = 0x0,
- AUDIO_48K = 0x2,
- AUDIO_882K = 0x8,
- AUDIO_96K = 0xa,
- AUDIO_1764K = 0xc,
- AUDIO_192K = 0xe,
-};
-
-#define AUDIO_I2S_MODE 0x38
-enum {
- I2S_CHANNEL_1_2 = 1,
- I2S_CHANNEL_3_4 = 3,
- I2S_CHANNEL_5_6 = 7,
- I2S_CHANNEL_7_8 = 0xf
-};
-#define v_I2S_CHANNEL(n) ((n) << 2)
-enum {
- I2S_STANDARD = 0,
- I2S_LEFT_JUSTIFIED = 1,
- I2S_RIGHT_JUSTIFIED = 2,
-};
-#define v_I2S_MODE(n) (n)
-
-#define AUDIO_I2S_MAP 0x39
-#define AUDIO_I2S_SWAPS_SPDIF 0x3a
-#define v_SPIDF_FREQ(n) (n)
-
-#define N_32K 0x1000
-#define N_441K 0x1880
-#define N_882K 0x3100
-#define N_1764K 0x6200
-#define N_48K 0x1800
-#define N_96K 0x3000
-#define N_192K 0x6000
-
-#define HDMI_AUDIO_CHANNEL_STATUS 0x3e
-#define m_AUDIO_STATUS_NLPCM (1 << 7)
-#define m_AUDIO_STATUS_USE (1 << 6)
-#define m_AUDIO_STATUS_COPYRIGHT (1 << 5)
-#define m_AUDIO_STATUS_ADDITION (3 << 2)
-#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0)
-#define v_AUDIO_STATUS_NLPCM(n) ((n & 1) << 7)
-#define AUDIO_N_H 0x3f
-#define AUDIO_N_M 0x40
-#define AUDIO_N_L 0x41
-
-#define HDMI_AUDIO_CTS_H 0x45
-#define HDMI_AUDIO_CTS_M 0x46
-#define HDMI_AUDIO_CTS_L 0x47
-
-#define HDMI_DDC_CLK_L 0x4b
-#define HDMI_DDC_CLK_H 0x4c
-
-#define HDMI_EDID_SEGMENT_POINTER 0x4d
-#define HDMI_EDID_WORD_ADDR 0x4e
-#define HDMI_EDID_FIFO_OFFSET 0x4f
-#define HDMI_EDID_FIFO_ADDR 0x50
-
-#define HDMI_PACKET_SEND_MANUAL 0x9c
-#define HDMI_PACKET_SEND_AUTO 0x9d
-#define m_PACKET_GCP_EN (1 << 7)
-#define m_PACKET_MSI_EN (1 << 6)
-#define m_PACKET_SDI_EN (1 << 5)
-#define m_PACKET_VSI_EN (1 << 4)
-#define v_PACKET_GCP_EN(n) ((n & 1) << 7)
-#define v_PACKET_MSI_EN(n) ((n & 1) << 6)
-#define v_PACKET_SDI_EN(n) ((n & 1) << 5)
-#define v_PACKET_VSI_EN(n) ((n & 1) << 4)
-
-#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f
-enum {
- INFOFRAME_VSI = 0x05,
- INFOFRAME_AVI = 0x06,
- INFOFRAME_AAI = 0x08,
-};
-
-#define HDMI_CONTROL_PACKET_ADDR 0xa0
-#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
-enum {
- AVI_COLOR_MODE_RGB = 0,
- AVI_COLOR_MODE_YCBCR422 = 1,
- AVI_COLOR_MODE_YCBCR444 = 2,
- AVI_COLORIMETRY_NO_DATA = 0,
-
- AVI_COLORIMETRY_SMPTE_170M = 1,
- AVI_COLORIMETRY_ITU709 = 2,
- AVI_COLORIMETRY_EXTENDED = 3,
-
- AVI_CODED_FRAME_ASPECT_NO_DATA = 0,
- AVI_CODED_FRAME_ASPECT_4_3 = 1,
- AVI_CODED_FRAME_ASPECT_16_9 = 2,
-
- ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08,
- ACTIVE_ASPECT_RATE_4_3 = 0x09,
- ACTIVE_ASPECT_RATE_16_9 = 0x0A,
- ACTIVE_ASPECT_RATE_14_9 = 0x0B,
-};
-
-#define HDMI_HDCP_CTRL 0x52
-#define m_HDMI_DVI (1 << 1)
-#define v_HDMI_DVI(n) (n << 1)
-
-#define HDMI_INTERRUPT_MASK1 0xc0
-#define HDMI_INTERRUPT_STATUS1 0xc1
-#define m_INT_ACTIVE_VSYNC (1 << 5)
-#define m_INT_EDID_READY (1 << 2)
-
-#define HDMI_INTERRUPT_MASK2 0xc2
-#define HDMI_INTERRUPT_STATUS2 0xc3
-#define m_INT_HDCP_ERR (1 << 7)
-#define m_INT_BKSV_FLAG (1 << 6)
-#define m_INT_HDCP_OK (1 << 4)
-
-#define HDMI_STATUS 0xc8
-#define m_HOTPLUG (1 << 7)
-#define m_MASK_INT_HOTPLUG (1 << 5)
-#define m_INT_HOTPLUG (1 << 1)
-#define v_MASK_INT_HOTPLUG(n) ((n & 0x1) << 5)
-
-#define HDMI_COLORBAR 0xc9
-
-#define HDMI_PHY_SYNC 0xce
-#define HDMI_PHY_SYS_CTL 0xe0
-#define m_TMDS_CLK_SOURCE (1 << 5)
-#define v_TMDS_FROM_PLL (0 << 5)
-#define v_TMDS_FROM_GEN (1 << 5)
-#define m_PHASE_CLK (1 << 4)
-#define v_DEFAULT_PHASE (0 << 4)
-#define v_SYNC_PHASE (1 << 4)
-#define m_TMDS_CURRENT_PWR (1 << 3)
-#define v_TURN_ON_CURRENT (0 << 3)
-#define v_CAT_OFF_CURRENT (1 << 3)
-#define m_BANDGAP_PWR (1 << 2)
-#define v_BANDGAP_PWR_UP (0 << 2)
-#define v_BANDGAP_PWR_DOWN (1 << 2)
-#define m_PLL_PWR (1 << 1)
-#define v_PLL_PWR_UP (0 << 1)
-#define v_PLL_PWR_DOWN (1 << 1)
-#define m_TMDS_CHG_PWR (1 << 0)
-#define v_TMDS_CHG_PWR_UP (0 << 0)
-#define v_TMDS_CHG_PWR_DOWN (1 << 0)
-
-#define HDMI_PHY_CHG_PWR 0xe1
-#define v_CLK_CHG_PWR(n) ((n & 1) << 3)
-#define v_DATA_CHG_PWR(n) ((n & 7) << 0)
-
-#define HDMI_PHY_DRIVER 0xe2
-#define v_CLK_MAIN_DRIVER(n) (n << 4)
-#define v_DATA_MAIN_DRIVER(n) (n << 0)
-
-#define HDMI_PHY_PRE_EMPHASIS 0xe3
-#define v_PRE_EMPHASIS(n) ((n & 7) << 4)
-#define v_CLK_PRE_DRIVER(n) ((n & 3) << 2)
-#define v_DATA_PRE_DRIVER(n) ((n & 3) << 0)
-
-#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7
-#define v_FEEDBACK_DIV_LOW(n) (n & 0xff)
-#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8
-#define v_FEEDBACK_DIV_HIGH(n) (n & 1)
-
-#define HDMI_PHY_PRE_DIV_RATIO 0xed
-#define v_PRE_DIV_RATIO(n) (n & 0x1f)
-
-#define HDMI_CEC_CTRL 0xd0
-#define m_ADJUST_FOR_HISENSE (1 << 6)
-#define m_REJECT_RX_BROADCAST (1 << 5)
-#define m_BUSFREETIME_ENABLE (1 << 2)
-#define m_REJECT_RX (1 << 1)
-#define m_START_TX (1 << 0)
-
-#define HDMI_CEC_DATA 0xd1
-#define HDMI_CEC_TX_OFFSET 0xd2
-#define HDMI_CEC_RX_OFFSET 0xd3
-#define HDMI_CEC_CLK_H 0xd4
-#define HDMI_CEC_CLK_L 0xd5
-#define HDMI_CEC_TX_LENGTH 0xd6
-#define HDMI_CEC_RX_LENGTH 0xd7
-#define HDMI_CEC_TX_INT_MASK 0xd8
-#define m_TX_DONE (1 << 3)
-#define m_TX_NOACK (1 << 2)
-#define m_TX_BROADCAST_REJ (1 << 1)
-#define m_TX_BUSNOTFREE (1 << 0)
-
-#define HDMI_CEC_RX_INT_MASK 0xd9
-#define m_RX_LA_ERR (1 << 4)
-#define m_RX_GLITCH (1 << 3)
-#define m_RX_DONE (1 << 0)
-
-#define HDMI_CEC_TX_INT 0xda
-#define HDMI_CEC_RX_INT 0xdb
-#define HDMI_CEC_BUSFREETIME_L 0xdc
-#define HDMI_CEC_BUSFREETIME_H 0xdd
-#define HDMI_CEC_LOGICADDR 0xde
-
-#endif /* __INNO_HDMI_H__ */
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index f7a460190313..ae4a5ac2299a 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -5,6 +5,9 @@
*/
#include <drm/drm_atomic.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -46,27 +49,20 @@ struct rk3066_hdmi {
struct clk *hclk;
void __iomem *regs;
- struct drm_connector connector;
+ struct drm_bridge bridge;
+ struct drm_connector *connector;
struct rockchip_encoder encoder;
struct rk3066_hdmi_i2c *i2c;
- struct i2c_adapter *ddc;
unsigned int tmdsclk;
struct hdmi_data_info hdmi_data;
};
-static struct rk3066_hdmi *encoder_to_rk3066_hdmi(struct drm_encoder *encoder)
+static struct rk3066_hdmi *bridge_to_rk3066_hdmi(struct drm_bridge *bridge)
{
- struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
-
- return container_of(rkencoder, struct rk3066_hdmi, encoder);
-}
-
-static struct rk3066_hdmi *connector_to_rk3066_hdmi(struct drm_connector *connector)
-{
- return container_of(connector, struct rk3066_hdmi, connector);
+ return container_of(bridge, struct rk3066_hdmi, bridge);
}
static inline u8 hdmi_readb(struct rk3066_hdmi *hdmi, u16 offset)
@@ -161,57 +157,40 @@ static void rk3066_hdmi_set_power_mode(struct rk3066_hdmi *hdmi, int mode)
hdmi->tmdsclk = DEFAULT_PLLA_RATE;
}
-static int
-rk3066_hdmi_upload_frame(struct rk3066_hdmi *hdmi, int setup_rc,
- union hdmi_infoframe *frame, u32 frame_index,
- u32 mask, u32 disable, u32 enable)
+static int rk3066_hdmi_bridge_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
{
- if (mask)
- hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, disable);
-
- hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, frame_index);
-
- if (setup_rc >= 0) {
- u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
- ssize_t rc, i;
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
- rc = hdmi_infoframe_pack(frame, packed_frame,
- sizeof(packed_frame));
- if (rc < 0)
- return rc;
-
- for (i = 0; i < rc; i++)
- hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4,
- packed_frame[i]);
-
- if (mask)
- hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, enable);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(bridge->dev, "Unsupported infoframe type: %u\n", type);
+ return 0;
}
- return setup_rc;
+ hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, HDMI_INFOFRAME_AVI);
+
+ return 0;
}
-static int rk3066_hdmi_config_avi(struct rk3066_hdmi *hdmi,
- struct drm_display_mode *mode)
+static int
+rk3066_hdmi_bridge_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
{
- union hdmi_infoframe frame;
- int rc;
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
+ ssize_t i;
- rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &hdmi->connector, mode);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(bridge->dev, "Unsupported infoframe type: %u\n", type);
+ return 0;
+ }
- if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
- else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
- else
- frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ rk3066_hdmi_bridge_clear_infoframe(bridge, type);
- frame.avi.colorimetry = hdmi->hdmi_data.colorimetry;
- frame.avi.scan_mode = HDMI_SCAN_MODE_NONE;
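+	/* each infoframe byte lands in its own 32-bit register, hence the i * 4 stride */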
+ for (i = 0; i < len; i++)
+ hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4, buffer[i]);
- return rk3066_hdmi_upload_frame(hdmi, rc, &frame,
- HDMI_INFOFRAME_AVI, 0, 0, 0);
+ return 0;
}
static int rk3066_hdmi_config_video_timing(struct rk3066_hdmi *hdmi,
@@ -324,9 +303,27 @@ static void rk3066_hdmi_config_phy(struct rk3066_hdmi *hdmi)
}
static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
- struct drm_display_mode *mode)
+ struct drm_atomic_state *state)
{
- struct drm_display_info *display = &hdmi->connector.display_info;
+ struct drm_bridge *bridge = &hdmi->bridge;
+ struct drm_connector *connector;
+ struct drm_display_info *display;
+ struct drm_display_mode *mode;
+ struct drm_connector_state *new_conn_state;
+ struct drm_crtc_state *new_crtc_state;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+
+ new_conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!new_conn_state))
+ return -EINVAL;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ if (WARN_ON(!new_crtc_state))
+ return -EINVAL;
+
+ display = &connector->display_info;
+ mode = &new_crtc_state->adjusted_mode;
hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
@@ -363,7 +360,7 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
if (display->is_hdmi) {
hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK,
HDMI_VIDEO_MODE_HDMI);
- rk3066_hdmi_config_avi(hdmi, mode);
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
} else {
hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK, 0);
}
@@ -386,15 +383,15 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
return 0;
}
-static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void rk3066_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
int mux, val;
- conn_state = drm_atomic_get_new_connector_state(state, &hdmi->connector);
+ conn_state = drm_atomic_get_new_connector_state(state, hdmi->connector);
if (WARN_ON(!conn_state))
return;
@@ -402,7 +399,7 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
if (WARN_ON(!crtc_state))
return;
- mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
+ mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, &hdmi->encoder.encoder);
if (mux)
val = (HDMI_VIDEO_SEL << 16) | HDMI_VIDEO_SEL;
else
@@ -413,13 +410,13 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
(mux) ? "1" : "0");
- rk3066_hdmi_setup(hdmi, &crtc_state->adjusted_mode);
+ rk3066_hdmi_setup(hdmi, state);
}
-static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void rk3066_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder disable\n");
@@ -450,39 +447,34 @@ rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
static const
struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
.atomic_check = rk3066_hdmi_encoder_atomic_check,
- .atomic_enable = rk3066_hdmi_encoder_enable,
- .atomic_disable = rk3066_hdmi_encoder_disable,
};
static enum drm_connector_status
-rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
+rk3066_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
return (hdmi_readb(hdmi, HDMI_HPG_MENS_STA) & HDMI_HPG_IN_STATUS_HIGH) ?
connector_status_connected : connector_status_disconnected;
}
-static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
+static const struct drm_edid *
+rk3066_hdmi_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
const struct drm_edid *drm_edid;
- int ret = 0;
-
- if (!hdmi->ddc)
- return 0;
- drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
- drm_edid_connector_update(connector, drm_edid);
- ret = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
+ drm_edid = drm_edid_read_ddc(connector, bridge->ddc);
+ if (!drm_edid)
+ dev_dbg(hdmi->dev, "failed to get edid\n");
- return ret;
+ return drm_edid;
}
static enum drm_mode_status
-rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+rk3066_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
{
u32 vic = drm_match_cea_mode(mode);
@@ -492,82 +484,19 @@ rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
-static struct drm_encoder *
-rk3066_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
-
- return &hdmi->encoder.encoder;
-}
-
-static int
-rk3066_hdmi_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- if (maxX > 1920)
- maxX = 1920;
- if (maxY > 1080)
- maxY = 1080;
-
- return drm_helper_probe_single_connector_modes(connector, maxX, maxY);
-}
-
-static void rk3066_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs rk3066_hdmi_connector_funcs = {
- .fill_modes = rk3066_hdmi_probe_single_connector_modes,
- .detect = rk3066_hdmi_connector_detect,
- .destroy = rk3066_hdmi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const
-struct drm_connector_helper_funcs rk3066_hdmi_connector_helper_funcs = {
- .get_modes = rk3066_hdmi_connector_get_modes,
- .mode_valid = rk3066_hdmi_connector_mode_valid,
- .best_encoder = rk3066_hdmi_connector_best_encoder,
+static const struct drm_bridge_funcs rk3066_hdmi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = rk3066_hdmi_bridge_atomic_enable,
+ .atomic_disable = rk3066_hdmi_bridge_atomic_disable,
+ .detect = rk3066_hdmi_bridge_detect,
+ .edid_read = rk3066_hdmi_bridge_edid_read,
+ .hdmi_clear_infoframe = rk3066_hdmi_bridge_clear_infoframe,
+ .hdmi_write_infoframe = rk3066_hdmi_bridge_write_infoframe,
+ .mode_valid = rk3066_hdmi_bridge_mode_valid,
};
-static int
-rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
-{
- struct drm_encoder *encoder = &hdmi->encoder.encoder;
- struct device *dev = hdmi->dev;
-
- encoder->possible_crtcs =
- drm_of_find_possible_crtcs(drm, dev->of_node);
-
- /*
- * If we failed to find the CRTC(s) which this encoder is
- * supposed to be connected to, it's because the CRTC has
- * not been registered yet. Defer probing, and hope that
- * the required CRTC is added later.
- */
- if (encoder->possible_crtcs == 0)
- return -EPROBE_DEFER;
-
- drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
-
- hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_helper_add(&hdmi->connector,
- &rk3066_hdmi_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &hdmi->connector,
- &rk3066_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- hdmi->ddc);
-
- drm_connector_attach_encoder(&hdmi->connector, encoder);
-
- return 0;
-}
static irqreturn_t rk3066_hdmi_hardirq(int irq, void *dev_id)
{
@@ -597,7 +526,7 @@ static irqreturn_t rk3066_hdmi_irq(int irq, void *dev_id)
{
struct rk3066_hdmi *hdmi = dev_id;
- drm_helper_hpd_irq_event(hdmi->connector.dev);
+ drm_helper_hpd_irq_event(hdmi->connector->dev);
return IRQ_HANDLED;
}
@@ -720,7 +649,7 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
strscpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
- ret = i2c_add_adapter(adap);
+ ret = devm_i2c_add_adapter(hdmi->dev, adap);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "cannot add %s I2C adapter\n",
adap->name);
@@ -735,6 +664,66 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
return adap;
}
+static int
+rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder.encoder;
+ struct device *dev = hdmi->dev;
+ int ret;
+
+ encoder->possible_crtcs =
+ drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+
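+	/* expose detect, EDID, HDMI infoframe and HPD handling through the bridge API */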
+ hdmi->bridge.driver_private = hdmi;
+ hdmi->bridge.funcs = &rk3066_hdmi_bridge_funcs;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HPD;
+ hdmi->bridge.of_node = hdmi->dev->of_node;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ hdmi->bridge.vendor = "Rockchip";
+ hdmi->bridge.product = "RK3066 HDMI";
+
+ hdmi->bridge.ddc = rk3066_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->bridge.ddc))
+ return PTR_ERR(hdmi->bridge.ddc);
+
+ ret = devm_drm_bridge_add(dev, &hdmi->bridge);
+ if (ret)
+ return ret;
+
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
+
+ hdmi->connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(hdmi->connector)) {
+ ret = PTR_ERR(hdmi->connector);
+ dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_attach_encoder(hdmi->connector, encoder);
+
+ return 0;
+}
+
static int rk3066_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -781,13 +770,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
/* internal hclk = hdmi_hclk / 25 */
hdmi_writeb(hdmi, HDMI_INTERNAL_CLK_DIVIDER, 25);
- hdmi->ddc = rk3066_hdmi_i2c_adapter(hdmi);
- if (IS_ERR(hdmi->ddc)) {
- ret = PTR_ERR(hdmi->ddc);
- hdmi->ddc = NULL;
- goto err_disable_hclk;
- }
-
rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
usleep_range(999, 1000);
hdmi_writeb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_HOTPLUG);
@@ -798,7 +780,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
ret = rk3066_hdmi_register(drm, hdmi);
if (ret)
- goto err_disable_i2c;
+ goto err_disable_hclk;
dev_set_drvdata(dev, hdmi);
@@ -813,10 +795,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
return 0;
err_cleanup_hdmi:
- hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-err_disable_i2c:
- i2c_put_adapter(hdmi->ddc);
err_disable_hclk:
clk_disable_unprepare(hdmi->hclk);
@@ -828,10 +807,8 @@ static void rk3066_hdmi_unbind(struct device *dev, struct device *master,
{
struct rk3066_hdmi *hdmi = dev_get_drvdata(dev);
- hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
- i2c_put_adapter(hdmi->ddc);
clk_disable_unprepare(hdmi->hclk);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index dcc1f07632c3..2f469d370021 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -30,21 +30,18 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers =
static struct drm_framebuffer *
rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_afbc_framebuffer *afbc_fb;
- const struct drm_format_info *info;
int ret;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
if (!afbc_fb)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
+ ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base,
+ file, info, mode_cmd,
&rockchip_drm_fb_funcs);
if (ret) {
kfree(afbc_fb);
@@ -52,16 +49,9 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
}
if (drm_is_afbc(mode_cmd->modifier[0])) {
- int ret, i;
-
- ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
+ ret = drm_gem_fb_afbc_init(dev, info, mode_cmd, afbc_fb);
if (ret) {
- struct drm_gem_object **obj = afbc_fb->base.obj;
-
- for (i = 0; i < info->num_planes; ++i)
- drm_gem_object_put(obj[i]);
-
- kfree(afbc_fb);
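+			/* a single drm_framebuffer_put() now also releases the GEM objects */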
+ drm_framebuffer_put(&afbc_fb->base);
return ERR_PTR(ret);
}
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index e3596e2b557d..ba6b0528d1e5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -733,11 +733,10 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
WARN_ON(vop->event);
- if (crtc->state->self_refresh_active)
+ if (crtc->state->self_refresh_active) {
rockchip_drm_set_win_enabled(crtc, false);
-
- if (crtc->state->self_refresh_active)
goto out;
+ }
mutex_lock(&vop->vop_lock);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index d0f5fea15e21..186f6452a7d3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -146,25 +146,6 @@ static void vop2_unlock(struct vop2 *vop2)
mutex_unlock(&vop2->vop2_lock);
}
-/*
- * Note:
- * The write mask function is documented but missing on rk3566/8, writes
- * to these bits have no effect. For newer soc(rk3588 and following) the
- * write mask is needed for register writes.
- *
- * GLB_CFG_DONE_EN has no write mask bit.
- *
- */
-static void vop2_cfg_done(struct vop2_video_port *vp)
-{
- struct vop2 *vop2 = vp->vop2;
- u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN;
-
- val |= BIT(vp->id) | (BIT(vp->id) << 16);
-
- regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val);
-}
-
static void vop2_win_disable(struct vop2_win *win)
{
vop2_win_write(win, VOP2_WIN_ENABLE, 0);
@@ -854,6 +835,11 @@ static void vop2_enable(struct vop2 *vop2)
if (vop2->version == VOP_VERSION_RK3588)
rk3588_vop2_power_domain_enable_all(vop2);
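+	/* cache the shared overlay mux registers so later reconfiguration can be sequenced */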
+ if (vop2->version <= VOP_VERSION_RK3588) {
+ vop2->old_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ vop2->old_port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+ }
+
vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
/*
@@ -2422,6 +2408,10 @@ static int vop2_create_crtcs(struct vop2 *vop2)
break;
}
}
+
+ if (!vp->primary_plane)
+ return dev_err_probe(drm->dev, -ENOENT,
+ "no primary plane for vp %d\n", i);
}
/* Register all unused window as overlay plane */
@@ -2724,6 +2714,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return dev_err_probe(drm->dev, vop2->irq, "cannot find irq for vop2\n");
mutex_init(&vop2->vop2_lock);
+ mutex_init(&vop2->ovl_lock);
ret = devm_request_irq(dev, vop2->irq, vop2_isr, IRQF_SHARED, dev_name(dev), vop2);
if (ret)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 680bedbb770e..fa5c56f16047 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -334,6 +334,19 @@ struct vop2 {
/* optional internal rgb encoder */
struct rockchip_rgb *rgb;
+ /*
+	 * Used to record the layer selection configuration on rk356x/rk3588,
+	 * as the RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL registers are
+	 * shared by all the Video Ports.
+ */
+ u32 old_layer_sel;
+ u32 old_port_sel;
+ /*
+	 * Ensure that updates to these two registers (RK3568_OVL_LAYER_SEL and
+	 * RK3568_OVL_PORT_SEL) take effect in sequence.
+ */
+ struct mutex ovl_lock;
+
/* must be put at the end of the struct */
struct vop2_win win[];
};
@@ -710,6 +723,7 @@ enum dst_factor_mode {
#define VOP2_COLOR_KEY_MASK BIT(31)
+#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL GENMASK(31, 30)
#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD BIT(28)
#define RK3568_OVL_CTRL__YUV_MODE(vp) BIT(vp)
@@ -726,6 +740,7 @@ enum dst_factor_mode {
#define RK3588_OVL_PORT_SEL__CLUSTER2 GENMASK(21, 20)
#define RK3568_OVL_PORT_SEL__CLUSTER1 GENMASK(19, 18)
#define RK3568_OVL_PORT_SEL__CLUSTER0 GENMASK(17, 16)
+#define RK3588_OVL_PORT_SET__PORT3_MUX GENMASK(15, 12)
#define RK3568_OVL_PORT_SET__PORT2_MUX GENMASK(11, 8)
#define RK3568_OVL_PORT_SET__PORT1_MUX GENMASK(7, 4)
#define RK3568_OVL_PORT_SET__PORT0_MUX GENMASK(3, 0)
@@ -830,4 +845,23 @@ static inline struct vop2_win *to_vop2_win(struct drm_plane *p)
return container_of(p, struct vop2_win, base);
}
+/*
+ * Note:
+ * The write mask function is documented but missing on rk3566/8; writes
+ * to these bits have no effect. For newer SoCs (rk3588 and following) the
+ * write mask is needed for register writes.
+ *
+ * GLB_CFG_DONE_EN has no write mask bit.
+ */
+static inline void vop2_cfg_done(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN;
+
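+	/* set the config-done bit for this port together with its write-mask bit */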
+ val |= BIT(vp->id) | (BIT(vp->id) << 16);
+
+ regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val);
+}
+
#endif /* _ROCKCHIP_DRM_VOP2_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index a673779de3d2..2411260db51d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -56,14 +56,13 @@ struct rockchip_lvds {
struct drm_device *drm_dev;
struct drm_panel *panel;
struct drm_bridge *bridge;
- struct drm_connector connector;
struct rockchip_encoder encoder;
struct dev_pin_info *pins;
};
-static inline struct rockchip_lvds *connector_to_lvds(struct drm_connector *connector)
+static inline struct rockchip_lvds *bridge_to_lvds(struct drm_bridge *bridge)
{
- return container_of(connector, struct rockchip_lvds, connector);
+ return (struct rockchip_lvds *)bridge->driver_private;
}
static inline struct rockchip_lvds *encoder_to_lvds(struct drm_encoder *encoder)
@@ -106,25 +105,21 @@ static inline int rockchip_lvds_name_to_output(const char *s)
return -EINVAL;
}
-static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
+static int
+rockchip_lvds_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct rockchip_lvds *lvds = connector_to_lvds(connector);
+	struct rockchip_lvds *lvds = bridge_to_lvds(bridge);
struct drm_panel *panel = lvds->panel;
return drm_panel_get_modes(panel, connector);
}
static const
-struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
- .get_modes = rockchip_lvds_connector_get_modes,
+struct drm_bridge_funcs rockchip_lvds_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .get_modes = rockchip_lvds_bridge_get_modes,
};
static int
@@ -606,26 +601,23 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
- connector = &lvds->connector;
if (lvds->panel) {
- connector->dpms = DRM_MODE_DPMS_OFF;
- ret = drm_connector_init(drm_dev, connector,
- &rockchip_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- if (ret < 0) {
- drm_err(drm_dev,
- "failed to initialize connector: %d\n", ret);
+ lvds->bridge = drm_panel_bridge_add_typed(lvds->panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds->bridge)) {
+ ret = PTR_ERR(lvds->bridge);
goto err_free_encoder;
}
+ }
- drm_connector_helper_add(connector,
- &rockchip_lvds_connector_helper_funcs);
- } else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (lvds->bridge) {
+ lvds->bridge->driver_private = lvds;
+ lvds->bridge->ops = DRM_BRIDGE_OP_MODES;
+ lvds->bridge->funcs = &rockchip_lvds_bridge_funcs;
+
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
- goto err_free_encoder;
+ goto err_free_bridge;
connector = drm_bridge_connector_init(lvds->drm_dev, encoder);
if (IS_ERR(connector)) {
@@ -633,14 +625,14 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
"failed to initialize bridge connector: %pe\n",
connector);
ret = PTR_ERR(connector);
- goto err_free_encoder;
+ goto err_free_bridge;
}
- }
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret < 0) {
- drm_err(drm_dev, "failed to attach encoder: %d\n", ret);
- goto err_free_connector;
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ drm_err(drm_dev, "failed to attach encoder: %d\n", ret);
+ goto err_free_bridge;
+ }
}
pm_runtime_enable(dev);
@@ -649,8 +641,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
return 0;
-err_free_connector:
- drm_connector_cleanup(connector);
+err_free_bridge:
+ drm_panel_bridge_remove(lvds->bridge);
err_free_encoder:
drm_encoder_cleanup(encoder);
err_put_remote:
@@ -670,8 +662,6 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master,
encoder_funcs = lvds->soc_data->helper_funcs;
encoder_funcs->disable(&lvds->encoder.encoder);
pm_runtime_disable(dev);
- drm_connector_cleanup(&lvds->connector);
- drm_encoder_cleanup(&lvds->encoder.encoder);
}
static const struct component_ops rockchip_lvds_component_ops = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 0a2840cbe8e2..45c5e3987813 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -2052,12 +2052,55 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
}
}
+static u32 rk3568_vop2_read_port_mux(struct vop2 *vop2)
+{
+ return vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+}
+
+static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2)
+{
+ u32 port_mux_sel;
+ int ret;
+
+ /*
+	 * Spin until the previous port_mux configuration is done.
+ */
+ ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel,
+ port_mux_sel == vop2->old_port_sel, 0, 50 * 1000);
+ if (ret)
+ DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n",
+ port_mux_sel, vop2->old_port_sel);
+}
+
+static u32 rk3568_vop2_read_layer_cfg(struct vop2 *vop2)
+{
+ return vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+}
+
+static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg)
+{
+ u32 atv_layer_cfg;
+ int ret;
+
+ /*
+ * Spin until the previous layer configuration is done.
+ */
+ ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg,
+ atv_layer_cfg == cfg, 0, 50 * 1000);
+ if (ret)
+ DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n",
+ atv_layer_cfg, cfg);
+}
+
static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
{
struct vop2 *vop2 = vp->vop2;
struct drm_plane *plane;
u32 layer_sel = 0;
u32 port_sel;
+ u32 old_layer_sel = 0;
+ u32 atv_layer_sel = 0;
+ u32 old_port_sel = 0;
u8 layer_id;
u8 old_layer_id;
u8 layer_sel_id;
@@ -2069,16 +2112,18 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
struct vop2_video_port *vp2 = &vop2->vps[2];
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
+ mutex_lock(&vop2->ovl_lock);
ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
- ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL;
+
if (vcstate->yuv_overlay)
ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
else
ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id);
- vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
-
- port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+ old_port_sel = vop2->old_port_sel;
+ port_sel = old_port_sel;
port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;
if (vp0->nlayers)
@@ -2099,7 +2144,13 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
else
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);
- layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ /* Fixed value for rk3588 */
+ if (vop2->version == VOP_VERSION_RK3588)
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SET__PORT3_MUX, 7);
+
+ atv_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ old_layer_sel = vop2->old_layer_sel;
+ layer_sel = old_layer_sel;
ofs = 0;
for (i = 0; i < vp->id; i++)
@@ -2183,8 +2234,37 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
old_win->data->layer_sel_id[vp->id]);
}
+ vop2->old_layer_sel = layer_sel;
+ vop2->old_port_sel = port_sel;
+ /*
+	 * RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL are shared by all Video Ports,
+	 * and their configuration takes effect on one Video Port's vsync.
+	 * When migrating a layer or changing the zpos of layers, two things must be
+	 * observed and followed:
+	 * 1. When a layer is migrated from one VP to another, the configuration of the
+	 *    layer can only take effect after the port mux configuration is enabled.
+	 *
+	 * 2. When we change the zpos of layers, we must ensure that the change for the
+	 *    previous VP takes effect before we proceed to change the next VP. Otherwise,
+	 *    the new configuration might overwrite the previous one for that VP, or the
+	 *    configuration of the previous VP could take effect along with the vsync of
+	 *    the new VP.
+ */
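+	/* have this VP's vsync latch the shared layer/port mux update */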
+ if (layer_sel != old_layer_sel || port_sel != old_port_sel)
+ ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id);
+ vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
+
+ if (port_sel != old_port_sel) {
+ vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
+ vop2_cfg_done(vp);
+ rk3568_vop2_wait_for_port_mux_done(vop2);
+ }
+
+ if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel)
+ rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
+
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
- vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
+ mutex_unlock(&vop2->ovl_lock);
}
static void rk3568_vop2_setup_dly_for_windows(struct vop2_video_port *vp)
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 4e2099d86517..d1f788763318 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -906,21 +906,21 @@ static const struct vop_data rk3366_vop = {
static const struct vop_output rk3399_output = {
.dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
- .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
- .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
- .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
- .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .rgb_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 31),
.dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
+ .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
- .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
- .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
- .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
- .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
- .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
+ .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15),
+ .mipi_dual_channel_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 3),
};
static const struct vop_common rk3399_common = {
@@ -975,23 +975,23 @@ static const struct vop_win_phy rk3399_win0_data = {
.data_formats = formats_win_full_10,
.nformats = ARRAY_SIZE(formats_win_full_10),
.format_modifiers = format_modifiers_win_full_afbc,
- .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
- .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
- .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4),
- .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
- .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
- .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
- .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
- .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
- .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
- .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
- .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
- .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
- .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
+ .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1),
+ .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4),
+ .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12),
+ .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15),
+ .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21),
+ .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0),
};
static const struct vop_win_phy rk3399_win1_data = {
@@ -999,23 +999,23 @@ static const struct vop_win_phy rk3399_win1_data = {
.data_formats = formats_win_full_10,
.nformats = ARRAY_SIZE(formats_win_full_10),
.format_modifiers = format_modifiers_win_full,
- .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
- .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
- .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4),
- .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
- .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
- .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
- .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
- .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
- .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
- .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
- .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
- .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
- .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
+ .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1),
+ .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4),
+ .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12),
+ .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15),
+ .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21),
+ .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0),
};
/*
diff --git a/drivers/gpu/drm/scheduler/.kunitconfig b/drivers/gpu/drm/scheduler/.kunitconfig
new file mode 100644
index 000000000000..cece53609fcf
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/.kunitconfig
@@ -0,0 +1,12 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_SCHED_KUNIT_TEST=y
+CONFIG_EXPERT=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCKDEP=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_LIST=y
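A hedged usage note, assuming the stock KUnit tooling: the config above can be consumed directly by the KUnit wrapper, for example via ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/scheduler/.kunitconfig, which builds a UML kernel with these options and runs the scheduler test suites.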
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index 53863621829f..6e13e4c63e9d 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -23,3 +23,5 @@
gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index f56e77e7f6d0..261713dd7d5a 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -32,78 +32,123 @@
#define TRACE_SYSTEM gpu_scheduler
#define TRACE_INCLUDE_FILE gpu_scheduler_trace
+/**
+ * DOC: uAPI trace events
+ *
+ * ``drm_sched_job_queue``, ``drm_sched_job_run``, ``drm_sched_job_add_dep``,
+ * ``drm_sched_job_done`` and ``drm_sched_job_unschedulable`` are considered
+ * stable uAPI.
+ *
+ * Common trace events attributes:
+ *
+ * * ``dev`` - the dev_name() of the device running the job.
+ *
+ * * ``ring`` - the hardware ring running the job. Together with ``dev`` it
+ * uniquely identifies where the job is going to be executed.
+ *
+ * * ``fence`` - the &struct dma_fence.context and the &struct dma_fence.seqno of
+ * &struct drm_sched_fence.finished
+ *
+ * All the events depend on drm_sched_job_arm() having been called already for
+ * the job because they use &struct drm_sched_job.sched or
+ * &struct drm_sched_job.s_fence.
+ */
+
DECLARE_EVENT_CLASS(drm_sched_job,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity),
TP_STRUCT__entry(
- __field(struct drm_sched_entity *, entity)
- __field(struct dma_fence *, fence)
__string(name, sched_job->sched->name)
- __field(uint64_t, id)
__field(u32, job_count)
__field(int, hw_job_count)
+ __string(dev, dev_name(sched_job->sched->dev))
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, client_id)
),
TP_fast_assign(
- __entry->entity = entity;
- __entry->id = sched_job->id;
- __entry->fence = &sched_job->s_fence->finished;
__assign_str(name);
__entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
&sched_job->sched->credit_count);
+ __assign_str(dev);
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->client_id = sched_job->s_fence->drm_client_id;
),
- TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->id,
- __entry->fence, __get_str(name),
- __entry->job_count, __entry->hw_job_count)
+ TP_printk("dev=%s, fence=%llu:%llu, ring=%s, job count:%u, hw job count:%d, client_id:%llu",
+ __get_str(dev),
+ __entry->fence_context, __entry->fence_seqno, __get_str(name),
+ __entry->job_count, __entry->hw_job_count, __entry->client_id)
);
-DEFINE_EVENT(drm_sched_job, drm_sched_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_queue,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
);
-DEFINE_EVENT(drm_sched_job, drm_run_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_run,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
);
-TRACE_EVENT(drm_sched_process_job,
+TRACE_EVENT(drm_sched_job_done,
TP_PROTO(struct drm_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct dma_fence *, fence)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
),
TP_fast_assign(
- __entry->fence = &fence->finished;
+ __entry->fence_context = fence->finished.context;
+ __entry->fence_seqno = fence->finished.seqno;
),
- TP_printk("fence=%p signaled", __entry->fence)
+ TP_printk("fence=%llu:%llu signaled",
+ __entry->fence_context, __entry->fence_seqno)
);
-TRACE_EVENT(drm_sched_job_wait_dep,
+TRACE_EVENT(drm_sched_job_add_dep,
+ TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
+ TP_ARGS(sched_job, fence),
+ TP_STRUCT__entry(
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->ctx = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("fence=%llu:%llu depends on fence=%llu:%llu",
+ __entry->fence_context, __entry->fence_seqno,
+ __entry->ctx, __entry->seqno)
+);
+
+TRACE_EVENT(drm_sched_job_unschedulable,
TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __string(name, sched_job->sched->name)
- __field(uint64_t, id)
- __field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
- __assign_str(name);
- __entry->id = sched_job->id;
- __entry->fence = fence;
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
- __get_str(name), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("fence=%llu:%llu depends on unsignalled fence=%llu:%llu",
+ __entry->fence_context, __entry->fence_seqno,
+ __entry->ctx, __entry->seqno)
);
#endif /* _GPU_SCHED_TRACE_H_ */
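For illustration, a minimal sketch (not part of the patch) of a driver submission path and where the stable trace events fire; all foo_* names are hypothetical, and only the drm_sched_* calls match the interfaces changed in this series.

#include <drm/drm_file.h>
#include <drm/gpu_scheduler.h>

struct foo_job {
	struct drm_sched_job base;
	/* hypothetical driver payload would live here */
};

static int foo_submit_job(struct foo_job *fjob,
			  struct drm_sched_entity *entity,
			  struct drm_file *file_priv)
{
	int ret;

	/* Allocates the scheduler fence; no trace event fires yet. */
	ret = drm_sched_job_init(&fjob->base, entity, 1, fjob,
				 file_priv->client_id);
	if (ret)
		return ret;

	/* Point of no return: initializes the fence sequence number. */
	drm_sched_job_arm(&fjob->base);

	/*
	 * Emits drm_sched_job_queue, plus one drm_sched_job_add_dep per
	 * dependency. drm_sched_job_run fires when run_job() is invoked,
	 * drm_sched_job_done once the backend fence signals, and
	 * drm_sched_job_unschedulable if the job has to wait for an
	 * unsignalled dependency.
	 */
	drm_sched_entity_push_job(&fjob->base);

	return 0;
}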
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index bd39db7bb240..8867b95ab089 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -21,7 +21,7 @@
*
*/
-#include <linux/kthread.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/completion.h>
@@ -176,6 +176,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+ drm_sched_fence_scheduled(job->s_fence, NULL);
drm_sched_fence_finished(job->s_fence, -ESRCH);
WARN_ON(job->s_fence->parent);
job->sched->ops->free_job(job);
@@ -354,17 +355,6 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
-/* drm_sched_entity_clear_dep - callback to clear the entities dependency */
-static void drm_sched_entity_clear_dep(struct dma_fence *f,
- struct dma_fence_cb *cb)
-{
- struct drm_sched_entity *entity =
- container_of(cb, struct drm_sched_entity, cb);
-
- entity->dependency = NULL;
- dma_fence_put(f);
-}
-
/*
* drm_sched_entity_wakeup - callback to clear the entity's dependency and
* wake up the scheduler
@@ -375,7 +365,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
struct drm_sched_entity *entity =
container_of(cb, struct drm_sched_entity, cb);
- drm_sched_entity_clear_dep(f, cb);
+ entity->dependency = NULL;
+ dma_fence_put(f);
drm_sched_wakeup(entity->rq->sched);
}
@@ -428,13 +419,6 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
fence = dma_fence_get(&s_fence->scheduled);
dma_fence_put(entity->dependency);
entity->dependency = fence;
- if (!dma_fence_add_callback(fence, &entity->cb,
- drm_sched_entity_clear_dep))
- return true;
-
- /* Ignore it when it is already scheduled */
- dma_fence_put(fence);
- return false;
}
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
@@ -477,10 +461,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
while ((entity->dependency =
drm_sched_job_dependency(sched_job, entity))) {
- trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
-
- if (drm_sched_entity_add_dependency_cb(entity))
+ if (drm_sched_entity_add_dependency_cb(entity)) {
+ trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
return NULL;
+ }
}
/* skip jobs from entity that marked guilty */
@@ -545,10 +529,10 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
return;
/*
- * Only when the queue is empty are we guaranteed that the scheduler
- * thread cannot change ->last_scheduled. To enforce ordering we need
- * a read barrier here. See drm_sched_entity_pop_job() for the other
- * side.
+ * Only when the queue is empty are we guaranteed that
+ * drm_sched_run_job_work() cannot change entity->last_scheduled. To
+ * enforce ordering we need a read barrier here. See
+ * drm_sched_entity_pop_job() for the other side.
*/
smp_rmb();
@@ -586,7 +570,15 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
bool first;
ktime_t submit_ts;
- trace_drm_sched_job(sched_job, entity);
+ trace_drm_sched_job_queue(sched_job, entity);
+
+ if (trace_drm_sched_job_add_dep_enabled()) {
+ struct dma_fence *entry;
+ unsigned long index;
+
+ xa_for_each(&sched_job->dependencies, index, entry)
+ trace_drm_sched_job_add_dep(sched_job, entry);
+ }
atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index e971528504a5..9391d6f0dc01 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -21,7 +21,7 @@
*
*/
-#include <linux/kthread.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -206,7 +206,8 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
EXPORT_SYMBOL(to_drm_sched_fence);
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
- void *owner)
+ void *owner,
+ u64 drm_client_id)
{
struct drm_sched_fence *fence = NULL;
@@ -215,6 +216,7 @@ struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
+ fence->drm_client_id = drm_client_id;
spin_lock_init(&fence->lock);
return fence;
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
index 599cf6e1bb74..7ea5a6736f98 100644
--- a/drivers/gpu/drm/scheduler/sched_internal.h
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -24,7 +24,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity,
- void *owner);
+ void *owner, u64 drm_client_id);
void drm_sched_fence_init(struct drm_sched_fence *fence,
struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index bfea608a7106..e2cda28a1af4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -66,6 +66,7 @@
* This implies waiting for previously executed jobs.
*/
+#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
@@ -83,12 +84,6 @@
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map drm_sched_lockdep_map = {
- .name = "drm_sched_lockdep_map"
-};
-#endif
-
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
/**
@@ -268,38 +263,14 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
- if (drm_sched_entity_is_ready(entity)) {
- /* If we can't queue yet, preserve the current
- * entity in terms of fairness.
- */
- if (!drm_sched_can_queue(sched, entity)) {
- spin_unlock(&rq->lock);
- return ERR_PTR(-ENOSPC);
- }
-
- rq->current_entity = entity;
- reinit_completion(&entity->entity_idle);
- spin_unlock(&rq->lock);
- return entity;
- }
+ if (drm_sched_entity_is_ready(entity))
+ goto found;
}
}
list_for_each_entry(entity, &rq->entities, list) {
- if (drm_sched_entity_is_ready(entity)) {
- /* If we can't queue yet, preserve the current entity in
- * terms of fairness.
- */
- if (!drm_sched_can_queue(sched, entity)) {
- spin_unlock(&rq->lock);
- return ERR_PTR(-ENOSPC);
- }
-
- rq->current_entity = entity;
- reinit_completion(&entity->entity_idle);
- spin_unlock(&rq->lock);
- return entity;
- }
+ if (drm_sched_entity_is_ready(entity))
+ goto found;
if (entity == rq->current_entity)
break;
@@ -308,6 +279,22 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
spin_unlock(&rq->lock);
return NULL;
+
+found:
+ if (!drm_sched_can_queue(sched, entity)) {
+ /*
+ * If scheduler cannot take more jobs signal the caller to not
+ * consider lower priority queues.
+ */
+ entity = ERR_PTR(-ENOSPC);
+ } else {
+ rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
+ }
+
+ spin_unlock(&rq->lock);
+
+ return entity;
}
/**
@@ -379,11 +366,16 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job;
- spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
if (job && dma_fence_is_signaled(&job->s_fence->finished))
__drm_sched_run_free_queue(sched);
+}
+
+static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched)
+{
+ spin_lock(&sched->job_list_lock);
+ drm_sched_run_free_queue(sched);
spin_unlock(&sched->job_list_lock);
}
@@ -391,7 +383,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
- * Finish the job's fence and wake up the worker thread.
+ * Finish the job's fence and resubmit the work items.
*/
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
@@ -401,7 +393,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
atomic_sub(s_job->credits, &sched->credit_count);
atomic_dec(sched->score);
- trace_drm_sched_process_job(s_fence);
+ trace_drm_sched_job_done(s_fence);
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
@@ -536,11 +528,37 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
spin_unlock(&sched->job_list_lock);
}
+/**
+ * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout
+ * @sched: scheduler instance
+ * @job: job to be reinserted on the pending list
+ *
+ * In the case of a "false timeout" (when a timeout occurs but the GPU isn't
+ * hung and is making progress), the scheduler must reinsert the job back into
+ * @sched->pending_list. Otherwise, the job and its resources won't be freed
+ * through the &struct drm_sched_backend_ops.free_job callback.
+ *
+ * This function must be used in "false timeout" cases only.
+ */
+static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched,
+ struct drm_sched_job *job)
+{
+ spin_lock(&sched->job_list_lock);
+ list_add(&job->list, &sched->pending_list);
+
+ /* After reinserting the job, the scheduler enqueues the free-job work
+ * again if ready. Otherwise, a signaled job could be added to the
+ * pending list, but never freed.
+ */
+ drm_sched_run_free_queue(sched);
+ spin_unlock(&sched->job_list_lock);
+}
+
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
+ enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
@@ -551,9 +569,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
if (job) {
/*
- * Remove the bad job so it cannot be freed by concurrent
- * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
- * is parked at which point it's safe.
+ * Remove the bad job so it cannot be freed by a concurrent
+ * &struct drm_sched_backend_ops.free_job. It will be
+ * reinserted after the scheduler's work items have been
+ * cancelled, at which point it's safe.
*/
list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
@@ -568,6 +587,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->free_job(job);
sched->free_guilty = false;
}
+
+ if (status == DRM_GPU_SCHED_STAT_NO_HANG)
+ drm_sched_job_reinsert_on_false_timeout(sched, job);
} else {
spin_unlock(&sched->job_list_lock);
}
@@ -590,6 +612,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
* This function is typically used for reset recovery (see the docu of
* drm_sched_backend_ops.timedout_job() for details). Do not call it for
* scheduler teardown, i.e., before calling drm_sched_fini().
+ *
+ * As it's only used for reset recovery, drivers must not call this function
+ * in their &struct drm_sched_backend_ops.timedout_job callback when they
+ * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
@@ -599,10 +625,10 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
/*
* Reinsert back the bad job here - now it's safe as
- * drm_sched_get_finished_job cannot race against us and release the
+ * drm_sched_get_finished_job() cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress
- * (earlier) cleanups and drm_sched_get_finished_job will not be called
- * now until the scheduler thread is unparked.
+ * (earlier) cleanups and drm_sched_get_finished_job() will not be
+ * called now until the scheduler's work items are submitted again.
*/
if (bad && bad->sched == sched)
/*
@@ -615,7 +641,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Iterate the job list from the last to the first entry and either deactivate
* their HW callbacks or remove them from the pending list if they have already
* signaled.
- * This iteration is thread safe as sched thread is stopped.
+ * This iteration is thread safe as the scheduler's work items have been
+ * cancelled.
*/
list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
list) {
@@ -674,15 +701,19 @@ EXPORT_SYMBOL(drm_sched_stop);
* drm_sched_backend_ops.timedout_job() for details). Do not call it for
* scheduler startup. The scheduler itself is fully operational after
* drm_sched_init() succeeded.
+ *
+ * As it's only used for reset recovery, drivers must not call this function
+ * in their &struct drm_sched_backend_ops.timedout_job callback when they
+ * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
*/
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
{
struct drm_sched_job *s_job, *tmp;
/*
- * Locking the list is not required here as the sched thread is parked
- * so no new jobs are being inserted or removed. Also concurrent
- * GPU recovers can't run in parallel.
+ * Locking the list is not required here as the scheduler's work items
+ * are currently not running, so no new jobs are being inserted or
+ * removed. Also, concurrent GPU recoveries can't run in parallel.
*/
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
@@ -764,6 +795,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
* @credits: the number of credits this job contributes to the schedulers
* credit limit
* @owner: job owner for debugging
+ * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
+ * events)
*
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
@@ -784,7 +817,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- u32 credits, void *owner)
+ u32 credits, void *owner,
+ uint64_t drm_client_id)
{
if (!entity->rq) {
/* This will most likely be followed by missing frames
@@ -810,7 +844,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
job->entity = entity;
job->credits = credits;
- job->s_fence = drm_sched_fence_alloc(entity, owner);
+ job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
if (!job->s_fence)
return -ENOMEM;
@@ -828,11 +862,15 @@ EXPORT_SYMBOL(drm_sched_job_init);
*
* This arms a scheduler job for execution. Specifically it initializes the
* &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
- * or other places that need to track the completion of this job.
+ * or other places that need to track the completion of this job. It also
+ * initializes sequence numbers, which are fundamental for fence ordering.
*
* Refer to drm_sched_entity_push_job() documentation for locking
* considerations.
*
+ * Once this function has been called, you *must* submit @job with
+ * drm_sched_entity_push_job().
+ *
* This can only be called if drm_sched_job_init() succeeded.
*/
void drm_sched_job_arm(struct drm_sched_job *job)
@@ -846,7 +884,6 @@ void drm_sched_job_arm(struct drm_sched_job *job)
job->sched = sched;
job->s_priority = entity->priority;
- job->id = atomic64_inc_return(&sched->job_id_count);
drm_sched_fence_init(job->s_fence, job->entity);
}
@@ -1015,13 +1052,14 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency);
* Cleans up the resources allocated with drm_sched_job_init().
*
* Drivers should call this from their error unwind code if @job is aborted
- * before it was submitted to an entity with drm_sched_entity_push_job().
+ * before drm_sched_job_arm() is called.
*
- * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
- * it is up to the driver to ensure that fences that were exposed to external
- * parties get signaled. drm_sched_job_cleanup() does not ensure this.
+ * drm_sched_job_arm() is a point of no return since it initializes the fences
+ * and their sequence numbers. Once that function has been called, you *must*
+ * submit the job with drm_sched_entity_push_job() and cannot simply abort it by
+ * calling drm_sched_job_cleanup().
*
- * This function must also be called in &struct drm_sched_backend_ops.free_job
+ * This function should be called in the &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
@@ -1029,10 +1067,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
unsigned long index;
if (kref_read(&job->s_fence->finished.refcount)) {
- /* drm_sched_job_arm() has been called */
+ /* The job has been processed by the scheduler, i.e.,
+ * drm_sched_job_arm() and drm_sched_entity_push_job() have
+ * been called.
+ */
dma_fence_put(&job->s_fence->finished);
} else {
- /* aborted job before arming */
+ /* The job was aborted before it was committed to be run;
+ * notably, drm_sched_job_arm() has not been called.
+ */
drm_sched_fence_free(job->s_fence);
}
@@ -1183,7 +1226,7 @@ static void drm_sched_free_job_work(struct work_struct *w)
if (job)
sched->ops->free_job(job);
- drm_sched_run_free_queue(sched);
+ drm_sched_run_free_queue_unlocked(sched);
drm_sched_run_job_queue(sched);
}
@@ -1219,21 +1262,24 @@ static void drm_sched_run_job_work(struct work_struct *w)
atomic_add(sched_job->credits, &sched->credit_count);
drm_sched_job_begin(sched_job);
- trace_drm_run_job(sched_job, entity);
+ trace_drm_sched_job_run(sched_job, entity);
+ /*
+ * The run_job() callback must by definition return a fence whose
+ * refcount has been incremented for the scheduler already.
+ */
fence = sched->ops->run_job(sched_job);
complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence, fence);
if (!IS_ERR_OR_NULL(fence)) {
- /* Drop for original kref_init of the fence */
- dma_fence_put(fence);
-
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_job_done_cb);
if (r == -ENOENT)
drm_sched_job_done(sched_job, fence->error);
else if (r)
DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+
+ dma_fence_put(fence);
} else {
drm_sched_job_done(sched_job, IS_ERR(fence) ?
PTR_ERR(fence) : 0);
@@ -1243,6 +1289,25 @@ static void drm_sched_run_job_work(struct work_struct *w)
drm_sched_run_job_queue(sched);
}
+static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
+{
+#if (IS_ENABLED(CONFIG_LOCKDEP))
+ static struct lockdep_map map = {
+ .name = "drm_sched_lockdep_map"
+ };
+
+ /*
+ * Avoid leaking a lockdep map on each drm sched creation and
+ * destruction by using a single lockdep map for all drm sched
+ * allocated submit_wq.
+ */
+
+ return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
+#else
+ return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+#endif
+}
+
/**
* drm_sched_init - Init a gpu scheduler instance
*
@@ -1283,13 +1348,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
sched->submit_wq = args->submit_wq;
sched->own_submit_wq = false;
} else {
-#ifdef CONFIG_LOCKDEP
- sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name,
- WQ_MEM_RECLAIM,
- &drm_sched_lockdep_map);
-#else
- sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM);
-#endif
+ sched->submit_wq = drm_sched_alloc_wq(args->name);
if (!sched->submit_wq)
return -ENOMEM;
@@ -1335,6 +1394,18 @@ Out_check_own:
}
EXPORT_SYMBOL(drm_sched_init);
+static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *job, *tmp;
+
+ /* All other accessors are stopped. No locking necessary. */
+ list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) {
+ sched->ops->cancel_job(job);
+ list_del(&job->list);
+ sched->ops->free_job(job);
+ }
+}
+
/**
* drm_sched_fini - Destroy a gpu scheduler
*
@@ -1342,19 +1413,11 @@ EXPORT_SYMBOL(drm_sched_init);
*
* Tears down and cleans up the scheduler.
*
- * This stops submission of new jobs to the hardware through
- * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
- * will not be called for all jobs still in drm_gpu_scheduler.pending_list.
- * There is no solution for this currently. Thus, it is up to the driver to make
- * sure that:
- *
- * a) drm_sched_fini() is only called after for all submitted jobs
- * drm_sched_backend_ops.free_job() has been called or that
- * b) the jobs for which drm_sched_backend_ops.free_job() has not been called
- * after drm_sched_fini() ran are freed manually.
- *
- * FIXME: Take care of the above problem and prevent this function from leaking
- * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
+ * This stops submission of new jobs to the hardware through &struct
+ * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job
+ * is implemented, all jobs will be canceled through it and afterwards cleaned
+ * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not
+ * implemented, memory could leak.
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
@@ -1384,11 +1447,18 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&sched->work_tdr);
+ /* Avoid memory leaks if supported by the driver. */
+ if (sched->ops->cancel_job)
+ drm_sched_cancel_remaining_jobs(sched);
+
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
sched->ready = false;
kfree(sched->sched_rq);
sched->sched_rq = NULL;
+
+ if (!list_empty(&sched->pending_list))
+ dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n");
}
EXPORT_SYMBOL(drm_sched_fini);
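To make the teardown semantics above concrete, a minimal sketch (not part of the patch) of the two driver hooks involved; the foo_* names and the driver-side job tracking (hw_fence, link, fence_lock) are assumptions, and the mock scheduler added later in this series implements the same pattern.

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>
#include <linux/spinlock.h>

struct foo_device {
	spinlock_t fence_lock;	/* assumed: protects the in-flight job list */
};

struct foo_job {
	struct drm_sched_job base;
	struct foo_device *fdev;
	struct dma_fence hw_fence;
	struct list_head link;
};

/* With this hook, drm_sched_fini() cancels and frees all pending jobs. */
static void foo_sched_cancel_job(struct drm_sched_job *sched_job)
{
	struct foo_job *job = container_of(sched_job, struct foo_job, base);
	unsigned long flags;

	spin_lock_irqsave(&job->fdev->fence_lock, flags);
	if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
		list_del(&job->link);
		dma_fence_set_error(&job->hw_fence, -ECANCELED);
		dma_fence_signal_locked(&job->hw_fence);
	}
	spin_unlock_irqrestore(&job->fdev->fence_lock, flags);

	/* free_job() is still called for the job afterwards. */
}

static enum drm_gpu_sched_stat
foo_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct foo_job *job = container_of(sched_job, struct foo_job, base);

	/*
	 * "False timeout": the hardware is still making progress. Returning
	 * DRM_GPU_SCHED_STAT_NO_HANG makes the scheduler reinsert the job
	 * into the pending list; per the documentation above, the driver
	 * must not call drm_sched_stop()/drm_sched_start() in this case.
	 */
	if (foo_hw_is_making_progress(job))	/* hypothetical helper */
		return DRM_GPU_SCHED_STAT_NO_HANG;

	foo_hw_reset(job);			/* hypothetical helper */
	return DRM_GPU_SCHED_STAT_RESET;
}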
diff --git a/drivers/gpu/drm/scheduler/tests/Makefile b/drivers/gpu/drm/scheduler/tests/Makefile
new file mode 100644
index 000000000000..5bf707bad373
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+drm-sched-tests-y := \
+ mock_scheduler.o \
+ tests_basic.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += drm-sched-tests.o
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
new file mode 100644
index 000000000000..65acffc3fea8
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include "sched_tests.h"
+
+/*
+ * Here we implement the mock "GPU" (or the scheduler backend) which is used by
+ * the DRM scheduler unit tests in order to exercise the core functionality.
+ *
+ * Test cases are implemented in a separate file.
+ */
+
+/**
+ * drm_mock_sched_entity_new - Create a new mock scheduler entity
+ *
+ * @test: KUnit test owning the entity
+ * @priority: Scheduling priority
+ * @sched: Mock scheduler on which the entity can be scheduled
+ *
+ * Returns: New mock scheduler entity with allocation managed by the test
+ */
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_gpu_scheduler *drm_sched;
+ int ret;
+
+ entity = kunit_kzalloc(test, sizeof(*entity), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ drm_sched = &sched->base;
+ ret = drm_sched_entity_init(&entity->base,
+ priority,
+ &drm_sched, 1,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ entity->test = test;
+
+ return entity;
+}
+
+/**
+ * drm_mock_sched_entity_free - Destroys a mock scheduler entity
+ *
+ * @entity: Entity to destroy
+ *
+ * To be used from the test cases once done with the entity.
+ */
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity)
+{
+ drm_sched_entity_destroy(&entity->base);
+}
+
+static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+
+ lockdep_assert_held(&sched->lock);
+
+ job->flags |= DRM_MOCK_SCHED_JOB_DONE;
+ list_del(&job->link);
+ dma_fence_signal_locked(&job->hw_fence);
+ complete(&job->done);
+}
+
+static enum hrtimer_restart
+drm_mock_sched_job_signal_timer(struct hrtimer *hrtimer)
+{
+ struct drm_mock_sched_job *job =
+ container_of(hrtimer, typeof(*job), timer);
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+ struct drm_mock_sched_job *next;
+ ktime_t now = ktime_get();
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (!job->duration_us)
+ break;
+
+ if (ktime_before(now, job->finish_at))
+ break;
+
+ sched->hw_timeline.cur_seqno = job->hw_fence.seqno;
+ drm_mock_sched_job_complete(job);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * drm_mock_sched_job_new - Create a new mock scheduler job
+ *
+ * @test: KUnit test owning the job
+ * @entity: Scheduler entity of the job
+ *
+ * Returns: New mock scheduler job with allocation managed by the test
+ */
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity)
+{
+ struct drm_mock_sched_job *job;
+ int ret;
+
+ job = kunit_kzalloc(test, sizeof(*job), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, job);
+
+ ret = drm_sched_job_init(&job->base,
+ &entity->base,
+ 1,
+ NULL,
+ 1);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ job->test = test;
+
+ init_completion(&job->done);
+ INIT_LIST_HEAD(&job->link);
+ hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+ return job;
+}
+
+static const char *drm_mock_sched_hw_fence_driver_name(struct dma_fence *fence)
+{
+ return "drm_mock_sched";
+}
+
+static const char *
+drm_mock_sched_hw_fence_timeline_name(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ return (const char *)job->base.sched->name;
+}
+
+static void drm_mock_sched_hw_fence_release(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ hrtimer_cancel(&job->timer);
+
+ /* Containing job is freed by the kunit framework */
+}
+
+static const struct dma_fence_ops drm_mock_sched_hw_fence_ops = {
+ .get_driver_name = drm_mock_sched_hw_fence_driver_name,
+ .get_timeline_name = drm_mock_sched_hw_fence_timeline_name,
+ .release = drm_mock_sched_hw_fence_release,
+};
+
+static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ dma_fence_init(&job->hw_fence,
+ &drm_mock_sched_hw_fence_ops,
+ &sched->lock,
+ sched->hw_timeline.context,
+ atomic_inc_return(&sched->hw_timeline.next_seqno));
+
+ dma_fence_get(&job->hw_fence); /* Reference for the job_list */
+
+ spin_lock_irq(&sched->lock);
+ if (job->duration_us) {
+ ktime_t prev_finish_at = 0;
+
+ if (!list_empty(&sched->job_list)) {
+ struct drm_mock_sched_job *prev =
+ list_last_entry(&sched->job_list, typeof(*prev),
+ link);
+
+ prev_finish_at = prev->finish_at;
+ }
+
+ if (!prev_finish_at)
+ prev_finish_at = ktime_get();
+
+ job->finish_at = ktime_add_us(prev_finish_at, job->duration_us);
+ }
+ list_add_tail(&job->link, &sched->job_list);
+ if (job->finish_at)
+ hrtimer_start(&job->timer, job->finish_at, HRTIMER_MODE_ABS);
+ spin_unlock_irq(&sched->lock);
+
+ return &job->hw_fence;
+}
+
+/*
+ * Normally, drivers would take appropriate measures in this callback, such as
+ * killing the entity the faulty job is associated with, resetting the hardware
+ * and/or resubmitting non-faulty jobs.
+ *
+ * For the mock scheduler, there are no hardware rings to be reset nor jobs
+ * to be resubmitted. Thus, this function merely ensures that
+ * a) timed-out fences get signaled properly and removed from the pending list,
+ * b) the mock scheduler framework gets informed about the timeout via a flag,
+ * c) the drm_sched_job, no longer needed, gets freed.
+ */
+static enum drm_gpu_sched_stat
+mock_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) {
+ job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
+ }
+
+ spin_lock_irqsave(&sched->lock, flags);
+ if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
+ list_del(&job->link);
+ job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+ dma_fence_set_error(&job->hw_fence, -ETIMEDOUT);
+ dma_fence_signal_locked(&job->hw_fence);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ dma_fence_put(&job->hw_fence);
+ drm_sched_job_cleanup(sched_job);
+ /* Mock job itself is freed by the kunit framework. */
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static void mock_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ dma_fence_put(&job->hw_fence);
+ drm_sched_job_cleanup(sched_job);
+
+ /* Mock job itself is freed by the kunit framework. */
+}
+
+static void mock_sched_cancel_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ hrtimer_cancel(&job->timer);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
+ list_del(&job->link);
+ dma_fence_set_error(&job->hw_fence, -ECANCELED);
+ dma_fence_signal_locked(&job->hw_fence);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ /*
+ * The GPU scheduler will still call drm_sched_backend_ops.free_job().
+ * The mock job itself is freed by the kunit framework.
+ */
+}
+
+static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
+ .run_job = mock_sched_run_job,
+ .timedout_job = mock_sched_timedout_job,
+ .free_job = mock_sched_free_job,
+ .cancel_job = mock_sched_cancel_job,
+};
+
+/**
+ * drm_mock_sched_new - Create a new mock scheduler
+ *
+ * @test: KUnit test owning the job
+ * @timeout: Job timeout to set
+ *
+ * Returns: New mock scheduler with allocation managed by the test
+ */
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
+{
+ struct drm_sched_init_args args = {
+ .ops = &drm_mock_scheduler_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = U32_MAX,
+ .hang_limit = 1,
+ .timeout = timeout,
+ .name = "drm-mock-scheduler",
+ };
+ struct drm_mock_scheduler *sched;
+ int ret;
+
+ sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, sched);
+
+ ret = drm_sched_init(&sched->base, &args);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sched->test = test;
+ sched->hw_timeline.context = dma_fence_context_alloc(1);
+ atomic_set(&sched->hw_timeline.next_seqno, 0);
+ INIT_LIST_HEAD(&sched->job_list);
+ spin_lock_init(&sched->lock);
+
+ return sched;
+}
+
+/**
+ * drm_mock_sched_fini - Destroys a mock scheduler
+ *
+ * @sched: Scheduler to destroy
+ *
+ * To be used from the test cases once done with the scheduler.
+ */
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
+{
+ drm_sched_fini(&sched->base);
+}
+
+/**
+ * drm_mock_sched_advance - Advances the mock scheduler timeline
+ *
+ * @sched: Scheduler timeline to advance
+ * @num: By how many jobs to advance
+ *
+ * Advancing the scheduler timeline by a number of seqnos will trigger
+ * signalling of the hardware fences and unlinking the jobs from the internal
+ * scheduler tracking.
+ *
+ * This can be used from test cases which want complete control over the
+ * simulated job execution timing. For example, a job submitted with no set
+ * duration will never complete before the test case advances the timeline by
+ * one.
+ */
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num)
+{
+ struct drm_mock_sched_job *job, *next;
+ unsigned int found = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched->lock, flags);
+ if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num <
+ sched->hw_timeline.cur_seqno))
+ goto unlock;
+ sched->hw_timeline.cur_seqno += num;
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno)
+ break;
+
+ drm_mock_sched_job_complete(job);
+ found++;
+ }
+unlock:
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return found;
+}
+
+MODULE_DESCRIPTION("DRM mock scheduler and tests");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
new file mode 100644
index 000000000000..63d4f2ac7074
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _SCHED_TESTS_H_
+#define _SCHED_TESTS_H_
+
+#include <kunit/test.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/dma-fence.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include <drm/gpu_scheduler.h>
+
+/*
+ * DOC: Mock DRM scheduler data structures
+ *
+ * drm_mock_* data structures are used to implement a mock "GPU".
+ *
+ * They subclass the core DRM scheduler objects and add their data on top, which
+ * enables tracking the submitted jobs and simulating their execution with the
+ * attributes as specified by the test case.
+ */
+
+/**
+ * struct drm_mock_scheduler - implements a trivial mock GPU execution engine
+ *
+ * @base: DRM scheduler base class
+ * @test: Backpointer to the owning kunit test case
+ * @lock: Lock to protect the simulated @hw_timeline and @job_list
+ * @job_list: List of jobs submitted to the mock GPU
+ * @hw_timeline: Simulated hardware timeline with a @context, @next_seqno and
+ *               @cur_seqno for implementing a struct dma_fence signaling
+ *               simulated job completion.
+ *
+ * Trivial mock GPU execution engine tracks submitted jobs and enables
+ * completing them strictly in submission order.
+ */
+struct drm_mock_scheduler {
+ struct drm_gpu_scheduler base;
+
+ struct kunit *test;
+
+ spinlock_t lock;
+ struct list_head job_list;
+
+ struct {
+ u64 context;
+ atomic_t next_seqno;
+ unsigned int cur_seqno;
+ } hw_timeline;
+};
+
+/**
+ * struct drm_mock_sched_entity - implements a mock GPU sched entity
+ *
+ * @base: DRM scheduler entity base class
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched entity is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_entity {
+ struct drm_sched_entity base;
+
+ struct kunit *test;
+};
+
+/**
+ * struct drm_mock_sched_job - implements a mock GPU job
+ *
+ * @base: DRM sched job base class
+ * @done: Completion signaled when the job finishes.
+ * @flags: Flags designating job state.
+ * @link: List head element used for job tracking by the drm_mock_scheduler
+ * @timer: Timer used for simulating job execution duration
+ * @duration_us: Simulated job duration in microseconds, or zero if in manual
+ *               timeline advance mode
+ * @finish_at: Absolute time at which a job with a set duration will complete
+ * @hw_fence: Fence returned to the DRM scheduler as the hardware fence
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched job is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_job {
+ struct drm_sched_job base;
+
+ struct completion done;
+
+#define DRM_MOCK_SCHED_JOB_DONE 0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
+ unsigned long flags;
+
+ struct list_head link;
+ struct hrtimer timer;
+
+ unsigned int duration_us;
+ ktime_t finish_at;
+
+ struct dma_fence hw_fence;
+
+ struct kunit *test;
+};
+
+static inline struct drm_mock_scheduler *
+drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched)
+{
+ return container_of(sched, struct drm_mock_scheduler, base);
+}
+
+static inline struct drm_mock_sched_entity *
+drm_sched_entity_to_mock_entity(struct drm_sched_entity *sched_entity)
+{
+ return container_of(sched_entity, struct drm_mock_sched_entity, base);
+}
+
+static inline struct drm_mock_sched_job *
+drm_sched_job_to_mock_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct drm_mock_sched_job, base);
+}
+
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test,
+ long timeout);
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched);
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num);
+
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched);
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity);
+
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity);
+
+/**
+ * drm_mock_sched_job_submit - Arm and submit a job in one go
+ *
+ * @job: Job to arm and submit
+ */
+static inline void drm_mock_sched_job_submit(struct drm_mock_sched_job *job)
+{
+ drm_sched_job_arm(&job->base);
+ drm_sched_entity_push_job(&job->base);
+}
+
+/**
+ * drm_mock_sched_job_set_duration_us - Set a job duration
+ *
+ * @job: Job to set the duration for
+ * @duration_us: Duration in microseconds
+ *
+ * Jobs with duration set will be automatically completed by the mock scheduler
+ * as the timeline progresses, unless a job without a set duration is
+ * encountered in the timeline, in which case calling drm_mock_sched_advance()
+ * will be required to bump the timeline.
+ */
+static inline void
+drm_mock_sched_job_set_duration_us(struct drm_mock_sched_job *job,
+ unsigned int duration_us)
+{
+ job->duration_us = duration_us;
+}
+
+/**
+ * drm_mock_sched_job_is_finished - Check if a job is finished
+ *
+ * @job: Job to check
+ *
+ * Returns: true if finished
+ */
+static inline bool
+drm_mock_sched_job_is_finished(struct drm_mock_sched_job *job)
+{
+ return job->flags & DRM_MOCK_SCHED_JOB_DONE;
+}
+
+/**
+ * drm_mock_sched_job_wait_finished - Wait until a job is finished
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if finished within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_finished(struct drm_mock_sched_job *job, long timeout)
+{
+ if (job->flags & DRM_MOCK_SCHED_JOB_DONE)
+ return true;
+
+ return wait_for_completion_timeout(&job->done, timeout) != 0;
+}
+
+/**
+ * drm_mock_sched_job_wait_scheduled - Wait until a job is scheduled
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if scheduled within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_scheduled(struct drm_mock_sched_job *job, long timeout)
+{
+ KUNIT_ASSERT_EQ(job->test, job->flags & DRM_MOCK_SCHED_JOB_DONE, 0);
+
+ return dma_fence_wait_timeout(&job->base.s_fence->scheduled,
+ false,
+ timeout) != 0;
+}
+
+#endif
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
new file mode 100644
index 000000000000..55eb142bd7c5
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include <linux/delay.h>
+
+#include "sched_tests.h"
+
+#define MOCK_TIMEOUT (HZ / 5)
+
+/*
+ * DRM scheduler basic tests should check the basic functional correctness of
+ * the scheduler, including some very light smoke testing. More targeted tests,
+ * for example focusing on testing specific bugs and other more complicated test
+ * scenarios, should be implemented in separate source units.
+ */
+
+static int drm_sched_basic_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ return 0;
+}
+
+static void drm_sched_basic_exit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+
+ drm_mock_sched_fini(sched);
+}
+
+static int drm_sched_timeout_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, MOCK_TIMEOUT);
+
+ return 0;
+}
+
+static void drm_sched_basic_submit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ unsigned int i;
+ bool done;
+
+ /*
+ * Submit one job to the scheduler and verify that it gets scheduled
+ * and completed only when the mock hw backend processes it.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+struct drm_sched_basic_params {
+ const char *description;
+ unsigned int queue_depth;
+ unsigned int num_entities;
+ unsigned int job_us;
+ bool dep_chain;
+};
+
+static const struct drm_sched_basic_params drm_sched_basic_cases[] = {
+ {
+ .description = "A queue of jobs in a single entity",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ },
+ {
+ .description = "A chain of dependent jobs across multiple entities",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ .dep_chain = true,
+ },
+ {
+ .description = "Multiple independent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ },
+ {
+ .description = "Multiple inter-dependent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ .dep_chain = true,
+ },
+};
+
+static void
+drm_sched_basic_desc(const struct drm_sched_basic_params *params, char *desc)
+{
+ strscpy(desc, params->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(drm_sched_basic, drm_sched_basic_cases, drm_sched_basic_desc);
+
+static void drm_sched_basic_test(struct kunit *test)
+{
+ const struct drm_sched_basic_params *params = test->param_value;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job, *prev = NULL;
+ struct drm_mock_sched_entity **entity;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ entity = kunit_kcalloc(test, params->num_entities, sizeof(*entity),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ for (i = 0; i < params->num_entities; i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < params->queue_depth; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= params->num_entities;
+ drm_mock_sched_job_set_duration_us(job, params->job_us);
+ if (params->dep_chain && prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < params->num_entities; i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_basic_entity_cleanup(struct kunit *test)
+{
+ struct drm_mock_sched_job *job, *mid, *prev = NULL;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity[4];
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ /*
+ * Submit a queue of jobs across different entities with an explicit
+ * chain of dependencies between them and trigger entity cleanup while
+ * the queue is still being processed.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ if (prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ if (i == qd / 2)
+ mid = job;
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(mid, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ /* Exit with half of the queue still pending to be executed. */
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_basic_tests[] = {
+ KUNIT_CASE(drm_sched_basic_submit),
+ KUNIT_CASE_PARAM(drm_sched_basic_test, drm_sched_basic_gen_params),
+ KUNIT_CASE(drm_sched_basic_entity_cleanup),
+ {}
+};
+
+static struct kunit_suite drm_sched_basic = {
+ .name = "drm_sched_basic_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_basic_tests,
+};
+
+static void drm_sched_basic_cancel(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_scheduler *sched;
+ struct drm_mock_sched_job *job;
+ bool done;
+
+ /*
+ * Check that drm_sched_fini() uses the cancel_job() callback to cancel
+ * jobs that are still pending.
+ */
+
+ sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+ entity = drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+ drm_mock_sched_fini(sched);
+
+ KUNIT_ASSERT_EQ(test, job->hw_fence.error, -ECANCELED);
+}
+
+static struct kunit_case drm_sched_cancel_tests[] = {
+ KUNIT_CASE(drm_sched_basic_cancel),
+ {}
+};
+
+static struct kunit_suite drm_sched_cancel = {
+ .name = "drm_sched_basic_cancel_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_cancel_tests,
+};
+
+static void drm_sched_basic_timeout(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ bool done;
+
+ /*
+ * Submit a single job against a scheduler with the timeout configured
+ * and verify that the timeout handling will run if the backend fails
+ * to complete it in time.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ 0);
+
+ done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ DRM_MOCK_SCHED_JOB_TIMEDOUT);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+static void drm_sched_skip_reset(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ unsigned int i;
+ bool done;
+
+ /*
+ * Submit a single job against a scheduler with the timeout configured
+ * and verify that if the job is still running, the timeout handler
+ * will skip the reset and allow the job to complete.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ job->flags = DRM_MOCK_SCHED_JOB_DONT_RESET;
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, 2 * MOCK_TIMEOUT);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET,
+ 0);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+static struct kunit_case drm_sched_timeout_tests[] = {
+ KUNIT_CASE(drm_sched_basic_timeout),
+ KUNIT_CASE(drm_sched_skip_reset),
+ {}
+};
+
+static struct kunit_suite drm_sched_timeout = {
+ .name = "drm_sched_basic_timeout_tests",
+ .init = drm_sched_timeout_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_timeout_tests,
+};
+
+static void drm_sched_priorities(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+ bool done;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities.
+ */
+
+ BUILD_BUG_ON(DRM_SCHED_PRIORITY_KERNEL > DRM_SCHED_PRIORITY_LOW);
+ BUILD_BUG_ON(ARRAY_SIZE(entity) != DRM_SCHED_PRIORITY_COUNT);
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_change_priority(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities and while waiting for them to complete, periodically keep
+ * changing their priorities.
+ *
+ * We set up the queue-depth (qd) and job duration so the priority
+ * changing loop has some time to interact with submissions to the
+ * backend and job completions as they progress.
+ */
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ drm_sched_entity_set_priority(&entity[cur_ent]->base,
+ (entity[cur_ent]->base.priority + 1) %
+ DRM_SCHED_PRIORITY_COUNT);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ usleep_range(200, 500);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_priority_tests[] = {
+ KUNIT_CASE(drm_sched_priorities),
+ KUNIT_CASE(drm_sched_change_priority),
+ {}
+};
+
+static struct kunit_suite drm_sched_priority = {
+ .name = "drm_sched_basic_priority_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_priority_tests,
+};
+
+static void drm_sched_test_modify_sched(struct kunit *test)
+{
+ unsigned int i, cur_ent = 0, cur_sched = 0;
+ struct drm_mock_sched_entity *entity[13];
+ struct drm_mock_scheduler *sched[3];
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * schedulers and while waiting for them to complete, periodically keep
+ * changing schedulers associated with each entity.
+ *
+ * We set up the queue-depth (qd) and job duration so the sched modify
+ * loop has some time to interact with submissions to the backend and
+ * job completions as they progress.
+ *
+ * For the number of schedulers and entities we use primes in order to
+ * perturb the entity->sched assignments with less of a regular pattern.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ sched[i] = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched[i % ARRAY_SIZE(sched)]);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ struct drm_gpu_scheduler *modify;
+
+ usleep_range(200, 500);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ cur_sched++;
+ cur_sched %= ARRAY_SIZE(sched);
+ modify = &sched[cur_sched]->base;
+ drm_sched_entity_modify_sched(&entity[cur_ent]->base, &modify,
+ 1);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ drm_mock_sched_fini(sched[i]);
+}
+
+static struct kunit_case drm_sched_modify_sched_tests[] = {
+ KUNIT_CASE(drm_sched_test_modify_sched),
+ {}
+};
+
+static struct kunit_suite drm_sched_modify_sched = {
+ .name = "drm_sched_basic_modify_sched_tests",
+ .test_cases = drm_sched_modify_sched_tests,
+};
+
+static void drm_sched_test_credits(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_scheduler *sched;
+ struct drm_mock_sched_job *job[2];
+ bool done;
+ int i;
+
+ /*
+ * Check that the configured credit limit is respected.
+ */
+
+ sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+ sched->base.credit_limit = 1;
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ job[0] = drm_mock_sched_job_new(test, entity);
+ job[1] = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job[0]);
+ drm_mock_sched_job_submit(job[1]);
+
+ done = drm_mock_sched_job_wait_scheduled(job[0], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+ drm_mock_sched_fini(sched);
+}
+
+static struct kunit_case drm_sched_credits_tests[] = {
+ KUNIT_CASE(drm_sched_test_credits),
+ {}
+};
+
+static struct kunit_suite drm_sched_credits = {
+ .name = "drm_sched_basic_credits_tests",
+ .test_cases = drm_sched_credits_tests,
+};
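+
+/*
+ * All of the suites below are plain KUnit suites, so a quick way to run
+ * them is the in-tree wrapper, e.g.:
+ *
+ *   ./tools/testing/kunit/kunit.py run \
+ *           --kunitconfig=drivers/gpu/drm/scheduler/tests
+ *
+ * (the .kunitconfig path is an assumption; point it at wherever these
+ * mock scheduler tests are built)
+ */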
+
+kunit_test_suites(&drm_sched_basic,
+ &drm_sched_timeout,
+ &drm_sched_cancel,
+ &drm_sched_priority,
+ &drm_sched_modify_sched,
+ &drm_sched_credits);
diff --git a/drivers/gpu/drm/sitronix/Kconfig b/drivers/gpu/drm/sitronix/Kconfig
new file mode 100644
index 000000000000..6de7d92d9b74
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/Kconfig
@@ -0,0 +1,42 @@
+config DRM_ST7571_I2C
+ tristate "DRM support for Sitronix ST7571 display panels (I2C)"
+ depends on DRM && I2C && MMU
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for Sitronix ST7571 panels controlled over I2C.
+
+ If M is selected the module will be called st7571-i2c.
+
+config DRM_ST7586
+ tristate "DRM support for Sitronix ST7586 display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_MIPI_DBI
+ help
+ DRM driver for the following Sitronix ST7586 panels:
+ * LEGO MINDSTORMS EV3
+
+ If M is selected the module will be called st7586.
+
+config DRM_ST7735R
+ tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for Sitronix ST7715R/ST7735R with one of the following
+ LCDs:
+ * Jianda JD-T18003-T01 1.8" 128x160 TFT
+ * Okaya RH128128T 1.44" 128x128 TFT
+
+ If M is selected the module will be called st7735r.
+
diff --git a/drivers/gpu/drm/sitronix/Makefile b/drivers/gpu/drm/sitronix/Makefile
new file mode 100644
index 000000000000..bd139e5a6995
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DRM_ST7571_I2C) += st7571-i2c.o
+obj-$(CONFIG_DRM_ST7586) += st7586.o
+obj-$(CONFIG_DRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
new file mode 100644
index 000000000000..453eb7e045e5
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -0,0 +1,1079 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Sitronix ST7571, a 4-level grayscale dot matrix LCD controller
+ *
+ * Copyright (C) 2025 Marcus Folkesson <marcus.folkesson@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_module.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+
+#define ST7571_COMMAND_MODE (0x00)
+#define ST7571_DATA_MODE (0x40)
+
+/* Normal mode command set */
+#define ST7571_DISPLAY_OFF (0xae)
+#define ST7571_DISPLAY_ON (0xaf)
+#define ST7571_OSC_ON (0xab)
+#define ST7571_SET_COLUMN_LSB(c) (0x00 | FIELD_PREP(GENMASK(3, 0), (c)))
+#define ST7571_SET_COLUMN_MSB(c) (0x10 | FIELD_PREP(GENMASK(2, 0), (c) >> 4))
+#define ST7571_SET_COM0_LSB(x) (FIELD_PREP(GENMASK(6, 0), (x)))
+#define ST7571_SET_COM0_MSB (0x44)
+#define ST7571_SET_COM_SCAN_DIR(d) (0xc0 | FIELD_PREP(GENMASK(3, 3), (d)))
+#define ST7571_SET_CONTRAST_LSB(c) (FIELD_PREP(GENMASK(5, 0), (c)))
+#define ST7571_SET_CONTRAST_MSB (0x81)
+#define ST7571_SET_DISPLAY_DUTY_LSB(d) (FIELD_PREP(GENMASK(7, 0), (d)))
+#define ST7571_SET_DISPLAY_DUTY_MSB (0x48)
+#define ST7571_SET_ENTIRE_DISPLAY_ON(p) (0xa4 | FIELD_PREP(GENMASK(0, 0), (p)))
+#define ST7571_SET_LCD_BIAS(b) (0x50 | FIELD_PREP(GENMASK(2, 0), (b)))
+#define ST7571_SET_MODE_LSB(m) (FIELD_PREP(GENMASK(7, 2), (m)))
+#define ST7571_SET_MODE_MSB (0x38)
+#define ST7571_SET_PAGE(p) (0xb0 | FIELD_PREP(GENMASK(3, 0), (p)))
+#define ST7571_SET_POWER(p) (0x28 | FIELD_PREP(GENMASK(2, 0), (p)))
+#define ST7571_SET_REGULATOR_REG(r) (0x20 | FIELD_PREP(GENMASK(2, 0), (r)))
+#define ST7571_SET_REVERSE(r) (0xa6 | FIELD_PREP(GENMASK(0, 0), (r)))
+#define ST7571_SET_SEG_SCAN_DIR(d) (0xa0 | FIELD_PREP(GENMASK(0, 0), (d)))
+#define ST7571_SET_START_LINE_LSB(l) (FIELD_PREP(GENMASK(6, 0), (l)))
+#define ST7571_SET_START_LINE_MSB (0x40)
+
+/* Extension command set 3 */
+#define ST7571_COMMAND_SET_3 (0x7b)
+#define ST7571_SET_COLOR_MODE(c) (0x10 | FIELD_PREP(GENMASK(0, 0), (c)))
+#define ST7571_COMMAND_SET_NORMAL (0x00)
+
+/* ST7567 commands */
+#define ST7567_SET_LCD_BIAS(m) (0xa2 | FIELD_PREP(GENMASK(0, 0), (m)))
+
+#define ST7571_PAGE_HEIGHT 8
+
+#define DRIVER_NAME "st7571"
+#define DRIVER_DESC "ST7571 DRM driver"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+enum st7571_color_mode {
+ ST7571_COLOR_MODE_GRAY = 0,
+ ST7571_COLOR_MODE_BLACKWHITE = 1,
+};
+
+struct st7571_device;
+
+struct st7571_panel_constraints {
+ u32 min_nlines;
+ u32 max_nlines;
+ u32 min_ncols;
+ u32 max_ncols;
+ bool support_grayscale;
+};
+
+struct st7571_panel_data {
+ int (*init)(struct st7571_device *st7571);
+ int (*parse_dt)(struct st7571_device *st7571);
+ struct st7571_panel_constraints constraints;
+};
+
+struct st7571_panel_format {
+ void (*prepare_buffer)(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state);
+ int (*update_rect)(struct drm_framebuffer *fb, struct drm_rect *rect);
+ enum st7571_color_mode mode;
+ const u8 nformats;
+ const u32 formats[];
+};
+
+struct st7571_device {
+ struct drm_device dev;
+
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct drm_display_mode mode;
+
+ const struct st7571_panel_format *pformat;
+ const struct st7571_panel_data *pdata;
+ struct i2c_client *client;
+ struct gpio_desc *reset;
+ struct regmap *regmap;
+
+ /*
+ * Depending on the hardware design, the acknowledge signal may be hard to
+ * recognize as a valid logic "0" level.
+ * Therefore, ignore NAK if possible to stay compatible with most hardware designs
+ * and off-the-shelf panels out there.
+ *
+ * From section 6.4, MICROPROCESSOR INTERFACE, in the datasheet:
+ *
+ * "By connecting SDA_OUT to SDA_IN externally, the SDA line becomes fully
+ * I2C interface compatible.
+ * Separating acknowledge-output from serial data
+ * input is advantageous for chip-on-glass (COG) applications. In COG
+ * applications, the ITO resistance and the pull-up resistor will form a
+ * voltage divider, which affects acknowledge-signal level. Larger ITO
+ * resistance will raise the acknowledged-signal level and system cannot
+ * recognize this level as a valid logic “0” level. By separating SDA_IN from
+ * SDA_OUT, the IC can be used in a mode that ignores the acknowledge-bit.
+ * For applications which check acknowledge-bit, it is necessary to minimize
+ * the ITO resistance of the SDA_OUT trace to guarantee a valid low level."
+ */
+ bool ignore_nak;
+
+ bool grayscale;
+ u32 height_mm;
+ u32 width_mm;
+ u32 startline;
+ u32 nlines;
+ u32 ncols;
+ u32 bpp;
+
+ /* Intermediate buffer in LCD friendly format */
+ u8 *hwbuf;
+
+ /* Row of (transformed) pixels ready to be written to the display */
+ u8 *row;
+};
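+
+/*
+ * Buffer sizing sketch (illustrative values, not taken from a datasheet):
+ * a 128x128 grayscale panel (bpp = 2) needs a 128 * 128 * 2 / 8 = 4096
+ * byte hwbuf and a 128 * 2 = 256 byte row buffer, matching the
+ * devm_kzalloc() sizes used in st7571_probe() below.
+ */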
+
+static inline struct st7571_device *drm_to_st7571(struct drm_device *dev)
+{
+ return container_of(dev, struct st7571_device, dev);
+}
+
+static int st7571_regmap_write(void *context, const void *data, size_t count)
+{
+ struct i2c_client *client = context;
+ struct st7571_device *st7571 = i2c_get_clientdata(client);
+ int ret;
+
+ struct i2c_msg msg = {
+ .addr = st7571->client->addr,
+ .flags = st7571->ignore_nak ? I2C_M_IGNORE_NAK : 0,
+ .len = count,
+ .buf = (u8 *)data
+ };
+
+ ret = i2c_transfer(st7571->client->adapter, &msg, 1);
+
+ /*
+ * Unfortunately, there is no way to check if the transfer failed because of
+ * a NAK or something else as I2C bus drivers use different return values for NAK.
+ *
+ * However, if the transfer fails and ignore_nak is set, we know it is an error.
+ */
+ if (ret < 0 && st7571->ignore_nak)
+ return ret;
+
+ return 0;
+}
+
+/* The st7571 driver does not read registers but regmap expects a .read */
+static int st7571_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static int st7571_send_command_list(struct st7571_device *st7571,
+ const u8 *cmd_list, size_t len)
+{
+ int ret = 0;
+
+ for (int i = 0; i < len; i++) {
+ ret = regmap_write(st7571->regmap, ST7571_COMMAND_MODE, cmd_list[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline u8 st7571_transform_xy(const char *p, int x, int y)
+{
+ int xrest = x % 8;
+ u8 result = 0;
+
+ /*
+ * Transforms an (x, y) pixel coordinate into a vertical 8-bit
+ * column from the framebuffer. It calculates the corresponding byte in the
+ * framebuffer, extracts the bit at the given x position across 8 consecutive
+ * rows, and packs those bits into a single byte.
+ *
+ * Return an 8-bit value representing a vertical column of pixels.
+ */
+ x = x / 8;
+ y = (y / 8) * 8;
+
+ for (int i = 0; i < 8; i++) {
+ int row_idx = y + i;
+ u8 byte = p[row_idx * 16 + x];
+ u8 bit = (byte >> xrest) & 1;
+
+ result |= (bit << i);
+ }
+
+ return result;
+}
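+
+/*
+ * Worked example (illustrative numbers, assuming the 16-byte pitch used
+ * above): for x = 3 and y = 10, xrest = 3, the byte column is 0 and the
+ * page base row is 8, so the loop reads bit 3 of p[8 * 16] through
+ * p[15 * 16] and packs those eight bits LSB-first into one display byte.
+ */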
+
+static int st7571_set_position(struct st7571_device *st7571, int x, int y)
+{
+ u8 cmd_list[] = {
+ ST7571_SET_COLUMN_LSB(x),
+ ST7571_SET_COLUMN_MSB(x),
+ ST7571_SET_PAGE(y / ST7571_PAGE_HEIGHT),
+ };
+
+ return st7571_send_command_list(st7571, cmd_list, ARRAY_SIZE(cmd_list));
+}
+
+static int st7571_fb_clear_screen(struct st7571_device *st7571)
+{
+ u32 npixels = st7571->ncols * round_up(st7571->nlines, ST7571_PAGE_HEIGHT) * st7571->bpp;
+ char pixelvalue = 0x00;
+
+ for (int i = 0; i < npixels; i++)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, &pixelvalue, 1);
+
+ return 0;
+}
+
+static void st7571_prepare_buffer_monochrome(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+ u32 size;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ iosys_map_set_vaddr(&dst, st7571->hwbuf);
+
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ break;
+
+ case DRM_FORMAT_R1:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+ }
+}
+
+static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ u32 size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888: /* XRGB8888 is treated as monochrome (R1) */
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ iosys_map_set_vaddr(&dst, st7571->hwbuf);
+
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ break;
+
+ case DRM_FORMAT_R1:
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+
+ case DRM_FORMAT_R2:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 4;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+ }
+}
+
+static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct drm_rect *rect)
+{
+ struct st7571_device *st7571 = drm_to_st7571(fb->dev);
+ char *row = st7571->row;
+
+ /* Align y to display page boundaries */
+ rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT);
+ rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
+
+ for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
+ for (int x = rect->x1; x < rect->x2; x++)
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+
+ st7571_set_position(st7571, rect->x1, y);
+
+ /* TODO: Investigate why we can't write multiple bytes at once */
+ for (int x = rect->x1; x < rect->x2; x++)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+ }
+
+ return 0;
+}
+
+static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct drm_rect *rect)
+{
+ struct st7571_device *st7571 = drm_to_st7571(fb->dev);
+ u32 format = fb->format->format;
+ char *row = st7571->row;
+ int x1;
+ int x2;
+
+ /* Align y to display page boundaries */
+ rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT);
+ rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ /* Treated as monochrome (R1) */
+ fallthrough;
+ case DRM_FORMAT_R1:
+ x1 = rect->x1;
+ x2 = rect->x2;
+ break;
+ case DRM_FORMAT_R2:
+ x1 = rect->x1 * 2;
+ x2 = rect->x2 * 2;
+ break;
+ }
+
+ for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
+ for (int x = x1; x < x2; x++)
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+
+ st7571_set_position(st7571, rect->x1, y);
+
+ /* TODO: Investigate why we can't write multiple bytes at once */
+ for (int x = x1; x < x2; x++) {
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+
+ /*
+ * As the display supports grayscale, all pixels must be written as two bits
+ * even if the format is monochrome.
+ *
+ * The bit values map to the following grayscale levels:
+ * 0 0 = White
+ * 0 1 = Light gray
+ * 1 0 = Dark gray
+ * 1 1 = Black
+ *
+ * For monochrome formats, write the same value twice to get
+ * either a black or white pixel.
+ */
+ if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int st7571_connector_get_modes(struct drm_connector *conn)
+{
+ struct st7571_device *st7571 = drm_to_st7571(conn->dev);
+
+ return drm_connector_helper_get_modes_fixed(conn, &st7571->mode);
+}
+
+static const struct drm_connector_helper_funcs st7571_connector_helper_funcs = {
+ .get_modes = st7571_connector_get_modes,
+};
+
+static const struct st7571_panel_format st7571_monochrome = {
+ .prepare_buffer = st7571_prepare_buffer_monochrome,
+ .update_rect = st7571_fb_update_rect_monochrome,
+ .mode = ST7571_COLOR_MODE_BLACKWHITE,
+ .formats = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_R1,
+ },
+ .nformats = 2,
+};
+
+static const struct st7571_panel_format st7571_grayscale = {
+ .prepare_buffer = st7571_prepare_buffer_grayscale,
+ .update_rect = st7571_fb_update_rect_grayscale,
+ .mode = ST7571_COLOR_MODE_GRAY,
+ .formats = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_R1,
+ DRM_FORMAT_R2,
+ },
+ .nformats = 3,
+};
+
+static const u64 st7571_primary_plane_fmtmods[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int st7571_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+
+static void st7571_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_device *dev = plane->dev;
+ struct drm_rect damage;
+ struct st7571_device *st7571 = drm_to_st7571(plane->dev);
+ int ret, idx;
+
+ if (!fb)
+ return; /* no framebuffer; plane is disabled */
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ st7571->pformat->prepare_buffer(st7571,
+ &shadow_plane_state->data[0],
+ fb, &damage,
+ &shadow_plane_state->fmtcnv_state);
+
+ st7571->pformat->update_rect(fb, &damage);
+ }
+
+ drm_dev_exit(idx);
+
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+
+static void st7571_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct st7571_device *st7571 = drm_to_st7571(plane->dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ st7571_fb_clear_screen(st7571);
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs st7571_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = st7571_primary_plane_helper_atomic_check,
+ .atomic_update = st7571_primary_plane_helper_atomic_update,
+ .atomic_disable = st7571_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs st7571_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+/*
+ * CRTC
+ */
+
+static enum drm_mode_status st7571_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct st7571_device *st7571 = drm_to_st7571(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &st7571->mode);
+}
+
+static const struct drm_crtc_helper_funcs st7571_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .mode_valid = st7571_crtc_mode_valid,
+};
+
+static const struct drm_crtc_funcs st7571_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+/*
+ * Encoder
+ */
+
+static void st7571_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct st7571_device *st7571 = drm_to_st7571(drm);
+ u8 command = ST7571_DISPLAY_ON;
+ int ret;
+
+ ret = st7571->pdata->init(st7571);
+ if (ret)
+ return;
+
+ st7571_send_command_list(st7571, &command, 1);
+}
+
+static void st7571_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct st7571_device *st7571 = drm_to_st7571(drm);
+ u8 command = ST7571_DISPLAY_OFF;
+
+ st7571_send_command_list(st7571, &command, 1);
+}
+
+static const struct drm_encoder_funcs st7571_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_encoder_helper_funcs st7571_encoder_helper_funcs = {
+ .atomic_enable = st7571_encoder_atomic_enable,
+ .atomic_disable = st7571_encoder_atomic_disable,
+};
+
+/*
+ * Connector
+ */
+
+static const struct drm_connector_funcs st7571_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_mode_config_funcs st7571_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_display_mode st7571_mode(struct st7571_device *st7571)
+{
+ struct drm_display_mode mode = {
+ DRM_SIMPLE_MODE(st7571->ncols, st7571->nlines,
+ st7571->width_mm, st7571->height_mm),
+ };
+
+ return mode;
+}
+
+static int st7571_mode_config_init(struct st7571_device *st7571)
+{
+ struct drm_device *dev = &st7571->dev;
+ const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
+ dev->mode_config.min_width = constraints->min_ncols;
+ dev->mode_config.min_height = constraints->min_nlines;
+ dev->mode_config.max_width = constraints->max_ncols;
+ dev->mode_config.max_height = constraints->max_nlines;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.funcs = &st7571_mode_config_funcs;
+
+ return 0;
+}
+
+static int st7571_plane_init(struct st7571_device *st7571,
+ const struct st7571_panel_format *pformat)
+{
+ struct drm_plane *primary_plane = &st7571->primary_plane;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &st7571_primary_plane_funcs,
+ pformat->formats,
+ pformat->nformats,
+ st7571_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(primary_plane, &st7571_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ return 0;
+}
+
+static int st7571_crtc_init(struct st7571_device *st7571)
+{
+ struct drm_plane *primary_plane = &st7571->primary_plane;
+ struct drm_crtc *crtc = &st7571->crtc;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &st7571_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &st7571_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int st7571_encoder_init(struct st7571_device *st7571)
+{
+ struct drm_encoder *encoder = &st7571->encoder;
+ struct drm_crtc *crtc = &st7571->crtc;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &st7571_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &st7571_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ return 0;
+}
+
+static int st7571_connector_init(struct st7571_device *st7571)
+{
+ struct drm_connector *connector = &st7571->connector;
+ struct drm_encoder *encoder = &st7571->encoder;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_connector_init(dev, connector, &st7571_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &st7571_connector_helper_funcs);
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+DEFINE_DRM_GEM_FOPS(st7571_fops);
+
+static const struct drm_driver st7571_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .fops = &st7571_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+};
+
+static const struct regmap_bus st7571_regmap_bus = {
+ .read = st7571_regmap_read,
+ .write = st7571_regmap_write,
+};
+
+static const struct regmap_config st7571_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_write = true,
+};
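+
+/*
+ * use_single_write makes regmap issue one register write per byte, which
+ * lines up with the one-byte-at-a-time transfers in the update_rect paths
+ * above (see the TODO there about writing multiple bytes at once).
+ */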
+
+static int st7571_validate_parameters(struct st7571_device *st7571)
+{
+ struct device *dev = st7571->dev.dev;
+ const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints;
+
+ if (st7571->width_mm == 0) {
+ dev_err(dev, "Invalid panel width\n");
+ return -EINVAL;
+ }
+
+ if (st7571->height_mm == 0) {
+ dev_err(dev, "Invalid panel height\n");
+ return -EINVAL;
+ }
+
+ if (st7571->nlines < constraints->min_nlines ||
+ st7571->nlines > constraints->max_nlines) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->startline + st7571->nlines > constraints->max_nlines) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->ncols < constraints->min_ncols ||
+ st7571->ncols > constraints->max_ncols) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->grayscale && !constraints->support_grayscale) {
+ dev_err(dev, "Grayscale not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int st7567_parse_dt(struct st7571_device *st7567)
+{
+ struct device *dev = &st7567->client->dev;
+ struct device_node *np = dev->of_node;
+ struct display_timing dt;
+ int ret;
+
+ ret = of_get_display_timing(np, "panel-timing", &dt);
+ if (ret) {
+ dev_err(dev, "Failed to get display timing from DT\n");
+ return ret;
+ }
+
+ of_property_read_u32(np, "width-mm", &st7567->width_mm);
+ of_property_read_u32(np, "height-mm", &st7567->height_mm);
+
+ st7567->pformat = &st7571_monochrome;
+ st7567->bpp = 1;
+
+ st7567->startline = dt.vfront_porch.typ;
+ st7567->nlines = dt.vactive.typ;
+ st7567->ncols = dt.hactive.typ;
+
+ return 0;
+}
+
+static int st7571_parse_dt(struct st7571_device *st7571)
+{
+ struct device *dev = &st7571->client->dev;
+ struct device_node *np = dev->of_node;
+ struct display_timing dt;
+ int ret;
+
+ ret = of_get_display_timing(np, "panel-timing", &dt);
+ if (ret) {
+ dev_err(dev, "Failed to get display timing from DT\n");
+ return ret;
+ }
+
+ of_property_read_u32(np, "width-mm", &st7571->width_mm);
+ of_property_read_u32(np, "height-mm", &st7571->height_mm);
+ st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale");
+
+ if (st7571->grayscale) {
+ st7571->pformat = &st7571_grayscale;
+ st7571->bpp = 2;
+ } else {
+ st7571->pformat = &st7571_monochrome;
+ st7571->bpp = 1;
+ }
+
+ st7571->startline = dt.vfront_porch.typ;
+ st7571->nlines = dt.vactive.typ;
+ st7571->ncols = dt.hactive.typ;
+
+ st7571->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(st7571->reset))
+ return dev_err_probe(dev, PTR_ERR(st7571->reset),
+ "Failed to get reset gpio\n");
+
+ return 0;
+}
+
+static void st7571_reset(struct st7571_device *st7571)
+{
+ gpiod_set_value_cansleep(st7571->reset, 1);
+ fsleep(20);
+ gpiod_set_value_cansleep(st7571->reset, 0);
+}
+
+static int st7567_lcd_init(struct st7571_device *st7567)
+{
+ /*
+ * Most of the initialization sequence is taken directly from the
+ * reference initialization code in the ST7567 datasheet.
+ */
+ u8 commands[] = {
+ ST7571_DISPLAY_OFF,
+
+ ST7567_SET_LCD_BIAS(1),
+
+ ST7571_SET_SEG_SCAN_DIR(0),
+ ST7571_SET_COM_SCAN_DIR(1),
+
+ ST7571_SET_REGULATOR_REG(4),
+ ST7571_SET_CONTRAST_MSB,
+ ST7571_SET_CONTRAST_LSB(0x20),
+
+ ST7571_SET_START_LINE_MSB,
+ ST7571_SET_START_LINE_LSB(st7567->startline),
+
+ ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */
+ ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
+ ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
+
+ ST7571_SET_REVERSE(0),
+ ST7571_SET_ENTIRE_DISPLAY_ON(0),
+ };
+
+ return st7571_send_command_list(st7567, commands, ARRAY_SIZE(commands));
+}
+
+static int st7571_lcd_init(struct st7571_device *st7571)
+{
+ /*
+ * Most of the initialization sequence is taken directly from the
+ * reference initialization code in the ST7571 datasheet.
+ */
+ u8 commands[] = {
+ ST7571_DISPLAY_OFF,
+
+ ST7571_SET_MODE_MSB,
+ ST7571_SET_MODE_LSB(0x2e),
+
+ ST7571_SET_SEG_SCAN_DIR(0),
+ ST7571_SET_COM_SCAN_DIR(1),
+
+ ST7571_SET_COM0_MSB,
+ ST7571_SET_COM0_LSB(0x00),
+
+ ST7571_SET_START_LINE_MSB,
+ ST7571_SET_START_LINE_LSB(st7571->startline),
+
+ ST7571_OSC_ON,
+ ST7571_SET_REGULATOR_REG(5),
+ ST7571_SET_CONTRAST_MSB,
+ ST7571_SET_CONTRAST_LSB(0x33),
+ ST7571_SET_LCD_BIAS(0x04),
+ ST7571_SET_DISPLAY_DUTY_MSB,
+ ST7571_SET_DISPLAY_DUTY_LSB(st7571->nlines),
+
+ ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */
+ ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
+ ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
+
+ ST7571_COMMAND_SET_3,
+ ST7571_SET_COLOR_MODE(st7571->pformat->mode),
+ ST7571_COMMAND_SET_NORMAL,
+
+ ST7571_SET_REVERSE(0),
+ ST7571_SET_ENTIRE_DISPLAY_ON(0),
+ };
+
+ /* Perform a reset before initializing the controller */
+ st7571_reset(st7571);
+
+ return st7571_send_command_list(st7571, commands, ARRAY_SIZE(commands));
+}
+
+static int st7571_probe(struct i2c_client *client)
+{
+ struct st7571_device *st7571;
+ struct drm_device *dev;
+ int ret;
+
+ st7571 = devm_drm_dev_alloc(&client->dev, &st7571_driver,
+ struct st7571_device, dev);
+ if (IS_ERR(st7571))
+ return PTR_ERR(st7571);
+
+ dev = &st7571->dev;
+ st7571->client = client;
+ i2c_set_clientdata(client, st7571);
+ st7571->pdata = device_get_match_data(&client->dev);
+
+ ret = st7571->pdata->parse_dt(st7571);
+ if (ret)
+ return ret;
+
+ ret = st7571_validate_parameters(st7571);
+ if (ret)
+ return ret;
+
+ st7571->mode = st7571_mode(st7571);
+
+ /*
+ * The hardware design could make it hard to detect a NAK on the I2C bus.
+ * If the adapter does not support protocol mangling, do not set the
+ * I2C_M_IGNORE_NAK flag, at the expense of possible cruft in the logs.
+ */
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ st7571->ignore_nak = true;
+
+ st7571->regmap = devm_regmap_init(&client->dev, &st7571_regmap_bus,
+ client, &st7571_regmap_config);
+ if (IS_ERR(st7571->regmap)) {
+ return dev_err_probe(&client->dev, PTR_ERR(st7571->regmap),
+ "Failed to initialize regmap\n");
+ }
+
+ st7571->hwbuf = devm_kzalloc(&client->dev,
+ (st7571->nlines * st7571->ncols * st7571->bpp) / 8,
+ GFP_KERNEL);
+ if (!st7571->hwbuf)
+ return -ENOMEM;
+
+ st7571->row = devm_kzalloc(&client->dev,
+ (st7571->ncols * st7571->bpp),
+ GFP_KERNEL);
+ if (!st7571->row)
+ return -ENOMEM;
+
+ ret = st7571_mode_config_init(st7571);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize mode config\n");
+
+ ret = st7571_plane_init(st7571, st7571->pformat);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize primary plane\n");
+
+ ret = st7571_crtc_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize CRTC\n");
+
+ ret = st7571_encoder_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize encoder\n");
+
+ ret = st7571_connector_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize connector\n");
+
+ drm_mode_config_reset(dev);
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register DRM device\n");
+
+ drm_client_setup(dev, NULL);
+ return 0;
+}
+
+static void st7571_remove(struct i2c_client *client)
+{
+ struct st7571_device *st7571 = i2c_get_clientdata(client);
+
+ drm_dev_unplug(&st7571->dev);
+}
+
+static const struct st7571_panel_data st7567_config = {
+ .init = st7567_lcd_init,
+ .parse_dt = st7567_parse_dt,
+ .constraints = {
+ .min_nlines = 1,
+ .max_nlines = 64,
+ .min_ncols = 128,
+ .max_ncols = 128,
+ .support_grayscale = false,
+ },
+};
+
+static const struct st7571_panel_data st7571_config = {
+ .init = st7571_lcd_init,
+ .parse_dt = st7571_parse_dt,
+ .constraints = {
+ .min_nlines = 1,
+ .max_nlines = 128,
+ .min_ncols = 128,
+ .max_ncols = 128,
+ .support_grayscale = true,
+ },
+};
+
+static const struct of_device_id st7571_of_match[] = {
+ { .compatible = "sitronix,st7567", .data = &st7567_config },
+ { .compatible = "sitronix,st7571", .data = &st7571_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st7571_of_match);
+
+static const struct i2c_device_id st7571_id[] = {
+ { "st7567", 0 },
+ { "st7571", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, st7571_id);
+
+static struct i2c_driver st7571_i2c_driver = {
+ .driver = {
+ .name = "st7571",
+ .of_match_table = st7571_of_match,
+ },
+ .probe = st7571_probe,
+ .remove = st7571_remove,
+ .id_table = st7571_id,
+};
+
+module_i2c_driver(st7571_i2c_driver);
+
+MODULE_AUTHOR("Marcus Folkesson <marcus.folkesson@gmail.com>");
+MODULE_DESCRIPTION("DRM Driver for Sitronix ST7571 LCD controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/sitronix/st7586.c
index a29672d84ede..a29672d84ede 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/sitronix/st7586.c
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/sitronix/st7735r.c
index 1d60f6e5b3bc..1d60f6e5b3bc 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/sitronix/st7735r.c
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index dd2006d51c7a..eec43d1a5595 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -974,7 +974,7 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
{
- unsigned int columns = DIV_ROUND_UP(ssd130x->height, SSD132X_SEGMENT_WIDTH);
+ unsigned int columns = DIV_ROUND_UP(ssd130x->width, SSD132X_SEGMENT_WIDTH);
unsigned int height = ssd130x->height;
memset(data_array, 0, columns * height);
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index cb2816985305..a3447622a33c 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -784,19 +784,12 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu,
{
struct platform_device *pdev = to_platform_device(dev);
struct dpu_context *ctx = &dpu->ctx;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
dev_err(dev, "failed to map dpu registers\n");
- return -EFAULT;
+ return PTR_ERR(ctx->base);
}
ctx->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index 8fc26479bb6b..23b0e1dc547a 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -901,18 +901,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi,
{
struct platform_device *pdev = to_platform_device(dev);
struct dsi_context *ctx = &dsi->ctx;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
drm_err(dsi->drm, "failed to map dsi host registers\n");
- return -ENXIO;
+ return PTR_ERR(ctx->base);
}
ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config);
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 063f82d23d80..8c529b0cca8b 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -177,7 +177,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct device_node *vtg_np;
struct sti_compositor *compo;
- struct resource *res;
unsigned int i;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
@@ -194,17 +193,10 @@ static int sti_compositor_probe(struct platform_device *pdev)
memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
sizeof(struct sti_compositor_data));
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- compo->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (compo->regs == NULL) {
+ compo->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(compo->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(compo->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 4dcddd02629b..7484d3c3f4ed 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -97,7 +97,7 @@ struct sti_dvo {
struct dvo_config *config;
bool enabled;
struct drm_encoder *encoder;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
};
struct sti_dvo_connector {
@@ -439,7 +439,6 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder;
struct sti_dvo_connector *connector;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -455,20 +454,14 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
connector->dvo = dvo;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -ENOMEM;
-
- bridge->driver_private = dvo;
- bridge->funcs = &sti_dvo_bridge_funcs;
- bridge->of_node = dvo->dev.of_node;
- drm_bridge_add(bridge);
+ dvo->bridge.driver_private = dvo;
+ dvo->bridge.of_node = dvo->dev.of_node;
+ drm_bridge_add(&dvo->bridge);
- err = drm_bridge_attach(encoder, bridge, NULL, 0);
+ err = drm_bridge_attach(encoder, &dvo->bridge, NULL, 0);
if (err)
return err;
- dvo->bridge = bridge;
connector->encoder = encoder;
dvo->encoder = encoder;
@@ -490,7 +483,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
+ drm_bridge_remove(&dvo->bridge);
return -EINVAL;
}
@@ -499,7 +492,7 @@ static void sti_dvo_unbind(struct device *dev,
{
struct sti_dvo *dvo = dev_get_drvdata(dev);
- drm_bridge_remove(dvo->bridge);
+ drm_bridge_remove(&dvo->bridge);
}
static const struct component_ops sti_dvo_ops = {
@@ -511,28 +504,20 @@ static int sti_dvo_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_dvo *dvo;
- struct resource *res;
struct device_node *np = dev->of_node;
DRM_INFO("%s\n", __func__);
- dvo = devm_kzalloc(dev, sizeof(*dvo), GFP_KERNEL);
- if (!dvo) {
- DRM_ERROR("Failed to allocate memory for DVO\n");
- return -ENOMEM;
+ dvo = devm_drm_bridge_alloc(dev, struct sti_dvo, bridge, &sti_dvo_bridge_funcs);
+ if (IS_ERR(dvo)) {
+ DRM_ERROR("Failed to allocate DVO\n");
+ return PTR_ERR(dvo);
}
dvo->dev = pdev->dev;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg");
- if (!res) {
- DRM_ERROR("Invalid dvo resource\n");
- return -ENOMEM;
- }
- dvo->regs = devm_ioremap(dev, res->start,
- resource_size(res));
- if (!dvo->regs)
- return -ENOMEM;
+ dvo->regs = devm_platform_ioremap_resource_byname(pdev, "dvo-reg");
+ if (IS_ERR(dvo->regs))
+ return PTR_ERR(dvo->regs);
dvo->clk_pix = devm_clk_get(dev, "dvo_pix");
if (IS_ERR(dvo->clk_pix)) {
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 14fdc00d2ba0..2c015f563de9 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -246,6 +246,7 @@ struct sti_hda {
struct device dev;
struct drm_device *drm_dev;
struct drm_display_mode mode;
+ struct drm_bridge bridge;
void __iomem *regs;
void __iomem *video_dacs_ctrl;
struct clk *clk_pix;
@@ -262,6 +263,11 @@ struct sti_hda_connector {
#define to_sti_hda_connector(x) \
container_of(x, struct sti_hda_connector, drm_connector)
+static struct sti_hda *drm_bridge_to_sti_hda(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sti_hda, bridge);
+}
+
static u32 hda_read(struct sti_hda *hda, int offset)
{
return readl(hda->regs + offset);
@@ -401,7 +407,7 @@ static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb)
static void sti_hda_disable(struct drm_bridge *bridge)
{
- struct sti_hda *hda = bridge->driver_private;
+ struct sti_hda *hda = drm_bridge_to_sti_hda(bridge);
u32 val;
if (!hda->enabled)
@@ -426,7 +432,7 @@ static void sti_hda_disable(struct drm_bridge *bridge)
static void sti_hda_pre_enable(struct drm_bridge *bridge)
{
- struct sti_hda *hda = bridge->driver_private;
+ struct sti_hda *hda = drm_bridge_to_sti_hda(bridge);
u32 val, i, mode_idx;
u32 src_filter_y, src_filter_c;
u32 *coef_y, *coef_c;
@@ -517,7 +523,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- struct sti_hda *hda = bridge->driver_private;
+ struct sti_hda *hda = drm_bridge_to_sti_hda(bridge);
u32 mode_idx;
int hddac_rate;
int ret;
@@ -677,7 +683,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder;
struct sti_hda_connector *connector;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -693,13 +698,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
connector->hda = hda;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -ENOMEM;
-
- bridge->driver_private = hda;
- bridge->funcs = &sti_hda_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL, 0);
+ drm_bridge_attach(encoder, &hda->bridge, NULL, 0);
connector->encoder = encoder;
@@ -745,21 +744,14 @@ static int sti_hda_probe(struct platform_device *pdev)
DRM_INFO("%s\n", __func__);
- hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL);
- if (!hda)
- return -ENOMEM;
+ hda = devm_drm_bridge_alloc(dev, struct sti_hda, bridge, &sti_hda_bridge_funcs);
+ if (IS_ERR(hda))
+ return PTR_ERR(hda);
hda->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
- if (!res) {
- DRM_ERROR("Invalid hda resource\n");
- return -ENOMEM;
- }
- hda->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hda->regs)
- return -ENOMEM;
+ hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg");
+ if (IS_ERR(hda->regs))
+ return PTR_ERR(hda->regs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 164a34d793d8..4e7c3d78b2b9 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -168,6 +168,11 @@ struct sti_hdmi_connector {
#define to_sti_hdmi_connector(x) \
container_of(x, struct sti_hdmi_connector, drm_connector)
+static struct sti_hdmi *drm_bridge_to_sti_hdmi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sti_hdmi, bridge);
+}
+
static const struct drm_prop_enum_list colorspace_mode_names[] = {
{ HDMI_COLORSPACE_RGB, "rgb" },
{ HDMI_COLORSPACE_YUV422, "yuv422" },
@@ -749,7 +754,7 @@ static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
static void sti_hdmi_disable(struct drm_bridge *bridge)
{
- struct sti_hdmi *hdmi = bridge->driver_private;
+ struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge);
u32 val = hdmi_read(hdmi, HDMI_CFG);
@@ -881,7 +886,7 @@ static int hdmi_audio_configure(struct sti_hdmi *hdmi)
static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
{
- struct sti_hdmi *hdmi = bridge->driver_private;
+ struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge);
DRM_DEBUG_DRIVER("\n");
@@ -936,7 +941,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- struct sti_hdmi *hdmi = bridge->driver_private;
+ struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge);
int ret;
DRM_DEBUG_DRIVER("\n");
@@ -1273,7 +1278,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct sti_hdmi_connector *connector;
struct cec_connector_info conn_info;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -1289,13 +1293,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
connector->hdmi = hdmi;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -EINVAL;
-
- bridge->driver_private = hdmi;
- bridge->funcs = &sti_hdmi_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL, 0);
+ drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0);
connector->encoder = encoder;
@@ -1380,15 +1378,14 @@ static int sti_hdmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sti_hdmi *hdmi;
struct device_node *np = dev->of_node;
- struct resource *res;
struct device_node *ddc;
int ret;
DRM_INFO("%s\n", __func__);
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(dev, struct sti_hdmi, bridge, &sti_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
if (ddc) {
@@ -1399,17 +1396,9 @@ static int sti_hdmi_probe(struct platform_device *pdev)
}
hdmi->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
- if (!res) {
- DRM_ERROR("Invalid hdmi resource\n");
- ret = -ENOMEM;
- goto release_adapter;
- }
- hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hdmi->regs) {
- ret = -ENOMEM;
+ hdmi->regs = devm_platform_ioremap_resource_byname(pdev, "hdmi-reg");
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
goto release_adapter;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 6d4c3f57bc46..91d43dd46f13 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -12,6 +12,7 @@
#include <media/cec-notifier.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <drm/drm_property.h>
@@ -86,6 +87,7 @@ struct sti_hdmi {
struct hdmi_audio_params audio;
struct drm_connector *drm_connector;
struct cec_notifier *notifier;
+ struct drm_bridge bridge;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 0f658709c9d0..03684062309b 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1356,7 +1356,6 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *vtg_np;
struct sti_hqvdp *hqvdp;
- struct resource *res;
DRM_DEBUG_DRIVER("\n");
@@ -1367,17 +1366,10 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
}
hqvdp->dev = dev;
-
- /* Get Memory resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hqvdp->regs) {
+ hqvdp->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hqvdp->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(hqvdp->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index af6c06f448c4..6a464b035de8 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -838,7 +838,6 @@ static int sti_tvout_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct sti_tvout *tvout;
- struct resource *res;
DRM_INFO("%s\n", __func__);
@@ -850,16 +849,9 @@ static int sti_tvout_probe(struct platform_device *pdev)
return -ENOMEM;
tvout->dev = dev;
-
- /* get memory resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
- if (!res) {
- DRM_ERROR("Invalid glue resource\n");
- return -ENOMEM;
- }
- tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!tvout->regs)
- return -ENOMEM;
+ tvout->regs = devm_platform_ioremap_resource_byname(pdev, "tvout-reg");
+ if (IS_ERR(tvout->regs))
+ return PTR_ERR(tvout->regs);
/* get reset resources */
tvout->reset = devm_reset_control_get(dev, "tvout");
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 5ba469b711b5..ee81691b3203 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -380,23 +380,15 @@ static int vtg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_vtg *vtg;
- struct resource *res;
int ret;
vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
if (!vtg)
return -ENOMEM;
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENOMEM;
- }
- vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vtg->regs) {
+ vtg->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vtg->regs)) {
DRM_ERROR("failed to remap I/O memory\n");
- return -ENOMEM;
+ return PTR_ERR(vtg->regs);
}
vtg->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 4613e8e3b8fd..07788e8d3d83 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -934,28 +934,27 @@ static const struct drm_connector_funcs lvds_conn_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int lvds_attach(struct drm_bridge *bridge,
+static int lvds_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct stm_lvds *lvds = bridge_to_stm_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
- struct drm_encoder *encoder = bridge->encoder;
int ret;
- if (!bridge->encoder) {
+ if (!encoder) {
drm_err(bridge->dev, "Parent encoder object not found\n");
return -ENODEV;
}
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
/* No cloning support */
- bridge->encoder->possible_clones = 0;
+ encoder->possible_clones = 0;
/* If we have a next bridge just attach it. */
if (lvds->next_bridge)
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge,
bridge, flags);
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
@@ -1050,9 +1049,9 @@ static int lvds_probe(struct platform_device *pdev)
dev_dbg(dev, "Probing LVDS driver...\n");
- lvds = devm_kzalloc(dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(dev, struct stm_lvds, lvds_bridge, &lvds_bridge_funcs);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = dev;
@@ -1165,7 +1164,6 @@ static int lvds_probe(struct platform_device *pdev)
goto err_lvds_probe;
}
- lvds->lvds_bridge.funcs = &lvds_bridge_funcs;
lvds->lvds_bridge.of_node = dev->of_node;
lvds->hw_version = lvds_read(lvds, LVDS_VERR);
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index 58480d8e4f70..c100d29b1a89 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -212,7 +212,7 @@ void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
{
u32 base;
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
sun8i_de3_ccsc_set_coefficients(mixer->engine.regs, layer,
mode, encoding, range);
return;
@@ -228,7 +228,7 @@ void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
{
u32 base;
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable);
return;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 8b41d33baa30..31a8409b98f4 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -274,6 +274,7 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
{
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
u32 bld_base = sun8i_blender_base(mixer);
+ struct regmap *bld_regs = sun8i_blender_regmap(mixer);
struct drm_plane_state *plane_state;
struct drm_plane *plane;
u32 route = 0, pipe_en = 0;
@@ -313,12 +314,13 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
pipe_en |= SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
}
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
pipe_en | SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
- SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
+ if (mixer->cfg->de_type != SUN8I_MIXER_DE33)
+ regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
+ SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
}
static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
@@ -367,25 +369,31 @@ static void sun8i_mixer_mode_set(struct sunxi_engine *engine,
const struct drm_display_mode *mode)
{
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
+ struct regmap *bld_regs;
u32 bld_base, size, val;
bool interlaced;
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
size = SUN8I_MIXER_SIZE(mode->hdisplay, mode->vdisplay);
DRM_DEBUG_DRIVER("Updating global size W: %u H: %u\n",
mode->hdisplay, mode->vdisplay);
- regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_SIZE, size);
- regmap_write(engine->regs, SUN8I_MIXER_BLEND_OUTSIZE(bld_base), size);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ regmap_write(mixer->top_regs, SUN50I_MIXER_GLOBAL_SIZE, size);
+ else
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_SIZE, size);
+
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_OUTSIZE(bld_base), size);
if (interlaced)
val = SUN8I_MIXER_BLEND_OUTCTL_INTERLACED;
else
val = 0;
- regmap_update_bits(engine->regs, SUN8I_MIXER_BLEND_OUTCTL(bld_base),
+ regmap_update_bits(bld_regs, SUN8I_MIXER_BLEND_OUTCTL(bld_base),
SUN8I_MIXER_BLEND_OUTCTL_INTERLACED, val);
DRM_DEBUG_DRIVER("Switching display mixer interlaced mode %s\n",
@@ -399,12 +407,29 @@ static const struct sunxi_engine_ops sun8i_engine_ops = {
};
static const struct regmap_config sun8i_mixer_regmap_config = {
+ .name = "layers",
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0xffffc, /* guessed */
};
+static const struct regmap_config sun8i_top_regmap_config = {
+ .name = "top",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x3c,
+};
+
+static const struct regmap_config sun8i_disp_regmap_config = {
+ .name = "display",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x20000,
+};
+
static int sun8i_mixer_of_get_id(struct device_node *node)
{
struct device_node *ep, *remote;
@@ -425,6 +450,50 @@ static int sun8i_mixer_of_get_id(struct device_node *node)
return of_ep.id;
}
+static void sun8i_mixer_init(struct sun8i_mixer *mixer)
+{
+ struct regmap *top_regs, *disp_regs;
+ unsigned int base = sun8i_blender_base(mixer);
+ int plane_cnt, i;
+
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ top_regs = mixer->top_regs;
+ disp_regs = mixer->disp_regs;
+ } else {
+ top_regs = mixer->engine.regs;
+ disp_regs = mixer->engine.regs;
+ }
+
+ /* Enable the mixer */
+ regmap_write(top_regs, SUN8I_MIXER_GLOBAL_CTL,
+ SUN8I_MIXER_GLOBAL_CTL_RT_EN);
+
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ regmap_write(top_regs, SUN50I_MIXER_GLOBAL_CLK, 1);
+
+ /* Set background color to black */
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ /*
+ * Set fill color of bottom plane to black. Generally not needed
+ * except when VI plane is at bottom (zpos = 0) and enabled.
+ */
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
+ SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
+ for (i = 0; i < plane_cnt; i++)
+ regmap_write(disp_regs,
+ SUN8I_MIXER_BLEND_MODE(base, i),
+ SUN8I_MIXER_BLEND_MODE_DEF);
+
+ regmap_update_bits(disp_regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+}
+
static int sun8i_mixer_bind(struct device *dev, struct device *master,
void *data)
{
@@ -433,8 +502,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct sun4i_drv *drv = drm->dev_private;
struct sun8i_mixer *mixer;
void __iomem *regs;
- unsigned int base;
- int plane_cnt;
int i, ret;
/*
@@ -495,6 +562,30 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
return PTR_ERR(mixer->engine.regs);
}
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ regs = devm_platform_ioremap_resource_byname(pdev, "top");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mixer->top_regs = devm_regmap_init_mmio(dev, regs,
+ &sun8i_top_regmap_config);
+ if (IS_ERR(mixer->top_regs)) {
+ dev_err(dev, "Couldn't create the top regmap\n");
+ return PTR_ERR(mixer->top_regs);
+ }
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "display");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mixer->disp_regs = devm_regmap_init_mmio(dev, regs,
+ &sun8i_disp_regmap_config);
+ if (IS_ERR(mixer->disp_regs)) {
+ dev_err(dev, "Couldn't create the disp regmap\n");
+ return PTR_ERR(mixer->disp_regs);
+ }
+ }
+
mixer->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(mixer->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
@@ -534,10 +625,8 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
list_add_tail(&mixer->engine.list, &drv->engine_list);
- base = sun8i_blender_base(mixer);
-
/* Reset registers and disable unused sub-engines */
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
for (i = 0; i < DE3_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
@@ -551,7 +640,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN50I_MIXER_FMT_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC0_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC1_EN, 0);
- } else {
+ } else if (mixer->cfg->de_type == SUN8I_MIXER_DE2) {
for (i = 0; i < DE2_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
@@ -564,31 +653,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN8I_MIXER_DCSC_EN, 0);
}
- /* Enable the mixer */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_CTL,
- SUN8I_MIXER_GLOBAL_CTL_RT_EN);
-
- /* Set background color to black */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
- SUN8I_MIXER_BLEND_COLOR_BLACK);
-
- /*
- * Set fill color of bottom plane to black. Generally not needed
- * except when VI plane is at bottom (zpos = 0) and enabled.
- */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
- SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
- SUN8I_MIXER_BLEND_COLOR_BLACK);
-
- plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
- for (i = 0; i < plane_cnt; i++)
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_MODE(base, i),
- SUN8I_MIXER_BLEND_MODE_DEF);
-
- regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
- SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+ sun8i_mixer_init(mixer);
return 0;
@@ -628,6 +693,7 @@ static void sun8i_mixer_remove(struct platform_device *pdev)
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
.ui_num = 3,
@@ -636,6 +702,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
.ui_num = 1,
@@ -644,6 +711,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 432000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
@@ -653,6 +721,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
@@ -662,6 +731,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -670,6 +740,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
};
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
+ .de_type = SUN8I_MIXER_DE2,
.vi_num = 2,
.ui_num = 1,
.scaler_mask = 0x3,
@@ -680,6 +751,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
.ccsc = CCSC_D1_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -689,6 +761,7 @@ static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x1,
.scanline_yuv = 1024,
@@ -698,6 +771,7 @@ static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
@@ -707,6 +781,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -716,7 +791,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
- .is_de3 = true,
+ .de_type = SUN8I_MIXER_DE3,
.mod_rate = 600000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
@@ -724,6 +799,17 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.vi_num = 1,
};
+static const struct sun8i_mixer_cfg sun50i_h616_mixer0_cfg = {
+ .ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE33,
+ .mod_rate = 600000000,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 4096,
+ .ui_num = 3,
+ .vi_num = 1,
+ .map = {0, 6, 7, 8},
+};
+
static const struct of_device_id sun8i_mixer_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
@@ -769,6 +855,10 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.compatible = "allwinner,sun50i-h6-de3-mixer-0",
.data = &sun50i_h6_mixer0_cfg,
},
+ {
+ .compatible = "allwinner,sun50i-h616-de33-mixer-0",
+ .data = &sun50i_h616_mixer0_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index d7898c9c9cc0..a1c1cbccc654 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -21,6 +21,9 @@
#define SUN8I_MIXER_GLOBAL_DBUFF 0x8
#define SUN8I_MIXER_GLOBAL_SIZE 0xc
+#define SUN50I_MIXER_GLOBAL_SIZE 0x8
+#define SUN50I_MIXER_GLOBAL_CLK 0xc
+
#define SUN8I_MIXER_GLOBAL_CTL_RT_EN BIT(0)
#define SUN8I_MIXER_GLOBAL_DBUFF_ENABLE BIT(0)
@@ -151,6 +154,12 @@ enum {
CCSC_D1_MIXER0_LAYOUT,
};
+enum sun8i_mixer_type {
+ SUN8I_MIXER_DE2,
+ SUN8I_MIXER_DE3,
+ SUN8I_MIXER_DE33,
+};
+
/**
* struct sun8i_mixer_cfg - mixer HW configuration
* @vi_num: number of VI channels
@@ -162,8 +171,9 @@ enum {
* @ccsc: select set of CCSC base addresses from the enumeration above.
* @mod_rate: module clock rate that needs to be set in order to have
* a functional block.
- * @is_de3: true, if this is next gen display engine 3.0, false otherwise.
+ * @de_type: sun8i_mixer_type enum representing the display engine generation.
 * @scanline_yuv: size of a scanline for VI scaler for YUV formats.
+ * @map: channel map for DE variants processing YUV separately (DE33)
*/
struct sun8i_mixer_cfg {
int vi_num;
@@ -171,8 +181,9 @@ struct sun8i_mixer_cfg {
int scaler_mask;
int ccsc;
unsigned long mod_rate;
- unsigned int is_de3 : 1;
+ unsigned int de_type;
unsigned int scanline_yuv;
+ unsigned int map[6];
};
struct sun8i_mixer {
@@ -184,6 +195,9 @@ struct sun8i_mixer {
struct clk *bus_clk;
struct clk *mod_clk;
+
+ struct regmap *top_regs;
+ struct regmap *disp_regs;
};
enum {
@@ -214,13 +228,22 @@ engine_to_sun8i_mixer(struct sunxi_engine *engine)
static inline u32
sun8i_blender_base(struct sun8i_mixer *mixer)
{
- return mixer->cfg->is_de3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+ return mixer->cfg->de_type == SUN8I_MIXER_DE3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+}
+
+static inline struct regmap *
+sun8i_blender_regmap(struct sun8i_mixer *mixer)
+{
+ return mixer->cfg->de_type == SUN8I_MIXER_DE33 ?
+ mixer->disp_regs : mixer->engine.regs;
}
static inline u32
sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
{
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ return mixer->cfg->map[channel] * 0x20000 + DE2_CH_SIZE;
+ else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_CH_BASE + channel * DE3_CH_SIZE;
else
return DE2_CH_BASE + channel * DE2_CH_SIZE;
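
With de_type replacing the old is_de3 flag, callers no longer write to engine.regs directly; on DE33 the blender registers live in the separate "display" regmap, so all blend-unit access goes through the variant-aware helpers. A usage sketch consistent with the calls above (illustrative):

	static void example_set_route(struct sun8i_mixer *mixer, u32 route)
	{
		struct regmap *bld_regs = sun8i_blender_regmap(mixer);
		u32 bld_base = sun8i_blender_base(mixer);

		/* Works unchanged on DE2, DE3 and DE33. */
		regmap_write(bld_regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
	}
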
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index b90e5edef4e8..f97be0040aab 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -23,6 +23,7 @@
#include "sun8i_mixer.h"
#include "sun8i_ui_layer.h"
#include "sun8i_ui_scaler.h"
+#include "sun8i_vi_scaler.h"
static void sun8i_ui_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
@@ -51,6 +52,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
+ struct regmap *bld_regs;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
@@ -59,6 +61,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
channel, overlay);
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
@@ -91,22 +94,34 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
hscale = state->src_w / state->crtc_w;
vscale = state->src_h / state->crtc_h;
- sun8i_ui_scaler_setup(mixer, channel, src_w, src_h, dst_w,
- dst_h, hscale, vscale, hphase, vphase);
- sun8i_ui_scaler_enable(mixer, channel, true);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ sun8i_vi_scaler_setup(mixer, channel, src_w, src_h,
+ dst_w, dst_h, hscale, vscale,
+ hphase, vphase,
+ state->fb->format);
+ sun8i_vi_scaler_enable(mixer, channel, true);
+ } else {
+ sun8i_ui_scaler_setup(mixer, channel, src_w, src_h,
+ dst_w, dst_h, hscale, vscale,
+ hphase, vphase);
+ sun8i_ui_scaler_enable(mixer, channel, true);
+ }
} else {
DRM_DEBUG_DRIVER("HW scaling is not needed\n");
- sun8i_ui_scaler_enable(mixer, channel, false);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ sun8i_vi_scaler_enable(mixer, channel, false);
+ else
+ sun8i_ui_scaler_enable(mixer, channel, false);
}
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
index ae0806bccac7..8b7a58e27517 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
@@ -93,7 +93,7 @@ static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel)
{
int vi_num = mixer->cfg->vi_num;
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * vi_num +
DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 9c09d9c08496..a09ee4097537 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -25,7 +25,7 @@ static void sun8i_vi_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
ch_base = sun8i_channel_base(mixer, channel);
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
mask = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK |
SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK;
val = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA
@@ -55,6 +55,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
u32 src_w, src_h, dst_w, dst_h;
+ struct regmap *bld_regs;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
@@ -66,6 +67,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
channel, overlay);
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
@@ -183,10 +185,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
@@ -483,7 +485,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
if (!layer)
return ERR_PTR(-ENOMEM);
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
formats = sun8i_vi_layer_de3_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
} else {
@@ -507,7 +509,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
- if (mixer->cfg->vi_num == 1 || mixer->cfg->is_de3) {
+ if (mixer->cfg->vi_num == 1 || mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
ret = drm_plane_create_alpha_property(&layer->plane);
if (ret) {
dev_err(drm->dev, "Couldn't add alpha property\n");
@@ -524,7 +526,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709);
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3)
supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
index 7ba75011adf9..82df6244af88 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
@@ -835,7 +835,9 @@ static const u32 bicubic4coefftab32[480] = {
static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel)
{
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ return sun8i_channel_base(mixer, channel) + 0x3000;
+ else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * channel;
else
@@ -956,7 +958,7 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
cvphase = vphase;
}
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
u32 val;
if (format->hsub == 1 && format->vsub == 1)
diff --git a/drivers/gpu/drm/sysfb/Kconfig b/drivers/gpu/drm/sysfb/Kconfig
new file mode 100644
index 000000000000..9c9884c7efc6
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Kconfig
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "Drivers for system framebuffers"
+ depends on DRM
+
+config DRM_SYSFB_HELPER
+ tristate
+ depends on DRM
+
+config DRM_EFIDRM
+ tristate "EFI framebuffer driver"
+ depends on DRM && MMU && EFI && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for EFI framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via EFI interfaces.
+
+config DRM_OFDRM
+ tristate "Open Firmware display driver"
+ depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for Open Firmware framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the Open Firmware before the kernel boots. Scanout buffer, size,
+ and display format must be provided via device tree.
+
+config DRM_SIMPLEDRM
+ tristate "Simple framebuffer driver"
+ depends on DRM && MMU
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for simple platform-provided framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via device tree,
+ UEFI, VESA, etc.
+
+ On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
+ to use UEFI and VESA framebuffers.
+
+config DRM_VESADRM
+ tristate "VESA framebuffer driver"
+ depends on DRM && MMU && X86 && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for VESA framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via VBE interfaces.
+
+endmenu
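
As a worked example, a configuration that boots x86 with the simple framebuffer driver would contain a fragment along these lines (illustrative; DRM_SYSFB_HELPER is selected automatically by the drivers):

	CONFIG_DRM=y
	CONFIG_SYSFB=y
	CONFIG_SYSFB_SIMPLEFB=y
	CONFIG_DRM_SIMPLEDRM=y
	# CONFIG_DRM_VESADRM is not set
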
diff --git a/drivers/gpu/drm/sysfb/Makefile b/drivers/gpu/drm/sysfb/Makefile
new file mode 100644
index 000000000000..a156c496413d
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+drm_sysfb_helper-y := \
+ drm_sysfb.o \
+ drm_sysfb_modeset.o
+drm_sysfb_helper-$(CONFIG_SCREEN_INFO) += drm_sysfb_screen_info.o
+obj-$(CONFIG_DRM_SYSFB_HELPER) += drm_sysfb_helper.o
+
+obj-$(CONFIG_DRM_EFIDRM) += efidrm.o
+obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
+obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
+obj-$(CONFIG_DRM_VESADRM) += vesadrm.o
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb.c b/drivers/gpu/drm/sysfb/drm_sysfb.c
new file mode 100644
index 000000000000..308f82153b15
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+
+#include <drm/drm_print.h>
+
+#include "drm_sysfb_helper.h"
+
+int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (value > min(max, INT_MAX)) {
+ drm_warn(dev, "%s of %llu exceeds maximum of %u\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+EXPORT_SYMBOL(drm_sysfb_get_validated_int);
+
+int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (!value) {
+ drm_warn(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ }
+ return drm_sysfb_get_validated_int(dev, name, value, max);
+}
+EXPORT_SYMBOL(drm_sysfb_get_validated_int0);
+
+MODULE_DESCRIPTION("Helpers for DRM sysfb drivers");
+MODULE_LICENSE("GPL");
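
Both helpers return the validated value as a non-negative int, or -EINVAL after a warning; the drivers below use them early in probe, roughly like this sketch (hypothetical caller):

	int width;

	width = drm_sysfb_get_validated_int0(dev, "width", si->lfb_width, U16_MAX);
	if (width < 0)
		return ERR_PTR(width);	/* reject bogus firmware data */
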
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
new file mode 100644
index 000000000000..1424b63dde99
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef DRM_SYSFB_HELPER_H
+#define DRM_SYSFB_HELPER_H
+
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
+
+#include <video/pixel_format.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modes.h>
+
+struct drm_format_info;
+struct drm_scanout_buffer;
+struct screen_info;
+
+/*
+ * Input parsing
+ */
+
+struct drm_sysfb_format {
+ struct pixel_format pixel;
+ u32 fourcc;
+};
+
+int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max);
+int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max);
+
+#if defined(CONFIG_SCREEN_INFO)
+int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si);
+int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si);
+struct resource *drm_sysfb_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res);
+int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size);
+u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size);
+const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
+ const struct drm_sysfb_format *formats,
+ size_t nformats,
+ const struct screen_info *si);
+#endif
+
+/*
+ * Display modes
+ */
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm);
+
+/*
+ * Device
+ */
+
+struct drm_sysfb_device {
+ struct drm_device dev;
+
+ const u8 *edid; /* can be NULL */
+
+ /* hardware settings */
+ struct drm_display_mode fb_mode;
+ const struct drm_format_info *fb_format;
+ unsigned int fb_pitch;
+ unsigned int fb_gamma_lut_size;
+
+ /* hardware-framebuffer kernel address */
+ struct iosys_map fb_addr;
+};
+
+static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *dev)
+{
+ return container_of(dev, struct drm_sysfb_device, dev);
+}
+
+/*
+ * Plane
+ */
+
+size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out);
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state);
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
+#define DRM_SYSFB_PLANE_NFORMATS(_num_native) \
+ ((_num_native) + 1)
+
+#define DRM_SYSFB_PLANE_FORMAT_MODIFIERS \
+ DRM_FORMAT_MOD_LINEAR, \
+ DRM_FORMAT_MOD_INVALID
+
+#define DRM_SYSFB_PLANE_HELPER_FUNCS \
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .atomic_check = drm_sysfb_plane_helper_atomic_check, \
+ .atomic_update = drm_sysfb_plane_helper_atomic_update, \
+ .atomic_disable = drm_sysfb_plane_helper_atomic_disable, \
+ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer
+
+#define DRM_SYSFB_PLANE_FUNCS \
+ .update_plane = drm_atomic_helper_update_plane, \
+ .disable_plane = drm_atomic_helper_disable_plane, \
+ DRM_GEM_SHADOW_PLANE_FUNCS
+
+/*
+ * CRTC
+ */
+
+struct drm_sysfb_crtc_state {
+ struct drm_crtc_state base;
+
+ /* Primary-plane format; required for color mgmt. */
+ const struct drm_format_info *format;
+};
+
+static inline struct drm_sysfb_crtc_state *
+to_drm_sysfb_crtc_state(struct drm_crtc_state *base)
+{
+ return container_of(base, struct drm_sysfb_crtc_state, base);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+
+#define DRM_SYSFB_CRTC_HELPER_FUNCS \
+ .mode_valid = drm_sysfb_crtc_helper_mode_valid, \
+ .atomic_check = drm_sysfb_crtc_helper_atomic_check
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+
+#define DRM_SYSFB_CRTC_FUNCS \
+ .reset = drm_sysfb_crtc_reset, \
+ .set_config = drm_atomic_helper_set_config, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = drm_sysfb_crtc_atomic_duplicate_state, \
+ .atomic_destroy_state = drm_sysfb_crtc_atomic_destroy_state
+
+/*
+ * Connector
+ */
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector);
+
+#define DRM_SYSFB_CONNECTOR_HELPER_FUNCS \
+ .get_modes = drm_sysfb_connector_helper_get_modes
+
+#define DRM_SYSFB_CONNECTOR_FUNCS \
+ .reset = drm_atomic_helper_connector_reset, \
+ .fill_modes = drm_helper_probe_single_connector_modes, \
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+
+/*
+ * Mode config
+ */
+
+#define DRM_SYSFB_MODE_CONFIG_FUNCS \
+ .fb_create = drm_gem_fb_create_with_dirty, \
+ .atomic_check = drm_atomic_helper_check, \
+ .atomic_commit = drm_atomic_helper_commit
+
+#endif
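
The header is built so that drivers compose these macro sets into their own vtables and only add driver-specific entries such as .destroy; a minimal sketch with hypothetical names, mirroring the efidrm code later in this diff:

	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
		DRM_SYSFB_PLANE_HELPER_FUNCS,
	};

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		DRM_SYSFB_CRTC_FUNCS,
		.destroy = drm_crtc_cleanup,	/* driver-specific addition */
	};
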
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
new file mode 100644
index 000000000000..1bcdb5ee8f09
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "drm_sysfb_helper.h"
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm)
+{
+ /*
+ * Assume a monitor resolution of 96 dpi to
+ * get a somewhat reasonable screen size.
+ */
+ if (!width_mm)
+ width_mm = DRM_MODE_RES_MM(width, 96ul);
+ if (!height_mm)
+ height_mm = DRM_MODE_RES_MM(height, 96ul);
+
+ {
+ const struct drm_display_mode mode = {
+ DRM_MODE_INIT(60, width, height, width_mm, height_mm)
+ };
+
+ return mode;
+ }
+}
+EXPORT_SYMBOL(drm_sysfb_mode);
+
+/*
+ * Plane
+ */
+
+static u32 to_nonalpha_fourcc(u32 fourcc)
+{
+ /* only handle formats with depth != 0 and alpha channel */
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ return DRM_FORMAT_XRGB1555;
+ case DRM_FORMAT_ABGR1555:
+ return DRM_FORMAT_XBGR1555;
+ case DRM_FORMAT_RGBA5551:
+ return DRM_FORMAT_RGBX5551;
+ case DRM_FORMAT_BGRA5551:
+ return DRM_FORMAT_BGRX5551;
+ case DRM_FORMAT_ARGB8888:
+ return DRM_FORMAT_XRGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return DRM_FORMAT_XBGR8888;
+ case DRM_FORMAT_RGBA8888:
+ return DRM_FORMAT_RGBX8888;
+ case DRM_FORMAT_BGRA8888:
+ return DRM_FORMAT_BGRX8888;
+ case DRM_FORMAT_ARGB2101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DRM_FORMAT_ABGR2101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DRM_FORMAT_RGBA1010102:
+ return DRM_FORMAT_RGBX1010102;
+ case DRM_FORMAT_BGRA1010102:
+ return DRM_FORMAT_BGRX1010102;
+ }
+
+ return fourcc;
+}
+
+static bool is_listed_fourcc(const u32 *fourccs, size_t nfourccs, u32 fourcc)
+{
+ const u32 *fourccs_end = fourccs + nfourccs;
+
+ while (fourccs < fourccs_end) {
+ if (*fourccs == fourcc)
+ return true;
+ ++fourccs;
+ }
+ return false;
+}
+
+/**
+ * drm_sysfb_build_fourcc_list - Filters a list of supported color formats against
+ * the device's native formats
+ * @dev: DRM device
+ * @native_fourccs: 4CC codes of natively supported color formats
+ * @native_nfourccs: The number of entries in @native_fourccs
+ * @fourccs_out: Returns 4CC codes of supported color formats
+ * @nfourccs_out: The number of available entries in @fourccs_out
+ *
+ * This function creates a list of supported color formats from natively
+ * supported formats and additional emulated formats.
+ * At a minimum, most userspace programs expect at least support for
+ * XRGB8888 on the primary plane. Sysfb devices that have to emulate
+ * the format should use drm_sysfb_build_fourcc_list() to create a list
+ * of supported color formats. The returned list can be handed over to
+ * drm_universal_plane_init() et al. Native formats will go before
+ * emulated formats. Native formats with alpha channel will be replaced
+ * by equal formats without alpha channel, as primary planes usually
+ * don't support alpha. Other heuristics might be applied to optimize
+ * the sorting order. Formats near the beginning of the list are usually
+ * preferred over formats near the end of the list.
+ *
+ * Returns:
+ * The number of color-format 4CC codes returned in @fourccs_out.
+ */
+size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out)
+{
+ /*
+ * XRGB8888 is the default fallback format for most of userspace
+ * and it's currently the only format that should be emulated for
+ * the primary plane. Only if there's ever another default fallback,
+ * it should be added here.
+ */
+ static const u32 extra_fourccs[] = {
+ DRM_FORMAT_XRGB8888,
+ };
+ static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
+
+ u32 *fourccs = fourccs_out;
+ const u32 *fourccs_end = fourccs_out + nfourccs_out;
+ size_t i;
+
+ /*
+ * The device's native formats go first.
+ */
+
+ for (i = 0; i < native_nfourccs; ++i) {
+ /*
+ * Several DTs, boot loaders and firmware report native
+ * alpha formats that are non-alpha formats instead. So
+ * replace alpha formats by non-alpha formats.
+ */
+ u32 fourcc = to_nonalpha_fourcc(native_fourccs[i]);
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ /*
+ * The extra formats, emulated by the driver, go second.
+ */
+
+ for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
+ u32 fourcc = extra_fourccs[i];
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate and native entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ return fourccs - fourccs_out;
+}
+EXPORT_SYMBOL(drm_sysfb_build_fourcc_list);
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_shadow_plane_state *new_shadow_plane_state =
+ to_drm_shadow_plane_state(new_plane_state);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ if (new_fb->format != sysfb->fb_format) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
+ sysfb->fb_pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+ new_sysfb_crtc_state->format = new_fb->format;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_check);
+
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_rect dst_clip = plane_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
+ drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
+ &damage, &shadow_plane_state->fmtcnv_state);
+ }
+
+ drm_dev_exit(idx);
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_update);
+
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_rect dst_clip;
+ unsigned long lines, linepixels, i;
+ int idx;
+
+ drm_rect_init(&dst_clip,
+ plane_state->src_x >> 16, plane_state->src_y >> 16,
+ plane_state->src_w >> 16, plane_state->src_h >> 16);
+
+ lines = drm_rect_height(&dst_clip);
+ linepixels = drm_rect_width(&dst_clip);
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ /* Clear buffer to black if disabled */
+ dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
+ for (i = 0; i < lines; ++i) {
+ memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
+ dst_vmap += dst_pitch;
+ }
+
+ drm_dev_exit(idx);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_disable);
+
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+
+ sb->width = sysfb->fb_mode.hdisplay;
+ sb->height = sysfb->fb_mode.vdisplay;
+ sb->format = sysfb->fb_format;
+ sb->pitch[0] = sysfb->fb_pitch;
+ sb->map[0] = sysfb->fb_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer);
+
+/*
+ * CRTC
+ */
+
+static void drm_sysfb_crtc_state_destroy(struct drm_sysfb_crtc_state *sysfb_crtc_state)
+{
+ __drm_atomic_helper_crtc_destroy_state(&sysfb_crtc_state->base);
+
+ kfree(sysfb_crtc_state);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_mode_valid);
+
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
+
+ if (!new_crtc_state->enable)
+ return 0;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
+ if (ret)
+ return ret;
+
+ if (new_crtc_state->color_mgmt_changed) {
+ const size_t gamma_lut_length =
+ sysfb->fb_gamma_lut_size * sizeof(struct drm_color_lut);
+ const struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
+
+ if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
+ drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_atomic_check);
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (crtc->state)
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc->state));
+
+ sysfb_crtc_state = kzalloc(sizeof(*sysfb_crtc_state), GFP_KERNEL);
+ if (sysfb_crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, &sysfb_crtc_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_reset);
+
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (drm_WARN_ON(dev, !crtc_state))
+ return NULL;
+
+ new_sysfb_crtc_state = kzalloc(sizeof(*new_sysfb_crtc_state), GFP_KERNEL);
+ if (!new_sysfb_crtc_state)
+ return NULL;
+
+ sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new_sysfb_crtc_state->base);
+ new_sysfb_crtc_state->format = sysfb_crtc_state->format;
+
+ return &new_sysfb_crtc_state->base;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_duplicate_state);
+
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
+{
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc_state));
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_destroy_state);
+
+/*
+ * Connector
+ */
+
+static int drm_sysfb_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct drm_sysfb_device *sysfb = data;
+ const u8 *edid = sysfb->edid;
+ size_t off = block * EDID_LENGTH;
+ size_t end = off + len;
+
+ if (!edid)
+ return -EINVAL;
+ if (end > EDID_LENGTH)
+ return -EINVAL;
+ memcpy(buf, &edid[off], len);
+
+ /*
+ * We don't have EDID extensions available and reporting them
+ * will upset DRM helpers. Thus clear the extension field and
+ * update the checksum. Adding the extension flag to the checksum
+ * does this.
+ */
+ buf[127] += buf[126];
+ buf[126] = 0;
+
+ return 0;
+}
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(connector->dev);
+ const struct drm_edid *drm_edid;
+
+ if (sysfb->edid) {
+ drm_edid = drm_edid_read_custom(connector, drm_sysfb_get_edid_block, sysfb);
+ drm_edid_connector_update(connector, drm_edid);
+ drm_edid_free(drm_edid);
+ }
+
+ /* Return the fixed mode even with EDID */
+ return drm_connector_helper_get_modes_fixed(connector, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_connector_helper_get_modes);
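
A typical consumer pairs drm_sysfb_build_fourcc_list() with plane initialization, letting the emulated XRGB8888 entry ride along after the native format; condensed from the efidrm code later in this diff:

	u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
	size_t nformats;

	nformats = drm_sysfb_build_fourcc_list(dev, &native_format->format, 1,
					       formats, ARRAY_SIZE(formats));
	ret = drm_universal_plane_init(dev, plane, 0, &plane_funcs,
				       formats, nformats, format_modifiers,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
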
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
new file mode 100644
index 000000000000..0b3fb874a51f
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/screen_info.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+
+#include "drm_sysfb_helper.h"
+
+static s64 drm_sysfb_get_validated_size0(struct drm_device *dev, const char *name,
+ u64 value, u64 max)
+{
+ if (!value) {
+ drm_warn(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ } else if (value > min(max, S64_MAX)) {
+ drm_warn(dev, "%s of %llu exceeds maximum of %llu\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+
+int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return drm_sysfb_get_validated_int0(dev, "width", si->lfb_width, U16_MAX);
+}
+EXPORT_SYMBOL(drm_sysfb_get_width_si);
+
+int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return drm_sysfb_get_validated_int0(dev, "height", si->lfb_height, U16_MAX);
+}
+EXPORT_SYMBOL(drm_sysfb_get_height_si);
+
+struct resource *drm_sysfb_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res)
+{
+ ssize_t num;
+
+ num = screen_info_resources(si, res, 1);
+ if (!num) {
+ drm_warn(dev, "memory resource not found\n");
+ return NULL;
+ }
+
+ return res;
+}
+EXPORT_SYMBOL(drm_sysfb_get_memory_si);
+
+int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size)
+{
+ u64 lfb_linelength = si->lfb_linelength;
+
+ if (!lfb_linelength)
+ lfb_linelength = drm_format_info_min_pitch(format, 0, width);
+
+ return drm_sysfb_get_validated_int0(dev, "stride", lfb_linelength, div64_u64(size, height));
+}
+EXPORT_SYMBOL(drm_sysfb_get_stride_si);
+
+u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size)
+{
+ u64 vsize = PAGE_ALIGN(height * stride);
+
+ return drm_sysfb_get_validated_size0(dev, "visible size", vsize, size);
+}
+EXPORT_SYMBOL(drm_sysfb_get_visible_size_si);
+
+const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
+ const struct drm_sysfb_format *formats,
+ size_t nformats,
+ const struct screen_info *si)
+{
+ const struct drm_format_info *format = NULL;
+ u32 bits_per_pixel;
+ size_t i;
+
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+
+ for (i = 0; i < nformats; ++i) {
+ const struct pixel_format *f = &formats[i].pixel;
+
+ if (bits_per_pixel == f->bits_per_pixel &&
+ si->red_size == f->red.length &&
+ si->red_pos == f->red.offset &&
+ si->green_size == f->green.length &&
+ si->green_pos == f->green.offset &&
+ si->blue_size == f->blue.length &&
+ si->blue_pos == f->blue.offset) {
+ format = drm_format_info(formats[i].fourcc);
+ break;
+ }
+ }
+
+ if (!format)
+ drm_warn(dev, "No compatible color format found\n");
+
+ return format;
+}
+EXPORT_SYMBOL(drm_sysfb_get_format_si);
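
Taken together, these helpers form a small validation pipeline that the EFI and VESA drivers run during probe; the ordering matters, since the stride check needs the format and geometry, and the visible-size check needs the stride. A condensed sketch (mirroring efidrm below):

	format = drm_sysfb_get_format_si(dev, formats, nformats, si);
	width  = drm_sysfb_get_width_si(dev, si);
	height = drm_sysfb_get_height_si(dev, si);
	res    = drm_sysfb_get_memory_si(dev, si, &resbuf);
	stride = drm_sysfb_get_stride_si(dev, si, format, width, height,
					 resource_size(res));
	vsize  = drm_sysfb_get_visible_size_si(dev, si, height, stride,
					       resource_size(res));
	/* each step warns and fails on inconsistent firmware data */
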
diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c
new file mode 100644
index 000000000000..1883c4a8604c
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/efidrm.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/efi.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "efidrm"
+#define DRIVER_DESC "DRM driver for EFI platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static const struct drm_format_info *efidrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
+ static const struct drm_sysfb_format formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ { PIXEL_FORMAT_XRGB2101010, DRM_FORMAT_XRGB2101010, },
+ };
+
+ return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
+}
+
+static u64 efidrm_get_mem_flags(struct drm_device *dev, resource_size_t start,
+ resource_size_t len)
+{
+ u64 attribute = EFI_MEMORY_UC | EFI_MEMORY_WC |
+ EFI_MEMORY_WT | EFI_MEMORY_WB;
+ u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
+ resource_size_t end = start + len;
+ efi_memory_desc_t md;
+ u64 md_end;
+
+ if (!efi_enabled(EFI_MEMMAP) || efi_mem_desc_lookup(start, &md))
+ goto out;
+
+ md_end = md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT);
+ if (end > md_end)
+ goto out;
+
+ attribute &= md.attribute;
+ if (attribute) {
+ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+ mem_flags &= attribute;
+ }
+
+out:
+ return mem_flags;
+}
+
+/*
+ * EFI device
+ */
+
+struct efidrm_device {
+ struct drm_sysfb_device sysfb;
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+/*
+ * Modesetting
+ */
+
+static const u64 efidrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs efidrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs efidrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static const struct drm_crtc_helper_funcs efidrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs efidrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs efidrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs efidrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs efidrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs efidrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize, mem_flags;
+ struct resource resbuf;
+ struct resource *res;
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base = NULL;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_EFI)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * EFI DRM driver
+ */
+
+ efi = devm_drm_dev_alloc(&pdev->dev, drv, struct efidrm_device, sysfb.dev);
+ if (IS_ERR(efi))
+ return ERR_CAST(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = efidrm_get_format_si(dev, si);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ width = drm_sysfb_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = drm_sysfb_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = drm_sysfb_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
+#if defined(CONFIG_FIRMWARE_EDID)
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+
+ /*
+ * Memory management
+ */
+
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ mem_flags = efidrm_get_mem_flags(dev, res->start, vsize);
+
+ if (mem_flags & EFI_MEMORY_WC)
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_UC)
+ screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_WT)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WT);
+ else if (mem_flags & EFI_MEMORY_WB)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WB);
+ else
+ drm_err(dev, "invalid mem_flags: 0x%llx\n", mem_flags);
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &efidrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ efi->formats, ARRAY_SIZE(efi->formats));
+
+ primary_plane = &efi->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &efidrm_primary_plane_funcs,
+ efi->formats, nformats,
+ efidrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &efidrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &efi->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &efidrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &efidrm_crtc_helper_funcs);
+
+ /* Encoder */
+
+ encoder = &efi->encoder;
+ ret = drm_encoder_init(dev, encoder, &efidrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &efi->connector;
+ ret = drm_connector_init(dev, connector, &efidrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &efidrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return efi;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(efidrm_fops);
+
+static struct drm_driver efidrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &efidrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int efidrm_probe(struct platform_device *pdev)
+{
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ efi = efidrm_device_create(&efidrm_driver, pdev);
+ if (IS_ERR(efi))
+ return PTR_ERR(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
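+ /* Set up the default in-kernel DRM clients, typically fbdev emulation. */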
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void efidrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver efidrm_platform_driver = {
+ .driver = {
+ .name = "efi-framebuffer",
+ },
+ .probe = efidrm_probe,
+ .remove = efidrm_remove,
+};
+
+module_platform_driver(efidrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c
index 13491c0e704a..8d8ab39c5f36 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/sysfb/ofdrm.c
@@ -8,12 +8,13 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fbdev_shmem.h>
-#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -21,7 +22,8 @@
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+
+#include "drm_sysfb_helper.h"
#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
@@ -76,20 +78,12 @@ enum ofdrm_model {
static int display_get_validated_int(struct drm_device *dev, const char *name, uint32_t value)
{
- if (value > INT_MAX) {
- drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
- return -EINVAL;
- }
- return (int)value;
+ return drm_sysfb_get_validated_int(dev, name, value, INT_MAX);
}
static int display_get_validated_int0(struct drm_device *dev, const char *name, uint32_t value)
{
- if (!value) {
- drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
- return -EINVAL;
- }
- return display_get_validated_int(dev, name, value);
+ return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX);
}
static const struct drm_format_info *display_get_validated_format(struct drm_device *dev,
@@ -226,6 +220,16 @@ static u64 display_get_address_of(struct drm_device *dev, struct device_node *of
return address;
}
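+/* Read the optional 128-byte EDID blob from the device tree's "EDID" property. */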
+static const u8 *display_get_edid_of(struct drm_device *dev, struct device_node *of_node,
+ u8 buf[EDID_LENGTH])
+{
+ int ret = of_property_read_u8_array(of_node, "EDID", buf, EDID_LENGTH);
+
+ if (ret)
+ return NULL;
+ return buf;
+}
+
static bool is_avivo(u32 vendor, u32 device)
{
/* This will match most R5xx */
@@ -290,22 +294,17 @@ struct ofdrm_device_funcs {
};
struct ofdrm_device {
- struct drm_device dev;
- struct platform_device *pdev;
+ struct drm_sysfb_device sysfb;
const struct ofdrm_device_funcs *funcs;
- /* firmware-buffer settings */
- struct iosys_map screen_base;
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
/* colormap */
void __iomem *cmap_base;
+ u8 edid[EDID_LENGTH];
+
/* modesetting */
- uint32_t formats[8];
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
@@ -314,7 +313,7 @@ struct ofdrm_device {
static struct ofdrm_device *ofdrm_device_of_dev(struct drm_device *dev)
{
- return container_of(dev, struct ofdrm_device, dev);
+ return container_of(to_drm_sysfb_device(dev), struct ofdrm_device, sysfb);
}
/*
@@ -354,7 +353,7 @@ static void ofdrm_pci_release(void *data)
static int ofdrm_device_init_pci(struct ofdrm_device *odev)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct pci_dev *pcidev;
@@ -397,7 +396,7 @@ static int ofdrm_device_init_pci(struct ofdrm_device *odev)
static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
struct resource *fb_res)
{
- struct platform_device *pdev = to_platform_device(odev->dev.dev);
+ struct platform_device *pdev = to_platform_device(odev->sysfb.dev.dev);
struct resource *res, *max_res = NULL;
u32 i;
@@ -423,7 +422,7 @@ static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct device_node *of_node,
int bar_no, unsigned long offset, unsigned long size)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
const __be32 *addr_p;
u64 max_size, address;
unsigned int flags;
@@ -456,7 +455,7 @@ static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -618,7 +617,7 @@ static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev,
cpu_to_be32(0x00),
};
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -645,36 +644,36 @@ static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index
writeb(b, data);
}
-static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
- const struct drm_format_info *format)
+static void ofdrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ struct drm_device *dev = crtc->dev;
+ struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
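+ /* Palette indices and channels are 8 bits wide; a wider index is a driver bug. */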
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ odev->funcs->cmap_write(odev, i8, r8, g8, b8);
+}
+
+static void ofdrm_device_fill_gamma(struct ofdrm_device *odev,
+ const struct drm_format_info *format)
{
- struct drm_device *dev = &odev->dev;
- int i;
+ struct drm_device *dev = &odev->sysfb.dev;
+ struct drm_crtc *crtc = &odev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
- /* Use better interpolation, to take 32 values from 0 to 255 */
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
- unsigned char r = i * 8 + i / 4;
- unsigned char g = i * 4 + i / 16;
- unsigned char b = i * 8 + i / 4;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
- unsigned char r = 0;
- unsigned char g = i * 4 + i / 16;
- unsigned char b = 0;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_fill_gamma_565(crtc, ofdrm_set_gamma_lut);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++)
- odev->funcs->cmap_write(odev, i, i, i, i);
+ drm_crtc_fill_gamma_888(crtc, ofdrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -683,42 +682,21 @@ static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
}
}
-static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void ofdrm_device_load_gamma(struct ofdrm_device *odev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- struct drm_device *dev = &odev->dev;
- int i;
+ struct drm_device *dev = &odev->sysfb.dev;
+ struct drm_crtc *crtc = &odev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
- /* Use better interpolation, to take 32 values from lut[0] to lut[255] */
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
- unsigned char r = lut[i * 8 + i / 4].red >> 8;
- unsigned char g = lut[i * 4 + i / 16].green >> 8;
- unsigned char b = lut[i * 8 + i / 4].blue >> 8;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
- unsigned char r = 0;
- unsigned char g = lut[i * 4 + i / 16].green >> 8;
- unsigned char b = 0;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_load_gamma_565_from_888(crtc, lut, ofdrm_set_gamma_lut);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++) {
- unsigned char r = lut[i].red >> 8;
- unsigned char g = lut[i].green >> 8;
- unsigned char b = lut[i].blue >> 8;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_load_gamma_888(crtc, lut, ofdrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -731,298 +709,60 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
* Modesetting
*/
-struct ofdrm_crtc_state {
- struct drm_crtc_state base;
-
- /* Primary-plane format; required for color mgmt. */
- const struct drm_format_info *format;
+static const u64 ofdrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static struct ofdrm_crtc_state *to_ofdrm_crtc_state(struct drm_crtc_state *base)
-{
- return container_of(base, struct ofdrm_crtc_state, base);
-}
-
-static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
-{
- __drm_atomic_helper_crtc_destroy_state(&ofdrm_crtc_state->base);
- kfree(ofdrm_crtc_state);
-}
-
-static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *new_state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != odev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- odev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
- new_ofdrm_crtc_state->format = new_fb->format;
-
- return 0;
-}
-
-static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct iosys_map dst = odev->screen_base;
- struct drm_rect dst_clip = plane_state->dst;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
- drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void ofdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct iosys_map dst = odev->screen_base;
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_rect dst_clip;
- unsigned long lines, linepixels, i;
- int idx;
-
- drm_rect_init(&dst_clip,
- plane_state->src_x >> 16, plane_state->src_y >> 16,
- plane_state->src_w >> 16, plane_state->src_h >> 16);
-
- lines = drm_rect_height(&dst_clip);
- linepixels = drm_rect_width(&dst_clip);
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear buffer to black if disabled */
- dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
- for (i = 0; i < lines; ++i) {
- memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
- dst_vmap += dst_pitch;
- }
-
- drm_dev_exit(idx);
-}
-
static const struct drm_plane_helper_funcs ofdrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = ofdrm_primary_plane_helper_atomic_check,
- .atomic_update = ofdrm_primary_plane_helper_atomic_update,
- .atomic_disable = ofdrm_primary_plane_helper_atomic_disable,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs ofdrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status ofdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &odev->mode);
-}
-
-static int ofdrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *new_state)
-{
- static const size_t gamma_lut_length = OFDRM_GAMMA_LUT_SIZE * sizeof(struct drm_color_lut);
-
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
- int ret;
-
- if (!new_crtc_state->enable)
- return 0;
-
- ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
- if (ret)
- return ret;
-
- if (new_crtc_state->color_mgmt_changed) {
- struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
-
- if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
- drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- struct ofdrm_crtc_state *ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
- const struct drm_format_info *format = ofdrm_crtc_state->format;
+ const struct drm_format_info *format = sysfb_crtc_state->format;
if (crtc_state->gamma_lut)
- ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
+ ofdrm_device_load_gamma(odev, format, crtc_state->gamma_lut->data);
else
- ofdrm_device_set_gamma_linear(odev, format);
+ ofdrm_device_fill_gamma(odev, format);
}
}
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs ofdrm_crtc_helper_funcs = {
- .mode_valid = ofdrm_crtc_helper_mode_valid,
- .atomic_check = ofdrm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
.atomic_flush = ofdrm_crtc_helper_atomic_flush,
};
-static void ofdrm_crtc_reset(struct drm_crtc *crtc)
-{
- struct ofdrm_crtc_state *ofdrm_crtc_state =
- kzalloc(sizeof(*ofdrm_crtc_state), GFP_KERNEL);
-
- if (crtc->state)
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc->state));
-
- if (ofdrm_crtc_state)
- __drm_atomic_helper_crtc_reset(crtc, &ofdrm_crtc_state->base);
- else
- __drm_atomic_helper_crtc_reset(crtc, NULL);
-}
-
-static struct drm_crtc_state *ofdrm_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *crtc_state = crtc->state;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- struct ofdrm_crtc_state *ofdrm_crtc_state;
-
- if (drm_WARN_ON(dev, !crtc_state))
- return NULL;
-
- new_ofdrm_crtc_state = kzalloc(sizeof(*new_ofdrm_crtc_state), GFP_KERNEL);
- if (!new_ofdrm_crtc_state)
- return NULL;
-
- ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
-
- __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ofdrm_crtc_state->base);
- new_ofdrm_crtc_state->format = ofdrm_crtc_state->format;
-
- return &new_ofdrm_crtc_state->base;
-}
-
-static void ofdrm_crtc_atomic_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
-{
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc_state));
-}
-
static const struct drm_crtc_funcs ofdrm_crtc_funcs = {
- .reset = ofdrm_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = ofdrm_crtc_atomic_duplicate_state,
- .atomic_destroy_state = ofdrm_crtc_atomic_destroy_state,
};
-static int ofdrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &odev->mode);
-}
+static const struct drm_encoder_funcs ofdrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
static const struct drm_connector_helper_funcs ofdrm_connector_helper_funcs = {
- .get_modes = ofdrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs ofdrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs ofdrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
/*
@@ -1072,32 +812,19 @@ static const struct ofdrm_device_funcs ofdrm_qemu_device_funcs = {
.cmap_write = ofdrm_qemu_cmap_write,
};
-static struct drm_display_mode ofdrm_mode(unsigned int width, unsigned int height)
-{
- /*
- * Assume a monitor resolution of 96 dpi to
- * get a somewhat reasonable screen size.
- */
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height,
- DRM_MODE_RES_MM(width, 96ul),
- DRM_MODE_RES_MM(height, 96ul))
- };
-
- return mode;
-}
-
static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
struct device_node *of_node = pdev->dev.of_node;
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
enum ofdrm_model model;
bool big_endian;
int width, height, depth, linebytes;
const struct drm_format_info *format;
u64 address;
+ const u8 *edid;
resource_size_t fb_size, fb_base, fb_pgbase, fb_pgsize;
struct resource *res, *mem;
void __iomem *screen_base;
@@ -1109,10 +836,11 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, dev);
+ odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, sysfb.dev);
if (IS_ERR(odev))
return ERR_CAST(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, dev);
ret = ofdrm_device_init_pci(odev);
@@ -1246,16 +974,22 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
}
}
+ /* EDID is optional */
+ edid = display_get_edid_of(dev, of_node, odev->edid);
+
/*
* Firmware framebuffer
*/
- iosys_map_set_vaddr_iomem(&odev->screen_base, screen_base);
- odev->mode = ofdrm_mode(width, height);
- odev->format = format;
- odev->pitch = linebytes;
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = linebytes;
+ if (odev->cmap_base)
+ sysfb->fb_gamma_lut_size = OFDRM_GAMMA_LUT_SIZE;
+ sysfb->edid = edid;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&odev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, linebytes=%d byte\n",
&format->format, width, height, linebytes);
@@ -1280,8 +1014,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- odev->formats, ARRAY_SIZE(odev->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ odev->formats, ARRAY_SIZE(odev->formats));
primary_plane = &odev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &ofdrm_primary_plane_funcs,
@@ -1302,15 +1036,16 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &ofdrm_crtc_helper_funcs);
- if (odev->cmap_base) {
- drm_mode_crtc_set_gamma_size(crtc, OFDRM_GAMMA_LUT_SIZE);
- drm_crtc_enable_color_mgmt(crtc, 0, false, OFDRM_GAMMA_LUT_SIZE);
+ if (sysfb->fb_gamma_lut_size) {
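+ /* Only advertise gamma support if the LUT could actually be allocated. */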
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
}
/* Encoder */
encoder = &odev->encoder;
- ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
+ ret = drm_encoder_init(dev, encoder, &ofdrm_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ERR_PTR(ret);
encoder->possible_crtcs = drm_crtc_mask(crtc);
@@ -1326,6 +1061,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
drm_connector_set_panel_orientation_with_quirk(connector,
DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
width, height);
+ if (edid)
+ drm_connector_attach_edid_property(connector);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
@@ -1360,19 +1097,21 @@ static struct drm_driver ofdrm_driver = {
static int ofdrm_probe(struct platform_device *pdev)
{
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
odev = ofdrm_device_create(&ofdrm_driver, pdev);
if (IS_ERR(odev))
return PTR_ERR(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, odev->format);
+ drm_client_setup(dev, sysfb->fb_format);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index 5d9ab8adf800..8530a3ef8a7a 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -14,21 +14,20 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
-#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
+#include "drm_sysfb_helper.h"
+
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
#define DRIVER_MAJOR 1
@@ -42,24 +41,14 @@ static int
simplefb_get_validated_int(struct drm_device *dev, const char *name,
uint32_t value)
{
- if (value > INT_MAX) {
- drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
- name, value);
- return -EINVAL;
- }
- return (int)value;
+ return drm_sysfb_get_validated_int(dev, name, value, INT_MAX);
}
static int
simplefb_get_validated_int0(struct drm_device *dev, const char *name,
uint32_t value)
{
- if (!value) {
- drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
- name, value);
- return -EINVAL;
- }
- return simplefb_get_validated_int(dev, name, value);
+ return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX);
}
static const struct drm_format_info *
@@ -217,7 +206,7 @@ simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
*/
struct simpledrm_device {
- struct drm_device dev;
+ struct drm_sysfb_device sysfb;
/* clocks */
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
@@ -236,28 +225,14 @@ struct simpledrm_device {
struct device_link **pwr_dom_links;
#endif
- /* simplefb settings */
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
- /* memory management */
- struct iosys_map screen_base;
-
/* modesetting */
- uint32_t formats[8];
- size_t nformats;
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
};
-static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
-{
- return container_of(dev, struct simpledrm_device, dev);
-}
-
/*
* Hardware
*/
@@ -284,7 +259,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
static void simpledrm_device_release_clocks(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->clk_count; ++i) {
@@ -297,7 +272,7 @@ static void simpledrm_device_release_clocks(void *res)
static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct clk *clock;
@@ -382,7 +357,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
static void simpledrm_device_release_regulators(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->regulator_count; ++i) {
@@ -395,7 +370,7 @@ static void simpledrm_device_release_regulators(void *res)
static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct property *prop;
@@ -516,7 +491,7 @@ static void simpledrm_device_detach_genpd(void *res)
static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
{
- struct device *dev = sdev->dev.dev;
+ struct device *dev = sdev->sysfb.dev.dev;
int i;
sdev->pwr_dom_count = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -548,7 +523,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
simpledrm_device_detach_genpd(sdev);
return ret;
}
- drm_warn(&sdev->dev,
+ drm_warn(&sdev->sysfb.dev,
"pm_domain_attach_by_id(%u) failed: %d\n", i, ret);
continue;
}
@@ -559,7 +534,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!sdev->pwr_dom_links[i])
- drm_warn(&sdev->dev, "failed to link power-domain %d\n", i);
+ drm_warn(&sdev->sysfb.dev, "failed to link power-domain %d\n", i);
}
return devm_add_action_or_reset(dev, simpledrm_device_detach_genpd, sdev);
@@ -575,210 +550,56 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
* Modesetting
*/
-static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const u64 simpledrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != sdev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- sdev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct drm_rect dst_clip = plane_state->dst;
- struct iosys_map dst = sdev->screen_base;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
- drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data,
- fb, &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int idx;
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear screen to black if disabled */
- iosys_map_memset(&sdev->screen_base, 0, 0, sdev->pitch * sdev->mode.vdisplay);
-
- drm_dev_exit(idx);
-}
-
-static int simpledrm_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
- struct drm_scanout_buffer *sb)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(plane->dev);
-
- sb->width = sdev->mode.hdisplay;
- sb->height = sdev->mode.vdisplay;
- sb->format = sdev->format;
- sb->pitch[0] = sdev->pitch;
- sb->map[0] = sdev->screen_base;
-
- return 0;
-}
-
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = simpledrm_primary_plane_helper_atomic_check,
- .atomic_update = simpledrm_primary_plane_helper_atomic_update,
- .atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
- .get_scanout_buffer = simpledrm_primary_plane_helper_get_scanout_buffer,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
-}
-
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
- .mode_valid = simpledrm_crtc_helper_mode_valid,
- .atomic_check = drm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
};
static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_encoder_funcs simpledrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &sdev->mode);
-}
-
static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
- .get_modes = simpledrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs simpledrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
/*
* Init / Cleanup
*/
-static struct drm_display_mode simpledrm_mode(unsigned int width,
- unsigned int height,
- unsigned int width_mm,
- unsigned int height_mm)
-{
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height, width_mm, height_mm)
- };
-
- return mode;
-}
-
static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int width, height, stride;
int width_mm = 0, height_mm = 0;
@@ -793,10 +614,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, dev);
+ sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, sysfb.dev);
if (IS_ERR(sdev))
return ERR_CAST(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, sdev);
/*
@@ -858,20 +680,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
return ERR_PTR(-EINVAL);
}
- /*
- * Assume a monitor resolution of 96 dpi if physical dimensions
- * are not specified to get a somewhat reasonable screen size.
- */
- if (!width_mm)
- width_mm = DRM_MODE_RES_MM(width, 96ul);
- if (!height_mm)
- height_mm = DRM_MODE_RES_MM(height, 96ul);
-
- sdev->mode = simpledrm_mode(width, height, width_mm, height_mm);
- sdev->format = format;
- sdev->pitch = stride;
+ sysfb->fb_mode = drm_sysfb_mode(width, height, width_mm, height_mm);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sdev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
&format->format, width, height, stride);
@@ -895,7 +708,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (IS_ERR(screen_base))
return screen_base;
- iosys_map_set_vaddr(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr(&sysfb->fb_addr, screen_base);
} else {
void __iomem *screen_base;
@@ -928,7 +741,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (!screen_base)
return ERR_PTR(-ENOMEM);
- iosys_map_set_vaddr_iomem(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
}
/*
@@ -951,8 +764,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- sdev->formats, ARRAY_SIZE(sdev->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ sdev->formats, ARRAY_SIZE(sdev->formats));
primary_plane = &sdev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &simpledrm_primary_plane_funcs,
@@ -1027,19 +840,21 @@ static struct drm_driver simpledrm_driver = {
static int simpledrm_probe(struct platform_device *pdev)
{
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
sdev = simpledrm_device_create(&simpledrm_driver, pdev);
if (IS_ERR(sdev))
return PTR_ERR(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, sdev->format);
+ drm_client_setup(dev, sysfb->fb_format);
return 0;
}
@@ -1047,7 +862,7 @@ static int simpledrm_probe(struct platform_device *pdev)
static void simpledrm_remove(struct platform_device *pdev)
{
struct simpledrm_device *sdev = platform_get_drvdata(pdev);
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
drm_dev_unplug(dev);
}
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
new file mode 100644
index 000000000000..90615e9ac86b
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/ioport.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+#include <video/vga.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "vesadrm"
+#define DRIVER_DESC "DRM driver for VESA platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define VESADRM_GAMMA_LUT_SIZE 256
+
+static const struct drm_format_info *vesadrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
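+ /* Translate the firmware-reported pixel layout into a matching DRM format. */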
+ static const struct drm_sysfb_format formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ };
+
+ return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
+}
+
+/*
+ * VESA device
+ */
+
+struct vesadrm_device {
+ struct drm_sysfb_device sysfb;
+
+#if defined(CONFIG_X86_32)
+ /* VESA Protected Mode interface */
+ struct {
+ const u8 *PrimaryPalette;
+ } pmi;
+#endif
+
+ void (*cmap_write)(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue);
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
+{
+ return container_of(to_drm_sysfb_device(dev), struct vesadrm_device, sysfb);
+}
+
+/*
+ * Palette
+ */
+
+static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u8 i8 = index;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
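+ /* Write the index, then R/G/B in sequence; the DAC advances per completed triplet. */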
+ outb_p(i8, VGA_PEL_IW);
+ outb_p(r8, VGA_PEL_D);
+ outb_p(g8, VGA_PEL_D);
+ outb_p(b8, VGA_PEL_D);
+}
+
+#if defined(CONFIG_X86_32)
+static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u32 i32 = index;
+ struct {
+ u8 b8;
+ u8 g8;
+ u8 r8;
+ u8 x8;
+ } PaletteEntry = {
+ blue >> 8,
+ green >> 8,
+ red >> 8,
+ 0x00,
+ };
+
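+ /* VBE function 4F09h, BL=00h: set a single palette entry at index i32 through the protected-mode entry point. */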
+ __asm__ __volatile__ (
+ "call *(%%esi)"
+ : /* no return value */
+ : "a" (0x4f09),
+ "b" (0),
+ "c" (1),
+ "d" (i32),
+ "D" (&PaletteEntry),
+ "S" (&vesa->pmi.PrimaryPalette));
+}
+#endif
+
+static void vesadrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vesadrm_device *vesa = to_vesadrm_device(dev);
+ u8 i8 = index & 0xff;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ vesa->cmap_write(vesa, i8, red, green, blue);
+}
+
+static void vesadrm_fill_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ struct drm_crtc *crtc = &vesa->crtc;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ drm_crtc_fill_gamma_555(crtc, vesadrm_set_gamma_lut);
+ break;
+ case DRM_FORMAT_RGB565:
+ drm_crtc_fill_gamma_565(crtc, vesadrm_set_gamma_lut);
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ drm_crtc_fill_gamma_888(crtc, vesadrm_set_gamma_lut);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+static void vesadrm_load_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ struct drm_crtc *crtc = &vesa->crtc;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_gamma_lut);
+ break;
+ case DRM_FORMAT_RGB565:
+ drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_gamma_lut);
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_gamma_lut);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+/*
+ * Modesetting
+ */
+
+static const u64 vesadrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs vesadrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct vesadrm_device *vesa = to_vesadrm_device(dev);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (crtc_state->enable && crtc_state->color_mgmt_changed) {
+ if (sysfb_crtc_state->format == sysfb->fb_format) {
+ if (crtc_state->gamma_lut)
+ vesadrm_load_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ else
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
+ } else {
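+ /* Ignore the gamma property while scanning out a non-native format and program the default ramp for it instead. */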
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
+ }
+ }
+}
+
+static const struct drm_crtc_helper_funcs vesadrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+ .atomic_flush = vesadrm_crtc_helper_atomic_flush,
+};
+
+static const struct drm_crtc_funcs vesadrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs vesadrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs vesadrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs vesadrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs vesadrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize;
+ struct resource resbuf;
+ struct resource *res;
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_VLFB)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * VESA DRM driver
+ */
+
+ vesa = devm_drm_dev_alloc(&pdev->dev, drv, struct vesadrm_device, sysfb.dev);
+ if (IS_ERR(vesa))
+ return ERR_CAST(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = vesadrm_get_format_si(dev, si);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ width = drm_sysfb_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = drm_sysfb_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = drm_sysfb_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
+ if (!__screen_info_vbe_mode_nonvga(si)) {
+ vesa->cmap_write = vesadrm_vga_cmap_write;
+ } else {
+#if defined(CONFIG_X86_32)
+ phys_addr_t pmi_base = __screen_info_vesapm_info_base(si);
+
+ if (pmi_base) {
+ const u16 *pmi_addr = phys_to_virt(pmi_base);
+
+ vesa->pmi.PrimaryPalette = (u8 *)pmi_addr + pmi_addr[2];
+ vesa->cmap_write = vesadrm_pmi_cmap_write;
+ } else
+#endif
+ if (format->is_color_indexed)
+ drm_warn(dev, "hardware palette is unchangeable, colors may be incorrect\n");
+ }
+
+#if defined(CONFIG_FIRMWARE_EDID)
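+ /* Use the EDID that the boot firmware stored in edid_info, if all eight header bytes are valid. */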
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+ if (vesa->cmap_write)
+ sysfb->fb_gamma_lut_size = VESADRM_GAMMA_LUT_SIZE;
+
+ /*
+ * Memory management
+ */
+
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &vesadrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ vesa->formats, ARRAY_SIZE(vesa->formats));
+
+ primary_plane = &vesa->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &vesadrm_primary_plane_funcs,
+ vesa->formats, nformats,
+ vesadrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &vesadrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &vesa->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &vesadrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &vesadrm_crtc_helper_funcs);
+
+ if (sysfb->fb_gamma_lut_size) {
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
+ }
+
+ /* Encoder */
+
+ encoder = &vesa->encoder;
+ ret = drm_encoder_init(dev, encoder, &vesadrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &vesa->connector;
+ ret = drm_connector_init(dev, connector, &vesadrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &vesadrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return vesa;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(vesadrm_fops);
+
+static struct drm_driver vesadrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &vesadrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int vesadrm_probe(struct platform_device *pdev)
+{
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ vesa = vesadrm_device_create(&vesadrm_driver, pdev);
+ if (IS_ERR(vesa))
+ return PTR_ERR(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void vesadrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver vesadrm_platform_driver = {
+ .driver = {
+ .name = "vesa-framebuffer",
+ },
+ .probe = vesadrm_probe,
+ .remove = vesadrm_remove,
+};
+
+module_platform_driver(vesadrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 798507a8ae56..59d5c1ba145a 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1321,10 +1321,16 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
if (wgrp->dc == dc->pipe) {
for (j = 0; j < wgrp->num_windows; j++) {
unsigned int index = wgrp->windows[j];
+ enum drm_plane_type type;
+
+ if (primary)
+ type = DRM_PLANE_TYPE_OVERLAY;
+ else
+ type = DRM_PLANE_TYPE_PRIMARY;
plane = tegra_shared_plane_create(drm, dc,
wgrp->index,
- index);
+ index, type);
if (IS_ERR(plane))
return plane;
@@ -1332,10 +1338,8 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
* Choose the first shared plane owned by this
* head as the primary plane.
*/
- if (!primary) {
- plane->type = DRM_PLANE_TYPE_PRIMARY;
+ if (!primary)
primary = plane;
- }
}
}
}
@@ -1389,7 +1393,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
tegra_crtc_atomic_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &state->base);
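+ /* Fall back to an empty CRTC state if the allocation failed, instead of dereferencing NULL. */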
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
index 08fbd8f151a1..990e744b0923 100644
--- a/drivers/gpu/drm/tegra/dp.c
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -256,73 +256,6 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
}
/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
index cb12ed0c54e7..695060cafac0 100644
--- a/drivers/gpu/drm/tegra/dp.h
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -164,8 +164,6 @@ int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
void drm_dp_link_update_rates(struct drm_dp_link *link);
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_choose(struct drm_dp_link *link,
const struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 2cd8dcb959c0..e5297ac5c0fc 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -501,14 +501,9 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
if (IS_ERR(dpaux->vdd)) {
- if (PTR_ERR(dpaux->vdd) != -ENODEV) {
- if (PTR_ERR(dpaux->vdd) != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get VDD supply: %ld\n",
- PTR_ERR(dpaux->vdd));
-
- return PTR_ERR(dpaux->vdd);
- }
+ if (PTR_ERR(dpaux->vdd) != -ENODEV)
+ return dev_err_probe(&pdev->dev, PTR_ERR(dpaux->vdd),
+ "failed to get VDD supply\n");
dpaux->vdd = NULL;
}
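
Note on the dpaux conversion above: dev_err_probe() folds the old two-branch error reporting into one call. It returns the error unchanged, logs it at error level, and stays silent for -EPROBE_DEFER (recording the reason for the devices_deferred debugfs entry instead). A hedged sketch of the idiom, with a hypothetical helper name:

#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_get_vdd(struct platform_device *pdev, struct regulator **vdd)
{
	*vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
	if (IS_ERR(*vdd)) {
		if (PTR_ERR(*vdd) != -ENODEV)
			return dev_err_probe(&pdev->dev, PTR_ERR(*vdd),
					     "failed to get VDD supply\n");

		*vdd = NULL;	/* the supply is optional */
	}

	return 0;
}
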
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 0b65e69f3a8a..1dd3670f37db 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -185,11 +185,13 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling);
struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct tegra_bo **planes,
unsigned int num_planes);
struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd);
#ifdef CONFIG_DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 9bb077558167..b5089b772267 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1564,7 +1564,6 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
static int tegra_dsi_probe(struct platform_device *pdev)
{
struct tegra_dsi *dsi;
- struct resource *regs;
int err;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
@@ -1636,8 +1635,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto remove;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs)) {
err = PTR_ERR(dsi->regs);
goto remove;
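
Note on the dsi.c hunk: devm_platform_ioremap_resource() is shorthand for the platform_get_resource() + devm_ioremap_resource() pair it replaces, with the same ERR_PTR() error semantics. A minimal sketch (hypothetical function name):

#include <linux/platform_device.h>

static void __iomem *example_map_regs(struct platform_device *pdev)
{
	/*
	 * Equivalent to:
	 *   struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   return devm_ioremap_resource(&pdev->dev, res);
	 */
	return devm_platform_ioremap_resource(pdev, 0);
}
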
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index c0d85463eb1a..17f616bbcb45 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -30,6 +30,14 @@ int falcon_wait_idle(struct falcon *falcon)
(value == 0), 10, 100000);
}
+static int falcon_dma_wait_not_full(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
+ !(value & FALCON_DMATRFCMD_FULL), 10, 100000);
+}
+
static int falcon_dma_wait_idle(struct falcon *falcon)
{
u32 value;
@@ -44,6 +52,7 @@ static int falcon_copy_chunk(struct falcon *falcon,
enum falcon_memory target)
{
u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
+ int err;
if (target == FALCON_MEMORY_IMEM)
cmd |= FALCON_DMATRFCMD_IMEM;
@@ -56,11 +65,15 @@ static int falcon_copy_chunk(struct falcon *falcon,
*/
cmd |= FALCON_DMATRFCMD_DMACTX(1);
+ err = falcon_dma_wait_not_full(falcon);
+ if (err < 0)
+ return err;
+
falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
- return falcon_dma_wait_idle(falcon);
+ return 0;
}
static void falcon_copy_firmware_image(struct falcon *falcon,
@@ -191,6 +204,11 @@ int falcon_boot(struct falcon *falcon)
falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
offset, FALCON_MEMORY_IMEM);
+ /* wait for DMA to complete */
+ err = falcon_dma_wait_idle(falcon);
+ if (err < 0)
+ return err;
+
/* setup falcon interrupts */
falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
FALCON_IRQMSET_SWGEN1 |
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index 1955cf11a8a6..902bb7e4fd0f 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -47,6 +47,7 @@
#define FALCON_DMATRFMOFFS 0x00001114
#define FALCON_DMATRFCMD 0x00001118
+#define FALCON_DMATRFCMD_FULL (1 << 0)
#define FALCON_DMATRFCMD_IDLE (1 << 1)
#define FALCON_DMATRFCMD_IMEM (1 << 4)
#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
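
Note on the falcon change above: it pipelines the firmware copy. Instead of draining the DMA engine after every 256-byte chunk, falcon_copy_chunk() now only blocks while the request queue reports FULL, and falcon_boot() waits for idle once after all chunks are queued. A sketch of the resulting flow under those assumptions (example_* name is hypothetical; the per-chunk register programming is elided):

#include <linux/iopoll.h>

static int example_copy_all(struct falcon *falcon, unsigned int nchunks)
{
	unsigned int i;
	u32 value;
	int err;

	for (i = 0; i < nchunks; i++) {
		/* block only while the request queue has no room */
		err = readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
					 !(value & FALCON_DMATRFCMD_FULL),
					 10, 100000);
		if (err < 0)
			return err;

		/* ... program FALCON_DMATRFMOFFS/FBOFFS/CMD for chunk i ... */
	}

	/* a single drain instead of one wait per chunk */
	return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
				  (value & FALCON_DMATRFCMD_IDLE), 10, 100000);
}
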
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 46170753699d..dd041089f797 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -102,6 +102,7 @@ static const struct drm_framebuffer_funcs tegra_fb_funcs = {
};
struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct tegra_bo **planes,
unsigned int num_planes)
@@ -114,7 +115,7 @@ struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(drm, fb, info, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++)
fb->obj[i] = &planes[i]->gem;
@@ -132,9 +133,9 @@ struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
- const struct drm_format_info *info = drm_get_format_info(drm, cmd);
struct tegra_bo *planes[4];
struct drm_gem_object *gem;
struct drm_framebuffer *fb;
@@ -166,7 +167,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
planes[i] = to_tegra_bo(gem);
}
- fb = tegra_fb_alloc(drm, cmd, planes, i);
+ fb = tegra_fb_alloc(drm, info, cmd, planes, i);
if (IS_ERR(fb)) {
err = PTR_ERR(fb);
goto unreference;
diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c
index cd9d798f8870..1b70f5e164af 100644
--- a/drivers/gpu/drm/tegra/fbdev.c
+++ b/drivers/gpu/drm/tegra/fbdev.c
@@ -106,7 +106,9 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
return PTR_ERR(info);
}
- fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
+ fb = tegra_fb_alloc(drm,
+ drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]),
+ &cmd, &bo, 1);
if (IS_ERR(fb)) {
err = PTR_ERR(fb);
dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
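
Note on the fb.c and fbdev.c hunks: both follow the same API shift, where the caller resolves the format info once via drm_get_format_info() and threads it down, rather than each helper re-deriving it from the mode command. A fragment restating the fbdev probe call under that assumption:

	const struct drm_format_info *info =
		drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]);

	fb = tegra_fb_alloc(drm, info, &cmd, &bo, 1);
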
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ace3e5a805cf..41a285ec889f 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -17,7 +17,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
-#include <drm/tegra_drm.h>
#include "drm.h"
#include "gem.h"
@@ -524,10 +523,10 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
if (tegra->domain) {
tegra_bo_iommu_unmap(tegra, bo);
- if (gem->import_attach) {
+ if (drm_gem_is_imported(gem)) {
dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
- dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
+ dma_buf_detach(gem->dma_buf, gem->import_attach);
}
}
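
Note on the gem.c hunk: it swaps an open-coded import_attach test for the generic drm_gem_is_imported() predicate and detaches via gem->dma_buf rather than reaching through import_attach->dmabuf. Condensed into a hypothetical helper:

#include <drm/drm_gem.h>
#include <linux/dma-buf.h>

static void example_release_import(struct drm_gem_object *gem,
				   struct sg_table *sgt)
{
	if (drm_gem_is_imported(gem)) {
		dma_buf_unmap_attachment_unlocked(gem->import_attach, sgt,
						  DMA_TO_DEVICE);
		dma_buf_detach(gem->dma_buf, gem->import_attach);
	}
}
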
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index fa6140fc37fb..8f779f23dc09 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -755,9 +755,9 @@ static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
- unsigned int index)
+ unsigned int index,
+ enum drm_plane_type type)
{
- enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
struct tegra_shared_plane *plane;
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 23c4b2115ed1..a66f18c4facc 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -80,7 +80,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub);
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
- unsigned int index);
+ unsigned int index,
+ enum drm_plane_type type);
int tegra_display_hub_atomic_check(struct drm_device *drm,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c
index 2d9a0a3f6c38..7a38664e890e 100644
--- a/drivers/gpu/drm/tegra/nvdec.c
+++ b/drivers/gpu/drm/tegra/nvdec.c
@@ -261,10 +261,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
if (!client->group) {
virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
-
- err = dma_mapping_error(nvdec->dev, iova);
- if (err < 0)
- return err;
+ if (!virt)
+ return -ENOMEM;
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
if (IS_ERR(virt))
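
Note on the nvdec fix: dma_alloc_coherent() reports failure with a NULL return, while dma_mapping_error() is only meaningful for streaming mappings (dma_map_single() and friends), so the old check could never catch a failed allocation. A short sketch of both, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/err.h>

static void *example_alloc(struct device *dev, size_t size, dma_addr_t *iova)
{
	void *virt = dma_alloc_coherent(dev, size, iova, GFP_KERNEL);

	if (!virt)		/* coherent API: NULL on failure */
		return ERR_PTR(-ENOMEM);

	return virt;
}

static int example_map(struct device *dev, void *buf, size_t size)
{
	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))	/* streaming API check */
		return -ENOMEM;

	return 0;
}
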
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 1e8ec50b759e..ff5a749710db 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -200,6 +200,11 @@ static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
.atomic_check = tegra_rgb_encoder_atomic_check,
};
+static void tegra_dc_of_node_put(void *data)
+{
+ of_node_put(data);
+}
+
int tegra_dc_rgb_probe(struct tegra_dc *dc)
{
struct device_node *np;
@@ -207,7 +212,14 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
int err;
np = of_get_child_by_name(dc->dev->of_node, "rgb");
- if (!np || !of_device_is_available(np))
+ if (!np)
+ return -ENODEV;
+
+ err = devm_add_action_or_reset(dc->dev, tegra_dc_of_node_put, np);
+ if (err < 0)
+ return err;
+
+ if (!of_device_is_available(np))
return -ENODEV;
rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
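
Note on the rgb.c change: devm_add_action_or_reset() ties the child node's refcount to the device, so the put happens automatically on driver detach, or immediately if registering the action fails, which removes the leak on the early returns. A sketch of the idiom (example_* names are hypothetical):

#include <linux/device.h>
#include <linux/of.h>

static void example_of_node_put(void *data)
{
	of_node_put(data);
}

static int example_hold_child(struct device *dev, struct device_node **out)
{
	struct device_node *np;
	int err;

	np = of_get_child_by_name(dev->of_node, "rgb");
	if (!np)
		return -ENODEV;

	err = devm_add_action_or_reset(dev, example_of_node_put, np);
	if (err < 0)
		return err;	/* _or_reset already called of_node_put() */

	*out = np;
	return 0;
}
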
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index f98f70eda906..21f3dfdcc5c9 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2666,7 +2666,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder)
* the AUX transactions would just be timing out.
*/
if (output->connector.status != connector_status_disconnected) {
- err = drm_dp_link_power_down(sor->aux, &sor->link);
+ err = drm_dp_link_power_down(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power down link: %d\n",
err);
@@ -2882,7 +2882,7 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder)
else
dev_dbg(sor->dev, "link training succeeded\n");
- err = drm_dp_link_power_up(sor->aux, &sor->link);
+ err = drm_dp_link_power_up(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power up DP link: %d\n", err);
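
Note on the sor.c hunks: with the tegra-local copies deleted from dp.c, the SOR code calls the shared DRM DP helpers, which take the DPCD revision directly instead of a driver-private struct drm_dp_link. A sketch assuming the (aux, revision) signature the hunks above call:

#include <drm/display/drm_dp_helper.h>

static int example_sink_power(struct drm_dp_aux *aux, unsigned char revision,
			      bool up)
{
	/* both helpers are expected to no-op for DPCD older than 1.1 */
	if (up)
		return drm_dp_link_power_up(aux, revision);

	return drm_dp_link_power_down(aux, revision);
}
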
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 0109bcf7faa5..c0e952293ad0 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -4,7 +4,9 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \
drm_kunit_helpers.o
obj-$(CONFIG_DRM_KUNIT_TEST) += \
+ drm_atomic_test.o \
drm_atomic_state_test.o \
+ drm_bridge_test.o \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
drm_connector_test.o \
@@ -21,6 +23,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_modes_test.o \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
- drm_rect_test.o
+ drm_rect_test.o \
+ drm_sysfb_modeset_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_atomic_test.c b/drivers/gpu/drm/tests/drm_atomic_test.c
new file mode 100644
index 000000000000..ea91bec6569e
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_atomic_test.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_atomic functions
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include <kunit/test.h>
+
+struct drm_atomic_test_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static const struct drm_connector_helper_funcs drm_atomic_init_connector_helper_funcs = {
+};
+
+static const struct drm_connector_funcs drm_atomic_init_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static struct drm_atomic_test_priv *create_device(struct kunit *test)
+{
+ struct drm_atomic_test_priv *priv;
+ struct drm_connector *connector;
+ struct drm_encoder *enc;
+ struct drm_device *drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_atomic_test_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ drm = &priv->drm;
+ plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(plane))
+ return ERR_CAST(plane);
+ priv->plane = plane;
+
+ crtc = drm_kunit_helper_create_crtc(test, drm,
+ plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(crtc))
+ return ERR_CAST(crtc);
+ priv->crtc = crtc;
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(crtc);
+
+ connector = &priv->connector;
+ ret = drmm_connector_init(drm, connector,
+ &drm_atomic_init_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(connector, &drm_atomic_init_connector_helper_funcs);
+
+ drm_connector_attach_encoder(connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+static void drm_test_drm_atomic_get_connector_for_encoder(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_test_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_connector *curr_connector;
+ int ret;
+
+ priv = create_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, &priv->drm,
+ priv->crtc, &priv->connector,
+ mode, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_enable;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn:
+ curr_connector = drm_atomic_get_connector_for_encoder(&priv->encoder,
+ &ctx);
+ if (PTR_ERR(curr_connector) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_conn;
+ }
+ KUNIT_EXPECT_PTR_EQ(test, curr_connector, &priv->connector);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static struct kunit_case drm_atomic_get_connector_for_encoder_tests[] = {
+ KUNIT_CASE(drm_test_drm_atomic_get_connector_for_encoder),
+ { }
+};
+
+
+static struct kunit_suite drm_atomic_get_connector_for_encoder_test_suite = {
+ .name = "drm_test_atomic_get_connector_for_encoder",
+ .test_cases = drm_atomic_get_connector_for_encoder_tests,
+};
+
+kunit_test_suite(drm_atomic_get_connector_for_encoder_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_DESCRIPTION("Kunit test for drm_atomic functions");
+MODULE_LICENSE("GPL");
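
Note on the new test: every locked section uses the standard modeset backoff idiom, where -EDEADLK means drop the contended locks and retry the whole section. Boiled down, with example_locked_op() standing in for the operation under test:

#include <drm/drm_modeset_lock.h>

static int example_locked_op(struct drm_device *drm,
			     struct drm_modeset_acquire_ctx *ctx);

static int example_with_backoff(struct drm_device *drm)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = example_locked_op(drm, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
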
diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c
new file mode 100644
index 000000000000..887020141c7f
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_bridge_test.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_bridge functions
+ */
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+
+/*
+ * Mimic the typical "private" struct defined by a bridge driver, which
+ * embeds a bridge plus other fields.
+ *
+ * Having at least one member before @bridge ensures we test non-zero
+ * @bridge offset.
+ */
+struct drm_bridge_priv {
+ unsigned int enable_count;
+ unsigned int disable_count;
+ struct drm_bridge bridge;
+ void *data;
+};
+
+struct drm_bridge_init_priv {
+ struct drm_device drm;
+ /** @dev: device, only for tests not needing a whole drm_device */
+ struct device *dev;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_bridge_priv *test_bridge;
+ struct drm_connector *connector;
+ bool destroyed;
+};
+
+static struct drm_bridge_priv *bridge_to_priv(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct drm_bridge_priv, bridge);
+}
+
+static void drm_test_bridge_priv_destroy(struct drm_bridge *bridge)
+{
+ struct drm_bridge_priv *bridge_priv = bridge_to_priv(bridge);
+ struct drm_bridge_init_priv *priv = (struct drm_bridge_init_priv *)bridge_priv->data;
+
+ priv->destroyed = true;
+}
+
+static void drm_test_bridge_enable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_disable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
+ .destroy = drm_test_bridge_priv_destroy,
+ .enable = drm_test_bridge_enable,
+ .disable = drm_test_bridge_disable,
+};
+
+static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = {
+ .destroy = drm_test_bridge_priv_destroy,
+ .atomic_enable = drm_test_bridge_atomic_enable,
+ .atomic_disable = drm_test_bridge_atomic_disable,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+KUNIT_DEFINE_ACTION_WRAPPER(drm_bridge_remove_wrapper,
+ drm_bridge_remove,
+ struct drm_bridge *);
+
+static int drm_kunit_bridge_add(struct kunit *test,
+ struct drm_bridge *bridge)
+{
+ drm_bridge_add(bridge);
+
+ return kunit_add_action_or_reset(test,
+ drm_bridge_remove_wrapper,
+ bridge);
+}
+
+static struct drm_bridge_init_priv *
+drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_encoder *enc;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_bridge_init_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ priv->test_bridge = devm_drm_bridge_alloc(dev, struct drm_bridge_priv, bridge, funcs);
+ if (IS_ERR(priv->test_bridge))
+ return ERR_CAST(priv->test_bridge);
+
+ priv->test_bridge->data = priv;
+
+ drm = &priv->drm;
+ priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(priv->plane))
+ return ERR_CAST(priv->plane);
+
+ priv->crtc = drm_kunit_helper_create_crtc(test, drm,
+ priv->plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(priv->crtc))
+ return ERR_CAST(priv->crtc);
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(priv->crtc);
+
+ bridge = &priv->test_bridge->bridge;
+ bridge->type = DRM_MODE_CONNECTOR_VIRTUAL;
+
+ ret = drm_kunit_bridge_add(test, bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_bridge_attach(enc, bridge, NULL, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv->connector = drm_bridge_connector_init(drm, enc);
+ if (IS_ERR(priv->connector))
+ return ERR_CAST(priv->connector);
+
+ drm_connector_attach_encoder(priv->connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns the last committed
+ * state for an atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge_state *curr_bridge_state;
+ struct drm_bridge_state *bridge_state;
+ struct drm_atomic_state *state;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &priv->drm;
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_commit:
+ bridge = &priv->test_bridge->bridge;
+ bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state);
+
+ ret = drm_atomic_commit(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_state:
+ ret = drm_modeset_lock(&bridge->base.lock, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_state;
+ }
+
+ curr_bridge_state = drm_bridge_get_current_state(bridge);
+ KUNIT_EXPECT_PTR_EQ(test, curr_bridge_state, bridge_state);
+
+ drm_modeset_unlock(&bridge->base.lock);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns NULL for a
+ * non-atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge *bridge;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ /*
+ * NOTE: Strictly speaking, we should take the bridge->base.lock
+ * before calling that function. However, bridge->base is only
+ * initialized if the bridge is atomic, while here we explicitly
+ * initialize one that isn't.
+ *
+ * In order to avoid unnecessary warnings, let's skip the
+ * locking. The function would return NULL in all cases anyway,
+ * so we don't really have any concurrency to worry about.
+ */
+ bridge = &priv->test_bridge->bridge;
+ KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge));
+}
+
+static struct kunit_case drm_bridge_get_current_state_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_legacy),
+ { }
+};
+
+
+static struct kunit_suite drm_bridge_get_current_state_test_suite = {
+ .name = "drm_test_bridge_get_current_state",
+ .test_cases = drm_bridge_get_current_state_tests,
+};
+
+/*
+ * Test that an atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge_priv *bridge_priv;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 1);
+}
+
+/*
+ * Test that calling drm_bridge_helper_reset_crtc() on a disabled atomic
+ * bridge will fail and not call the enable / disable callbacks
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge_priv *bridge_priv;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 0);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 0);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 0);
+}
+
+/*
+ * Test that a non-atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge_priv *bridge_priv;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 1);
+}
+
+static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic_disabled),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_legacy),
+ { }
+};
+
+static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = {
+ .name = "drm_test_bridge_helper_reset_crtc",
+ .test_cases = drm_bridge_helper_reset_crtc_tests,
+};
+
+static int drm_test_bridge_alloc_init(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ priv->dev = kunit_device_register(test, "drm-bridge-dev");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ test->priv = priv;
+
+ priv->test_bridge = devm_drm_bridge_alloc(priv->dev, struct drm_bridge_priv, bridge,
+ &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->test_bridge);
+
+ priv->test_bridge->data = priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ return 0;
+}
+
+/*
+ * Test that a bridge is freed when the device is destroyed in the
+ * absence of other drm_bridge_get/put() operations.
+ */
+static void drm_test_drm_bridge_alloc_basic(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv = test->priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ kunit_device_unregister(test, priv->dev);
+ KUNIT_EXPECT_TRUE(test, priv->destroyed);
+}
+
+/*
+ * Test that a bridge is not freed when the device is destroyed while there
+ * is still a reference to it, and freed when that reference is put.
+ */
+static void drm_test_drm_bridge_alloc_get_put(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv = test->priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ drm_bridge_get(&priv->test_bridge->bridge);
+ KUNIT_EXPECT_FALSE(test, priv->destroyed);
+
+ kunit_device_unregister(test, priv->dev);
+ KUNIT_EXPECT_FALSE(test, priv->destroyed);
+
+ drm_bridge_put(&priv->test_bridge->bridge);
+ KUNIT_EXPECT_TRUE(test, priv->destroyed);
+}
+
+static struct kunit_case drm_bridge_alloc_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_alloc_basic),
+ KUNIT_CASE(drm_test_drm_bridge_alloc_get_put),
+ { }
+};
+
+static struct kunit_suite drm_bridge_alloc_test_suite = {
+ .name = "drm_bridge_alloc",
+ .init = drm_test_bridge_alloc_init,
+ .test_cases = drm_bridge_alloc_tests,
+};
+
+kunit_test_suites(
+ &drm_bridge_get_current_state_test_suite,
+ &drm_bridge_helper_reset_crtc_test_suite,
+ &drm_bridge_alloc_test_suite,
+);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
+
+MODULE_DESCRIPTION("Kunit test for drm_bridge functions");
+MODULE_LICENSE("GPL");
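
Note on the alloc_get_put case above: it pins down the bridge lifetime rules, namely that a reference taken with drm_bridge_get() keeps the bridge alive past device teardown, and funcs->destroy only runs once the last reference is put. In miniature (hypothetical helper name):

#include <drm/drm_bridge.h>

static void example_bridge_refs(struct drm_bridge *bridge)
{
	drm_bridge_get(bridge);		/* extra ref survives device removal */

	/* ... kunit_device_unregister() drops the devres reference ... */

	drm_bridge_put(bridge);		/* last ref dropped: .destroy() runs */
}
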
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index b2fdb1a774fe..3f44fe5e92e4 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -88,7 +88,8 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
struct drm_device *drm = priv->drm;
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- struct drm_display_mode *expected_mode, *mode;
+ struct drm_display_mode *expected_mode;
+ const struct drm_display_mode *mode;
const char *cmdline = "1920x1080@60";
int ret;
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 35cd3405d045..7299fa8971ce 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -279,9 +279,9 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = {
0xFF, 0x00,
- 0x4C, 0x99,
- 0x19, 0x66,
- 0xE5, 0xB2,
+ 0x4C, 0x95,
+ 0x1C, 0x69,
+ 0xE2, 0xB2,
},
},
.rgb332_result = {
@@ -430,9 +430,9 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.gray8_result = {
.dst_pitch = 5,
.expected = {
- 0x3C, 0x33, 0xC4, 0x00, 0x00,
- 0xBB, 0x3C, 0x33, 0x00, 0x00,
- 0x34, 0xBB, 0x3C, 0x00, 0x00,
+ 0x3D, 0x32, 0xC1, 0x00, 0x00,
+ 0xBA, 0x3D, 0x32, 0x00, 0x00,
+ 0x34, 0xBA, 0x3D, 0x00, 0x00,
},
},
.rgb332_result = {
@@ -735,27 +735,22 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
NULL : &result->dst_pitch;
drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
- &fmtcnv_state, false);
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
- drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip,
- &fmtcnv_state, true);
+ drm_fb_xrgb8888_to_rgb565be(&dst, &result->dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size);
buf = dst.vaddr;
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip,
+ drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
&fmtcnv_state);
-
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -795,14 +790,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -842,14 +831,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -889,14 +872,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -939,12 +916,7 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
+ drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -985,12 +957,8 @@ static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, &params->clip,
+ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip,
&fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1030,14 +998,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1077,12 +1039,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb,
- &params->clip, &fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1122,14 +1079,8 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb,
- &params->clip, &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1202,23 +1153,15 @@ static void drm_test_fb_swab(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN,
- &src, &fb, &params->clip, &fmtcnv_state);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr;
memset(buf, 0, dst_size);
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_xrgb8888_to_bgrx8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr;
@@ -1229,11 +1172,8 @@ static void drm_test_fb_swab(struct kunit *test)
mock_format.format |= DRM_FORMAT_BIG_ENDIAN;
fb.format = &mock_format;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1266,14 +1206,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_abgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1306,14 +1240,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_xbgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1407,147 +1335,6 @@ static void drm_test_fb_clip_offset(struct kunit *test)
KUNIT_EXPECT_EQ(test, offset, params->expected_offset);
}
-struct fb_build_fourcc_list_case {
- const char *name;
- u32 native_fourccs[TEST_BUF_SIZE];
- size_t native_fourccs_size;
- u32 expected[TEST_BUF_SIZE];
- size_t expected_fourccs_size;
-};
-
-static struct fb_build_fourcc_list_case fb_build_fourcc_list_cases[] = {
- {
- .name = "no native formats",
- .native_fourccs = { },
- .native_fourccs_size = 0,
- .expected = { DRM_FORMAT_XRGB8888 },
- .expected_fourccs_size = 1,
- },
- {
- .name = "XRGB8888 as native format",
- .native_fourccs = { DRM_FORMAT_XRGB8888 },
- .native_fourccs_size = 1,
- .expected = { DRM_FORMAT_XRGB8888 },
- .expected_fourccs_size = 1,
- },
- {
- .name = "remove duplicates",
- .native_fourccs = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- },
- .native_fourccs_size = 11,
- .expected = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- },
- .expected_fourccs_size = 3,
- },
- {
- .name = "convert alpha formats",
- .native_fourccs = {
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_BGRA5551,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_BGRA8888,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_RGBA1010102,
- DRM_FORMAT_BGRA1010102,
- },
- .native_fourccs_size = 12,
- .expected = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_BGRX5551,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_RGBX1010102,
- DRM_FORMAT_BGRX1010102,
- },
- .expected_fourccs_size = 12,
- },
- {
- .name = "random formats",
- .native_fourccs = {
- DRM_FORMAT_Y212,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_C8,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_BGR565_A8,
- DRM_FORMAT_R10,
- DRM_FORMAT_XYUV8888,
- },
- .native_fourccs_size = 10,
- .expected = {
- DRM_FORMAT_Y212,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_C8,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_BGR565_A8,
- DRM_FORMAT_R10,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XRGB8888,
- },
- .expected_fourccs_size = 10,
- },
-};
-
-static void fb_build_fourcc_list_case_desc(struct fb_build_fourcc_list_case *t, char *desc)
-{
- strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
-}
-
-KUNIT_ARRAY_PARAM(fb_build_fourcc_list, fb_build_fourcc_list_cases, fb_build_fourcc_list_case_desc);
-
-static void drm_test_fb_build_fourcc_list(struct kunit *test)
-{
- const struct fb_build_fourcc_list_case *params = test->param_value;
- u32 fourccs_out[TEST_BUF_SIZE] = {0};
- size_t nfourccs_out;
- struct drm_device *drm;
- struct device *dev;
-
- dev = drm_kunit_helper_alloc_device(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
-
- drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
-
- nfourccs_out = drm_fb_build_fourcc_list(drm, params->native_fourccs,
- params->native_fourccs_size,
- fourccs_out, TEST_BUF_SIZE);
-
- KUNIT_EXPECT_EQ(test, nfourccs_out, params->expected_fourccs_size);
- KUNIT_EXPECT_MEMEQ(test, fourccs_out, params->expected, TEST_BUF_SIZE);
-}
-
struct fb_memcpy_case {
const char *name;
u32 format;
@@ -1910,12 +1697,8 @@ static void drm_test_fb_memcpy(struct kunit *test)
memset(buf[i], 0, dst_size[i]);
}
- int blit_result;
-
- blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_memcpy(dst, dst_pitches, src, &fb, &params->clip);
- KUNIT_EXPECT_FALSE(test, blit_result);
for (size_t i = 0; i < fb.format->num_planes; i++) {
expected[i] = cpubuf_to_le32(test, params->expected[i], TEST_BUF_SIZE);
KUNIT_EXPECT_MEMEQ_MSG(test, buf[i], expected[i], dst_size[i],
@@ -1940,7 +1723,6 @@ static struct kunit_case drm_format_helper_test_cases[] = {
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xbgr8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_abgr8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_clip_offset, clip_offset_gen_params),
- KUNIT_CASE_PARAM(drm_test_fb_build_fourcc_list, fb_build_fourcc_list_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_memcpy, fb_memcpy_gen_params),
{}
};
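
Note on the format helper test changes: throughout the file, drm_fb_blit()'s run-time fourcc dispatch is replaced by direct calls to the per-format converters, so each test now exercises exactly one conversion path. One representative call, wrapped in a hypothetical helper:

#include <drm/drm_format_helper.h>

static void example_convert(struct iosys_map *dst, const unsigned int *dst_pitch,
			    const struct iosys_map *src,
			    const struct drm_framebuffer *fb,
			    const struct drm_rect *clip,
			    struct drm_format_conv_state *state)
{
	/* direct converter call; no DRM_FORMAT_RGB565 lookup involved */
	drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state);
}
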
diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c
index 6ea04cc8f324..9b8e01e8cd91 100644
--- a/drivers/gpu/drm/tests/drm_framebuffer_test.c
+++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c
@@ -363,6 +363,7 @@ struct drm_framebuffer_test_priv {
static struct drm_framebuffer *fb_create_mock(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer_test_priv *priv = container_of(dev, typeof(*priv), dev);
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index 925fbc2cda70..68f2c3162354 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -134,7 +134,7 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -142,14 +142,14 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
ret = drm_gem_shmem_pin(shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
drm_gem_shmem_unpin(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
}
/*
@@ -168,24 +168,24 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_vmap(shmem, &map);
+ ret = drm_gem_shmem_vmap_locked(shmem, &map);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);
iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
for (i = 0; i < TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
- drm_gem_shmem_vunmap(shmem, &map);
+ drm_gem_shmem_vunmap_locked(shmem, &map);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
}
/*
@@ -254,7 +254,7 @@ static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
sgt = drm_gem_shmem_get_pages_sgt(shmem);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
for_each_sgtable_sg(sgt, sg, si) {
@@ -284,17 +284,17 @@ static void drm_gem_shmem_test_madvise(struct kunit *test)
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, 1);
/* Set madv to a negative value */
- ret = drm_gem_shmem_madvise(shmem, -1);
+ ret = drm_gem_shmem_madvise_locked(shmem, -1);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
/* Check that madv cannot be set back to a positive value */
- ret = drm_gem_shmem_madvise(shmem, 0);
+ ret = drm_gem_shmem_madvise_locked(shmem, 0);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}
@@ -322,7 +322,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_FALSE(test, ret);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
/* The scatter/gather table will be freed by drm_gem_shmem_free */
@@ -332,7 +332,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_TRUE(test, ret);
- drm_gem_shmem_purge(shmem);
+ drm_gem_shmem_purge_locked(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
KUNIT_EXPECT_NULL(test, shmem->sgt);
KUNIT_EXPECT_EQ(test, shmem->madv, -1);
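
Note on the shmem test updates: they track two API moves, the use counters becoming refcount_t (read through refcount_read()) and several operations gaining explicit *_locked names to mark the expected locking context. A small sketch of the counter side (hypothetical helper):

#include <linux/refcount.h>

static bool example_has_pages(const refcount_t *pages_use_count)
{
	/* refcount_t is read through the accessor, not as a raw integer */
	return refcount_read(pages_use_count) > 0;
}
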
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index e97efd3af9ed..8bd412735000 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -33,7 +33,7 @@ struct drm_atomic_helper_connector_hdmi_priv {
struct drm_encoder encoder;
struct drm_connector connector;
- const char *current_edid;
+ const void *current_edid;
size_t current_edid_len;
};
@@ -55,51 +55,8 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
return preferred;
}
-static int light_up_connector(struct kunit *test,
- struct drm_device *drm,
- struct drm_crtc *crtc,
- struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_connector_state *conn_state;
- struct drm_crtc_state *crtc_state;
- int ret;
-
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
-
-retry:
- conn_state = drm_atomic_get_connector_state(state, connector);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
-
- ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- ret = drm_modeset_backoff(ctx);
- if (!ret)
- goto retry;
- }
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
-
- ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state->enable = true;
- crtc_state->active = true;
-
- ret = drm_atomic_commit(state);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- return 0;
-}
-
static int set_connector_edid(struct kunit *test, struct drm_connector *connector,
- const char *edid, size_t edid_len)
+ const void *edid, size_t edid_len)
{
struct drm_atomic_helper_connector_hdmi_priv *priv =
connector_to_priv(connector);
@@ -132,15 +89,15 @@ static const struct drm_connector_hdmi_funcs reject_connector_hdmi_funcs = {
};
static enum drm_mode_status
-reject_100MHz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
+reject_100mhz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
const struct drm_display_mode *mode,
unsigned long long tmds_rate)
{
return (tmds_rate > 100ULL * 1000 * 1000) ? MODE_BAD : MODE_OK;
}
-static const struct drm_connector_hdmi_funcs reject_100_MHz_connector_hdmi_funcs = {
- .tmds_char_rate_valid = reject_100MHz_connector_tmds_char_rate_valid,
+static const struct drm_connector_hdmi_funcs reject_100mhz_connector_hdmi_funcs = {
+ .tmds_char_rate_valid = reject_100mhz_connector_tmds_char_rate_valid,
};
static int dummy_connector_get_modes(struct drm_connector *connector)
@@ -183,10 +140,11 @@ static const struct drm_connector_funcs dummy_connector_funcs = {
static
struct drm_atomic_helper_connector_hdmi_priv *
-drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
- unsigned int formats,
- unsigned int max_bpc,
- const struct drm_connector_hdmi_funcs *hdmi_funcs)
+__connector_hdmi_init(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc,
+ const struct drm_connector_hdmi_funcs *hdmi_funcs,
+ const void *edid_data, size_t edid_len)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
@@ -225,6 +183,8 @@ drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
enc->possible_crtcs = drm_crtc_mask(priv->crtc);
conn = &priv->connector;
+ conn->ycbcr_420_allowed = !!(formats & BIT(HDMI_COLORSPACE_YUV420));
+
ret = drmm_connector_hdmi_init(drm, conn,
"Vendor", "Product",
&dummy_connector_funcs,
@@ -240,29 +200,28 @@ drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
drm_mode_config_reset(drm);
+ if (edid_data && edid_len) {
+ ret = set_connector_edid(test, &priv->connector, edid_data, edid_len);
+ KUNIT_ASSERT_GT(test, ret, 0);
+ }
+
return priv;
}
+#define drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test, formats, max_bpc, funcs, edid) \
+ __connector_hdmi_init(test, formats, max_bpc, funcs, edid, ARRAY_SIZE(edid))
+
static
struct drm_atomic_helper_connector_hdmi_priv *
drm_kunit_helper_connector_hdmi_init(struct kunit *test,
unsigned int formats,
unsigned int max_bpc)
{
- struct drm_atomic_helper_connector_hdmi_priv *priv;
- int ret;
-
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- formats, max_bpc,
- &dummy_connector_hdmi_funcs);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
-
- ret = set_connector_edid(test, &priv->connector,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
- return priv;
+ return drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ formats,
+ max_bpc,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
}
/*
@@ -298,7 +257,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -364,7 +326,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -432,7 +397,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -448,7 +416,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
@@ -489,7 +457,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -505,7 +476,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
@@ -547,7 +518,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -561,7 +535,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -606,7 +580,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -620,7 +597,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -666,7 +643,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -680,7 +660,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -725,7 +705,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -739,7 +722,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -753,6 +736,107 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
}
/*
+ * Test that, for an HDMI connector with an HDMI monitor, we get a
+ * limited RGB Quantization Range with a YUV420 mode, regardless of
+ * the value of the Broadcast RGB property.
+ */
+static void drm_test_check_broadcast_rgb_cea_mode_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ enum drm_hdmi_broadcast_rgb broadcast_rgb;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_atomic_state *state;
+ struct drm_display_mode *mode;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ broadcast_rgb = *(enum drm_hdmi_broadcast_rgb *)test->param_value;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
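+ /*
+ * On -EDEADLK, drm_modeset_backoff() drops the contended locks so the
+ * acquire context can safely retry the whole enable sequence.
+ */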
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ mode, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
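+ /*
+ * drm_atomic_state_clear() releases everything acquired while building
+ * the state, so the sequence can be retried from scratch after backoff.
+ */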
+retry_conn_state:
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (PTR_ERR(conn_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_state;
+ }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ conn_state->hdmi.broadcast_rgb = broadcast_rgb;
+
+ ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_state;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ KUNIT_ASSERT_EQ(test, conn_state->hdmi.broadcast_rgb, broadcast_rgb);
+ KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_YUV420);
+
+ KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static const enum drm_hdmi_broadcast_rgb check_broadcast_rgb_cea_mode_yuv420_tests[] = {
+ DRM_HDMI_BROADCAST_RGB_AUTO,
+ DRM_HDMI_BROADCAST_RGB_FULL,
+ DRM_HDMI_BROADCAST_RGB_LIMITED,
+};
+
+static void
+check_broadcast_rgb_cea_mode_yuv420_desc(const enum drm_hdmi_broadcast_rgb *broadcast_rgb,
+ char *desc)
+{
+ sprintf(desc, "%s", drm_hdmi_connector_get_broadcast_rgb_name(*broadcast_rgb));
+}
+
+KUNIT_ARRAY_PARAM(check_broadcast_rgb_cea_mode_yuv420,
+ check_broadcast_rgb_cea_mode_yuv420_tests,
+ check_broadcast_rgb_cea_mode_yuv420_desc);
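+
+/*
+ * KUNIT_ARRAY_PARAM() generates
+ * check_broadcast_rgb_cea_mode_yuv420_gen_params(), which is referenced
+ * by KUNIT_CASE_PARAM() in the test suite below.
+ */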
+
+/*
* Test that if we change the maximum bpc property to a different value,
* we trigger a mode change on the connector's CRTC, which will in turn
* disable/enable the connector.
@@ -771,25 +855,25 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -847,25 +931,25 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -918,21 +1002,18 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_dvi_1080p);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_dvi_1080p,
- ARRAY_SIZE(test_edid_dvi_1080p));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_FALSE(test, info->is_hdmi);
@@ -941,7 +1022,10 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -969,26 +1053,26 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1018,26 +1102,26 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1067,26 +1151,26 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1134,7 +1218,10 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
/* You shouldn't be doing that at home. */
@@ -1166,7 +1253,7 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
* Then we will pick the latter, and the computed TMDS character rate
* will be equal to 1.25 times the mode pixel clock.
*/
-static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
+static void drm_test_check_max_tmds_rate_bpc_fallback_rgb(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_modeset_acquire_ctx ctx;
@@ -1179,19 +1266,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1208,7 +1292,10 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1224,6 +1311,80 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
/*
* Test that if:
+ * - We have an HDMI connector and a display supporting both RGB and YUV420
+ * - The chosen mode can be supported in YUV420 output format only
+ * - The chosen mode has a TMDS character rate higher than the display
+ * supports in YUV420/12bpc
+ * - The chosen mode has a TMDS character rate lower than the display
+ * supports in YUV420/10bpc.
+ *
+ * Then we will pick the latter, and the computed TMDS character rate
+ * will be equal to 1.25 * 0.5 = 0.625 times the mode pixel clock
+ * (1.25 for 10 bpc deep color, 0.5 for YUV420 subsampling).
+ */
+static void drm_test_check_max_tmds_rate_bpc_fallback_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_display_info *info;
+ struct drm_display_mode *yuv420_only_mode;
+ unsigned long long rate;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ KUNIT_ASSERT_TRUE(test, conn->ycbcr_420_allowed);
+
+ yuv420_only_mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, yuv420_only_mode);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_only(info, yuv420_only_mode));
+
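+ /*
+ * Sanity-check the EDID: the mode must not fit within the TMDS clock
+ * limit at YUV420/12bpc, but must fit at YUV420/10bpc.
+ */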
+ rate = drm_hdmi_compute_mode_clock(yuv420_only_mode, 12, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_GT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(yuv420_only_mode, 10, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ yuv420_only_mode, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_YUV420);
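+ /*
+ * mode->clock is in kHz, tmds_char_rate in Hz: YUV420 halves the rate
+ * and 10 bpc scales it by 1.25, so clock * 1000 * 0.5 * 1.25 is
+ * clock * 625.
+ */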
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, yuv420_only_mode->clock * 625);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that if:
* - We have an HDMI connector supporting both RGB and YUV422 and up to
* 12 bpc
* - The chosen mode has a TMDS character rate higher than the display
@@ -1235,7 +1396,7 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
* Then we will prefer to keep the RGB format with a lower bpc over
* picking YUV422.
*/
-static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
+static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_modeset_acquire_ctx ctx;
@@ -1248,21 +1409,18 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1282,7 +1440,10 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1296,6 +1457,170 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
}
/*
+ * Test that if:
+ * - We have an HDMI connector supporting both RGB and YUV420 and up to
+ * 12 bpc
+ * - The chosen mode has a TMDS character rate higher than the display
+ * supports in RGB/10bpc but lower than the display supports in
+ * RGB/8bpc
+ * - The chosen mode has a TMDS character rate lower than the display
+ * supports in YUV420/12bpc.
+ *
+ * Then we will prefer to keep the RGB format with a lower bpc over
+ * picking YUV420.
+ */
+static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_display_info *info;
+ struct drm_display_mode *preferred;
+ unsigned long long rate;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_4k_rgb_yuv420_dc_max_340mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ KUNIT_ASSERT_TRUE(test, conn->ycbcr_420_allowed);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_also(info, preferred));
+
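+ /*
+ * Sanity-check the EDID: RGB/8bpc fits within the TMDS clock limit,
+ * RGB/10bpc does not, and YUV420/12bpc would fit if the format switch
+ * were allowed.
+ */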
+ rate = drm_hdmi_compute_mode_clock(preferred, 8, HDMI_COLORSPACE_RGB);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
+ KUNIT_ASSERT_GT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ preferred, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that if a driver supports only RGB, but the chosen mode can be
+ * supported by the screen only in the YUV420 output format, every
+ * format fallback attempt fails and the atomic check rejects the mode.
+ */
+static void drm_test_check_driver_unsupported_fallback_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_display_info *info;
+ struct drm_display_mode *preferred, *yuv420_only_mode;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_FALSE(test, conn->ycbcr_420_allowed);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_ASSERT_FALSE(test, drm_mode_is_420_also(info, preferred));
+
+ yuv420_only_mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, yuv420_only_mode);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_only(info, yuv420_only_mode));
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ preferred, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_crtc_state:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (PTR_ERR(crtc_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_crtc_state;
+ }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, yuv420_only_mode);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_crtc_state;
+ }
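+
+ /* The driver is RGB-only, so a YUV420-only mode must fail the check. */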
+ KUNIT_ASSERT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
* Test that if a driver and screen supports RGB and YUV formats, and we
* try to set the VIC 1 mode, we end up with 8bpc RGB even if we could
* have had a higher bpc.
@@ -1313,20 +1638,17 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1347,7 +1669,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1377,19 +1702,16 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1414,7 +1736,10 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1444,21 +1769,18 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1483,7 +1805,10 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1514,19 +1839,16 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1543,7 +1865,10 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1574,21 +1899,18 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1605,7 +1927,10 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1645,7 +1970,10 @@ static void drm_test_check_disable_connector(struct kunit *test)
drm = &priv->drm;
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -1678,17 +2006,17 @@ static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
KUNIT_CASE(drm_test_check_broadcast_rgb_full_cea_mode_vic_1),
KUNIT_CASE(drm_test_check_broadcast_rgb_limited_cea_mode),
KUNIT_CASE(drm_test_check_broadcast_rgb_limited_cea_mode_vic_1),
- /*
- * TODO: When we'll have YUV output support, we need to check
- * that the limited range is always set to limited no matter
- * what the value of Broadcast RGB is.
- */
+ KUNIT_CASE_PARAM(drm_test_check_broadcast_rgb_cea_mode_yuv420,
+ check_broadcast_rgb_cea_mode_yuv420_gen_params),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_changed),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_not_changed),
KUNIT_CASE(drm_test_check_disable_connector),
KUNIT_CASE(drm_test_check_hdmi_funcs_reject_rate),
- KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback),
- KUNIT_CASE(drm_test_check_max_tmds_rate_format_fallback),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_rgb),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_yuv420),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv420),
+ KUNIT_CASE(drm_test_check_driver_unsupported_fallback_yuv420),
KUNIT_CASE(drm_test_check_output_bpc_crtc_mode_changed),
KUNIT_CASE(drm_test_check_output_bpc_crtc_mode_not_changed),
KUNIT_CASE(drm_test_check_output_bpc_dvi),
@@ -1901,28 +2229,20 @@ static void drm_test_check_mode_valid(struct kunit *test)
static void drm_test_check_mode_valid_reject_rate(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_connector *conn;
struct drm_display_mode *preferred;
- int ret;
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8,
- &reject_100_MHz_connector_hdmi_funcs);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_100mhz_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
- conn = &priv->connector;
-
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
/*
 * Unlike in drm_test_check_mode_valid(), here 1080p is rejected but
 * 480p is allowed.
*/
- preferred = find_preferred_mode(conn);
+ preferred = find_preferred_mode(&priv->connector);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
@@ -1940,12 +2260,14 @@ static void drm_test_check_mode_valid_reject(struct kunit *test)
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
struct drm_display_mode *preferred;
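+ /* A zero-sized EDID array makes the init helper skip the EDID setup. */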
+ unsigned char no_edid[] = {};
int ret;
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8,
- &reject_connector_hdmi_funcs);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_connector_hdmi_funcs,
+ no_edid);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1970,20 +2292,15 @@ static void drm_test_check_mode_valid_reject_max_clock(struct kunit *test)
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
struct drm_display_mode *preferred;
- int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_100mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
-
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_100mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_100mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
KUNIT_ASSERT_EQ(test, conn->display_info.max_tmds_clock, 100 * 1000);
preferred = find_preferred_mode(conn);
diff --git a/drivers/gpu/drm/tests/drm_kunit_edid.h b/drivers/gpu/drm/tests/drm_kunit_edid.h
index 6358397a5d7a..c59c8528a3f7 100644
--- a/drivers/gpu/drm/tests/drm_kunit_edid.h
+++ b/drivers/gpu/drm/tests/drm_kunit_edid.h
@@ -46,6 +46,13 @@
* Monitor ranges (GTF): 50-70 Hz V, 30-70 kHz H, max dotclock 150 MHz
* Dummy Descriptor:
* Checksum: 0xab
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_dvi_1080p[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -62,6 +69,10 @@ static const unsigned char test_edid_dvi_1080p[] = {
};
/*
+ *
+ * This EDID is intentionally broken by its 100 MHz maximum TMDS clock:
+ * the limit is lower than what the listed video timings require. It is
+ * meant to be used only by tests exercising such unusual situations.
+ *
* edid-decode (hex):
*
* 00 ff ff ff ff ff ff 00 31 d8 2a 00 00 00 00 00
@@ -73,14 +84,14 @@ static const unsigned char test_edid_dvi_1080p[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
- * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
- * 00 12 34 00 14 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c
+ * 00 12 34 00 14 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e4
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 10
*
* ----------------
*
@@ -135,8 +146,19 @@ static const unsigned char test_edid_dvi_1080p[] = {
* Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
* Source physical address: 1.2.3.4
* Maximum TMDS clock: 100 MHz
- * Extended HDMI video details:
- * Checksum: 0xe4 Unused space in Extension Block: 100 bytes
+ * Checksum: 0x10 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * Failures:
+ *
+ * EDID:
+ * CTA-861: The maximum HDMI TMDS clock is 100000 kHz, but one or more video timings go up to 148500 kHz.
+ *
+ * EDID conformity: FAIL
*/
static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -147,11 +169,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
- 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x00, 0x14, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -160,7 +182,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xe4
+ 0x00, 0x00, 0x00, 0x10
};
/*
@@ -175,14 +197,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
- * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
- * 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c
+ * 00 12 34 00 28 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fc
*
* ----------------
*
@@ -237,8 +259,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
* Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
* Source physical address: 1.2.3.4
* Maximum TMDS clock: 200 MHz
- * Extended HDMI video details:
- * Checksum: 0xd0 Unused space in Extension Block: 100 bytes
+ * Checksum: 0xfc Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -249,11 +277,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
- 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x00, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -262,7 +290,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xd0
+ 0x00, 0x00, 0x00, 0xfc
};
/*
@@ -277,14 +305,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
- * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
- * 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c
+ * 00 12 34 00 44 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e0
*
* ----------------
*
@@ -339,8 +367,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
* Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
* Source physical address: 1.2.3.4
* Maximum TMDS clock: 340 MHz
- * Extended HDMI video details:
- * Checksum: 0xd0 Unused space in Extension Block: 100 bytes
+ * Checksum: 0xe0 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -351,11 +385,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
- 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x00, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -364,7 +398,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xd0
+ 0x00, 0x00, 0x00, 0xe0
};
/*
@@ -379,14 +413,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7a
*
- * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c
- * 00 12 34 78 28 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c
+ * 00 12 34 78 28 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 a8
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d4
*
* ----------------
*
@@ -447,8 +481,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
* DC_30bit
* DC_Y444
* Maximum TMDS clock: 200 MHz
- * Extended HDMI video details:
- * Checksum: 0xa8 Unused space in Extension Block: 100 bytes
+ * Checksum: 0xd4 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -461,9 +501,9 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x1b, 0xb1,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x78, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x15, 0xb1,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x78, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -472,7 +512,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xa8
+ 0x00, 0x00, 0x00, 0xd4
};
/*
@@ -487,14 +527,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 8a
*
- * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c
- * 00 12 34 78 44 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c
+ * 00 12 34 78 44 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 8c
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 b8
*
* ----------------
*
@@ -555,8 +595,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
* DC_30bit
* DC_Y444
* Maximum TMDS clock: 340 MHz
- * Extended HDMI video details:
- * Checksum: 0x8c Unused space in Extension Block: 100 bytes
+ * Checksum: 0xb8 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -569,18 +615,250 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = {
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x1b, 0xb1,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x78, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x15, 0xb1,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x78, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xb8
+};
+
+/*
+ * Max resolution:
+ * - 1920x1080@60Hz with RGB, YUV444, YUV422
+ * - 3840x2160@30Hz with YUV420 only
+ * Max BPC: 16 for all modes
+ * Max TMDS clock: 200 MHz
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 34 00 00 00 00 00
+ * ff 23 01 03 80 60 36 78 0f ee 91 a3 54 4c 99 26
+ * 0f 50 54 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 02 3a 80 18 71 38 2d 40 58 2c
+ * 45 00 c0 1c 32 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 18
+ * 55 18 5e 11 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 bb
+ *
+ * 02 03 29 31 42 90 5f 6c 03 0c 00 10 00 78 28 20
+ * 00 00 01 03 6d d8 5d c4 01 28 80 07 00 00 00 00
+ * 00 00 e3 0f 00 00 e2 0e 5f 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ca
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 52
+ * Model year: 2025
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 96 cm x 54 cm
+ * Gamma: 2.20
+ * RGB color display
+ * Default (sRGB) color space is primary color space
+ * First detailed timing is the preferred timing
+ * Supports GTF timings within operating range
+ * Color Characteristics:
+ * Red : 0.6396, 0.3300
+ * Green: 0.2998, 0.5996
+ * Blue : 0.1503, 0.0595
+ * White: 0.3125, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (960 mm x 540 mm)
+ * Hfront 88 Hsync 44 Hback 148 Hpol P
+ * Vfront 4 Vsync 5 Vback 36 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 24-85 Hz V, 24-94 kHz H, max dotclock 170 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0xbb
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 1
+ * Video Data Block:
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * DC_48bit
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 200 MHz
+ * Extended HDMI video details:
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 200 MHz
+ * SCDC Present
+ * Supports 16-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * Empty Capability Map
+ * YCbCr 4:2:0 Video Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Checksum: 0xca
+ */
+static const unsigned char test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x23, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+ 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x55, 0x18, 0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xbb, 0x02, 0x03, 0x29, 0x31,
+ 0x42, 0x90, 0x5f, 0x6c, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x78, 0x28, 0x20,
+ 0x00, 0x00, 0x01, 0x03, 0x6d, 0xd8, 0x5d, 0xc4, 0x01, 0x28, 0x80, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0f, 0x00, 0x00, 0xe2, 0x0e,
+ 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xca
+};
+
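Aside: every checksum byte updated throughout this patch follows the EDID
rule that each 128-byte block must sum to zero modulo 256. A minimal
sketch of recomputing one, using a hypothetical helper name and no kernel
API, assuming the block is held in a plain byte array:

/*
 * Recompute the final byte of a 128-byte EDID block so that the whole
 * block sums to 0 (mod 256). Illustrative only; not part of this patch.
 */
static void edid_block_fix_checksum(unsigned char block[128])
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < 127; i++)
		sum += block[i];

	block[127] = (unsigned char)-sum;	/* equals (256 - sum) % 256 */
}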
+/*
+ * Max resolution: 3840x2160@30Hz with RGB, YUV444, YUV422, YUV420
+ * Max BPC: 16 for all modes
+ * Max TMDS clock: 340 MHz
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 34 00 00 00 00 00
+ * ff 23 01 03 80 60 36 78 0f ee 91 a3 54 4c 99 26
+ * 0f 50 54 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 04 74 00 30 f2 70 5a 80 b0 58
+ * 8a 00 40 84 63 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 18
+ * 55 18 5e 22 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 ce
+ *
+ * 02 03 27 31 41 5f 6c 03 0c 00 10 00 78 44 20 00
+ * 00 01 03 6d d8 5d c4 01 44 80 07 00 00 00 00 00
+ * 00 e3 0f 01 00 e1 0e 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 84
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 52
+ * Model year: 2025
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 96 cm x 54 cm
+ * Gamma: 2.20
+ * RGB color display
+ * Default (sRGB) color space is primary color space
+ * First detailed timing is the preferred timing
+ * Supports GTF timings within operating range
+ * Color Characteristics:
+ * Red : 0.6396, 0.3300
+ * Green: 0.2998, 0.5996
+ * Blue : 0.1503, 0.0595
+ * White: 0.3125, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz (1600 mm x 900 mm)
+ * Hfront 176 Hsync 88 Hback 296 Hpol P
+ * Vfront 8 Vsync 10 Vback 72 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 24-85 Hz V, 24-94 kHz H, max dotclock 340 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0xce
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 1
+ * Video Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * DC_48bit
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 340 MHz
+ * Extended HDMI video details:
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 340 MHz
+ * SCDC Present
+ * Supports 16-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * YCbCr 4:2:0 Video Data Block:
+ * Checksum: 0x84
+ */
+static const unsigned char test_edid_hdmi_4k_rgb_yuv420_dc_max_340mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x23, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x74, 0x00, 0x30, 0xf2, 0x70,
+ 0x5a, 0x80, 0xb0, 0x58, 0x8a, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x55, 0x18, 0x5e, 0x22, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0x02, 0x03, 0x27, 0x31,
+ 0x41, 0x5f, 0x6c, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x78, 0x44, 0x20, 0x00,
+ 0x00, 0x01, 0x03, 0x6d, 0xd8, 0x5d, 0xc4, 0x01, 0x44, 0x80, 0x07, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0f, 0x01, 0x00, 0xe1, 0x0e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x8c
+ 0x00, 0x00, 0x00, 0x84
};
#endif // DRM_KUNIT_EDID_H_
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 6f6616cf4966..04edb6079c0d 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -2,6 +2,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
@@ -12,6 +13,7 @@
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#define KUNIT_DEVICE_NAME "drm-kunit-mock-device"
@@ -271,6 +273,66 @@ drm_kunit_helper_create_crtc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
+/**
+ * drm_kunit_helper_enable_crtc_connector - Enables a CRTC -> Connector output
+ * @test: The test context object
+ * @drm: The DRM device the CRTC and connector belong to
+ * @crtc: The CRTC to enable
+ * @connector: The Connector to enable
+ * @mode: The display mode to configure the CRTC with
+ * @ctx: Locking context
+ *
+ * This function creates an atomic update to enable the route from @crtc
+ * to @connector, with the given @mode.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code otherwise. If the error
+ * returned is -EDEADLK, the entire atomic sequence must be restarted.
+ */
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ if (ret)
+ return ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
+ if (ret)
+ return ret;
+
+ crtc_state->enable = true;
+ crtc_state->active = true;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_enable_crtc_connector);
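+
+/*
+ * Illustrative usage sketch (not a definitive pattern; crtc, connector and
+ * mode are assumed test fixtures): callers are expected to restart the
+ * whole atomic sequence when -EDEADLK is returned:
+ *
+ *	drm_modeset_acquire_init(&ctx, 0);
+ *retry:
+ *	ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc,
+ *						     connector, mode, &ctx);
+ *	if (ret == -EDEADLK) {
+ *		drm_modeset_backoff(&ctx);
+ *		goto retry;
+ *	}
+ *	KUNIT_ASSERT_EQ(test, ret, 0);
+ *	drm_modeset_drop_locks(&ctx);
+ *	drm_modeset_acquire_fini(&ctx);
+ */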
+
static void kunit_action_drm_mode_destroy(void *ptr)
{
struct drm_display_mode *mode = ptr;
diff --git a/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c b/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c
new file mode 100644
index 000000000000..e875d876118f
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include "../sysfb/drm_sysfb_helper.h"
+
+#define TEST_BUF_SIZE 50
+
+struct sysfb_build_fourcc_list_case {
+ const char *name;
+ u32 native_fourccs[TEST_BUF_SIZE];
+ size_t native_fourccs_size;
+ u32 expected[TEST_BUF_SIZE];
+ size_t expected_fourccs_size;
+};
+
+static struct sysfb_build_fourcc_list_case sysfb_build_fourcc_list_cases[] = {
+ {
+ .name = "no native formats",
+ .native_fourccs = { },
+ .native_fourccs_size = 0,
+ .expected = { DRM_FORMAT_XRGB8888 },
+ .expected_fourccs_size = 1,
+ },
+ {
+ .name = "XRGB8888 as native format",
+ .native_fourccs = { DRM_FORMAT_XRGB8888 },
+ .native_fourccs_size = 1,
+ .expected = { DRM_FORMAT_XRGB8888 },
+ .expected_fourccs_size = 1,
+ },
+ {
+ .name = "remove duplicates",
+ .native_fourccs = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ },
+ .native_fourccs_size = 11,
+ .expected = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ },
+ .expected_fourccs_size = 3,
+ },
+ {
+ .name = "convert alpha formats",
+ .native_fourccs = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_RGBA1010102,
+ DRM_FORMAT_BGRA1010102,
+ },
+ .native_fourccs_size = 12,
+ .expected = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_RGBX1010102,
+ DRM_FORMAT_BGRX1010102,
+ },
+ .expected_fourccs_size = 12,
+ },
+ {
+ .name = "random formats",
+ .native_fourccs = {
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_C8,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_R10,
+ DRM_FORMAT_XYUV8888,
+ },
+ .native_fourccs_size = 10,
+ .expected = {
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_C8,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_R10,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XRGB8888,
+ },
+ .expected_fourccs_size = 10,
+ },
+};
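+
+/*
+ * Sketch of the behaviour the cases above exercise (derived from the
+ * expectations, not from the helper's implementation): duplicates are
+ * dropped, alpha formats are replaced by their opaque (X) equivalents, and
+ * DRM_FORMAT_XRGB8888 is appended as a fallback when not already present:
+ *
+ *	static const u32 in[] = { DRM_FORMAT_ARGB8888 };
+ *	u32 out[TEST_BUF_SIZE];
+ *	size_t n = drm_sysfb_build_fourcc_list(drm, in, ARRAY_SIZE(in),
+ *					       out, TEST_BUF_SIZE);
+ *	// n == 1, out[0] == DRM_FORMAT_XRGB8888
+ */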
+
+static void sysfb_build_fourcc_list_case_desc(struct sysfb_build_fourcc_list_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(sysfb_build_fourcc_list, sysfb_build_fourcc_list_cases,
+ sysfb_build_fourcc_list_case_desc);
+
+static void drm_test_sysfb_build_fourcc_list(struct kunit *test)
+{
+ const struct sysfb_build_fourcc_list_case *params = test->param_value;
+ u32 fourccs_out[TEST_BUF_SIZE] = {0};
+ size_t nfourccs_out;
+ struct drm_device *drm;
+ struct device *dev;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
+
+ nfourccs_out = drm_sysfb_build_fourcc_list(drm, params->native_fourccs,
+ params->native_fourccs_size,
+ fourccs_out, TEST_BUF_SIZE);
+
+ KUNIT_EXPECT_EQ(test, nfourccs_out, params->expected_fourccs_size);
+ KUNIT_EXPECT_MEMEQ(test, fourccs_out, params->expected, TEST_BUF_SIZE);
+}
+
+static struct kunit_case drm_sysfb_modeset_test_cases[] = {
+ KUNIT_CASE_PARAM(drm_test_sysfb_build_fourcc_list, sysfb_build_fourcc_list_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_sysfb_modeset_test_suite = {
+ .name = "drm_sysfb_modeset_test",
+ .test_cases = drm_sysfb_modeset_test_cases,
+};
+
+kunit_test_suite(drm_sysfb_modeset_test_suite);
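+
+/*
+ * The suite can be run with the KUnit wrapper, e.g. (assuming the usual DRM
+ * tests kunitconfig location):
+ *
+ *	./tools/testing/kunit/kunit.py run \
+ *		--kunitconfig=drivers/gpu/drm/tests drm_sysfb_modeset_test
+ */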
+
+MODULE_DESCRIPTION("KUnit tests for the drm_sysfb_modeset APIs");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/gpu/drm/tidss/Makefile b/drivers/gpu/drm/tidss/Makefile
index 312645271014..b6d6becf1683 100644
--- a/drivers/gpu/drm/tidss/Makefile
+++ b/drivers/gpu/drm/tidss/Makefile
@@ -7,6 +7,7 @@ tidss-y := tidss_crtc.o \
tidss_irq.o \
tidss_plane.o \
tidss_scale_coefs.o \
- tidss_dispc.o
+ tidss_dispc.o \
+ tidss_oldi.o
obj-$(CONFIG_DRM_TIDSS) += tidss.o
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 94f8e3178df5..a2f40a5c7703 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -130,7 +130,7 @@ static void tidss_crtc_position_planes(struct tidss_device *tidss,
!to_tidss_crtc_state(cstate)->plane_pos_changed)
return;
- for (layer = 0; layer < tidss->feat->num_planes; layer++) {
+	for (layer = 0; layer < tidss->feat->num_vids; layer++) {
struct drm_plane_state *pstate;
struct drm_plane *plane;
bool layer_active = false;
@@ -271,7 +271,7 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
* another videoport, the DSS will report sync lost issues. Disable all
* the layers here as a work-around.
*/
- for (u32 layer = 0; layer < tidss->feat->num_planes; layer++)
+ for (u32 layer = 0; layer < tidss->feat->num_vids; layer++)
dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
false);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index cacb5f3d8085..c0277fa36425 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -103,9 +103,16 @@ const struct dispc_features dispc_k2g_feats = {
},
},
- .num_planes = 1,
- .vid_name = { "vid1" },
- .vid_lite = { false },
+ .num_vids = 1,
+
+ .vid_info = {
+ {
+ .name = "vid1",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ },
+
.vid_order = { 0 },
};
@@ -139,7 +146,7 @@ static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
const struct dispc_features dispc_am65x_feats = {
.max_pclk_khz = {
[DISPC_VP_DPI] = 165000,
- [DISPC_VP_OLDI] = 165000,
+ [DISPC_VP_OLDI_AM65X] = 165000,
},
.scaling = {
@@ -169,7 +176,7 @@ const struct dispc_features dispc_am65x_feats = {
.vp_name = { "vp1", "vp2" },
.ovr_name = { "ovr1", "ovr2" },
.vpclk_name = { "vp1", "vp2" },
- .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI },
+ .vp_bus_type = { DISPC_VP_OLDI_AM65X, DISPC_VP_DPI },
.vp_feat = { .color = {
.has_ctm = true,
@@ -178,11 +185,22 @@ const struct dispc_features dispc_am65x_feats = {
},
},
- .num_planes = 2,
+ .num_vids = 2,
/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ },
+ },
+
+	.vid_order = { 1, 0 },
};
static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
@@ -267,9 +285,32 @@ const struct dispc_features dispc_j721e_feats = {
.gamma_type = TIDSS_GAMMA_10BIT,
},
},
- .num_planes = 4,
- .vid_name = { "vid1", "vidl1", "vid2", "vidl2" },
- .vid_lite = { 0, 1, 0, 1, },
+
+ .num_vids = 4,
+
+ .vid_info = {
+ {
+ .name = "vid1",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ },
+ {
+ .name = "vid2",
+ .is_lite = false,
+ .hw_id = 2,
+ },
+ {
+ .name = "vidl2",
+ .is_lite = true,
+ .hw_id = 3,
+ },
+ },
+
.vid_order = { 1, 3, 0, 2 },
};
@@ -315,11 +356,23 @@ const struct dispc_features dispc_am625_feats = {
},
},
- .num_planes = 2,
+ .num_vids = 2,
+
/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+	.vid_order = { 1, 0 },
};
const struct dispc_features dispc_am62a7_feats = {
@@ -369,11 +422,58 @@ const struct dispc_features dispc_am62a7_feats = {
},
},
- .num_planes = 2,
- /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .num_vids = 2,
+
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+	.vid_order = { 1, 0 },
+};
+
+const struct dispc_features dispc_am62l_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ },
+
+ .subrev = DISPC_AM62L,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 1,
+ .vp_name = { "vp1" },
+ .ovr_name = { "ovr1" },
+ .vpclk_name = { "vp1" },
+ .vp_bus_type = { DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_vids = 1,
+
+ .vid_info = {
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+	.vid_order = { 0 },
};
static const u16 *dispc_common_regmap;
@@ -391,7 +491,7 @@ struct dispc_device {
void __iomem *base_ovr[TIDSS_MAX_PORTS];
void __iomem *base_vp[TIDSS_MAX_PORTS];
- struct regmap *oldi_io_ctrl;
+ struct regmap *am65x_oldi_io_ctrl;
struct clk *vp_clk[TIDSS_MAX_PORTS];
@@ -466,6 +566,29 @@ static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
return ioread32(base + reg);
}
+int tidss_configure_oldi(struct tidss_device *tidss, u32 hw_videoport,
+ u32 oldi_cfg)
+{
+ u32 count = 0;
+ u32 oldi_reset_bit = BIT(5 + hw_videoport);
+
+ dispc_vp_write(tidss->dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
+
+ while (!(oldi_reset_bit & dispc_read(tidss->dispc, DSS_SYSSTATUS)) &&
+ count < 10000)
+ count++;
+
+ if (!(oldi_reset_bit & dispc_read(tidss->dispc, DSS_SYSSTATUS)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport)
+{
+ dispc_vp_write(tidss->dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
+}
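+
+/*
+ * Note on the reset handshake above: DSS_SYSSTATUS carries one OLDI reset
+ * done bit per video port at bit position 5 + hw_videoport, so e.g. for
+ * hw_videoport 1 the polled bit is BIT(6).
+ */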
+
/*
* TRM gives bitfields as start:end, where start is the higher bit
* number. For example 7:0
@@ -734,7 +857,8 @@ static void dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
u32 hw_plane)
{
- u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_id));
return dispc_vid_irq_from_raw(stat, hw_plane);
}
@@ -742,9 +866,10 @@ static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
static void dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
u32 hw_plane, dispc_irq_t vidstat)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
- dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+ dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_id), stat);
}
static dispc_irq_t dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
@@ -766,7 +891,8 @@ static void dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
u32 hw_plane)
{
- u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_id));
return dispc_vid_irq_from_raw(stat, hw_plane);
}
@@ -774,9 +900,10 @@ static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
static void dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
u32 hw_plane, dispc_irq_t vidstat)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
- dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+ dispc_write(dispc, DISPC_VID_IRQENABLE(hw_id), stat);
}
static
@@ -788,7 +915,8 @@ void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
if (clearmask & DSS_IRQ_VP_MASK(i))
dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
}
- for (i = 0; i < dispc->feat->num_planes; ++i) {
+
+ for (i = 0; i < dispc->feat->num_vids; ++i) {
if (clearmask & DSS_IRQ_PLANE_MASK(i))
dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
}
@@ -809,7 +937,7 @@ dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc)
for (i = 0; i < dispc->feat->num_vps; ++i)
status |= dispc_k3_vp_read_irqstatus(dispc, i);
- for (i = 0; i < dispc->feat->num_planes; ++i)
+ for (i = 0; i < dispc->feat->num_vids; ++i)
status |= dispc_k3_vid_read_irqstatus(dispc, i);
dispc_k3_clear_irqstatus(dispc, status);
@@ -825,7 +953,7 @@ static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc)
for (i = 0; i < dispc->feat->num_vps; ++i)
enable |= dispc_k3_vp_read_irqenable(dispc, i);
- for (i = 0; i < dispc->feat->num_planes; ++i)
+ for (i = 0; i < dispc->feat->num_vids; ++i)
enable |= dispc_k3_vid_read_irqenable(dispc, i);
return enable;
@@ -851,12 +979,15 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
main_disable |= BIT(i); /* VP IRQ */
}
- for (i = 0; i < dispc->feat->num_planes; ++i) {
+ for (i = 0; i < dispc->feat->num_vids; ++i) {
+ u32 hw_id = dispc->feat->vid_info[i].hw_id;
+
dispc_k3_vid_set_irqenable(dispc, i, mask);
+
if (mask & DSS_IRQ_PLANE_MASK(i))
- main_enable |= BIT(i + 4); /* VID IRQ */
+ main_enable |= BIT(hw_id + 4); /* VID IRQ */
else
- main_disable |= BIT(i + 4); /* VID IRQ */
+ main_disable |= BIT(hw_id + 4); /* VID IRQ */
}
if (main_enable)
@@ -879,6 +1010,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
return dispc_k2g_read_and_clear_irqstatus(dispc);
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -896,6 +1028,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_set_irqenable(dispc, mask);
@@ -906,13 +1039,11 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
}
}
-enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
-
struct dispc_bus_format {
u32 bus_fmt;
u32 data_width;
bool is_oldi_fmt;
- enum dispc_oldi_mode_reg_val oldi_mode_reg_val;
+ enum oldi_mode_reg_val am65x_oldi_mode_reg_val;
};
static const struct dispc_bus_format dispc_bus_formats[] = {
@@ -956,7 +1087,7 @@ int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
return -EINVAL;
}
- if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI &&
+ if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI_AM65X &&
fmt->is_oldi_fmt) {
dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
__func__, dispc->feat->vp_name[hw_videoport]);
@@ -966,23 +1097,23 @@ int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
return 0;
}
-static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
+static void dispc_am65x_oldi_tx_power(struct dispc_device *dispc, bool power)
{
- u32 val = power ? 0 : OLDI_PWRDN_TX;
+ u32 val = power ? 0 : AM65X_OLDI_PWRDN_TX;
- if (WARN_ON(!dispc->oldi_io_ctrl))
+ if (WARN_ON(!dispc->am65x_oldi_io_ctrl))
return;
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
- OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT0_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT1_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT2_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT3_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_CLK_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
}
static void dispc_set_num_datalines(struct dispc_device *dispc,
@@ -1011,8 +1142,8 @@ static void dispc_set_num_datalines(struct dispc_device *dispc,
VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
}
-static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
- const struct dispc_bus_format *fmt)
+static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_bus_format *fmt)
{
u32 oldi_cfg = 0;
u32 oldi_reset_bit = BIT(5 + hw_videoport);
@@ -1031,7 +1162,7 @@ static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
oldi_cfg |= BIT(7); /* DEPOL */
- oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
+ oldi_cfg = FLD_MOD(oldi_cfg, fmt->am65x_oldi_mode_reg_val, 3, 1);
oldi_cfg |= BIT(12); /* SOFTRST */
@@ -1060,10 +1191,10 @@ void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
if (WARN_ON(!fmt))
return;
- if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
- dispc_oldi_tx_power(dispc, true);
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X) {
+ dispc_am65x_oldi_tx_power(dispc, true);
- dispc_enable_oldi(dispc, hw_videoport, fmt);
+ dispc_enable_am65x_oldi(dispc, hw_videoport, fmt);
}
}
@@ -1119,7 +1250,7 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
align = true;
/* always use DE_HIGH for OLDI */
- if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI)
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X)
ieo = false;
dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
@@ -1145,10 +1276,10 @@ void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
{
- if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X) {
dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
- dispc_oldi_tx_power(dispc, false);
+ dispc_am65x_oldi_tx_power(dispc, false);
}
}
@@ -1310,7 +1441,6 @@ void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport)
* Calculate the percentage difference between the requested pixel clock rate
* and the effective rate resulting from calculating the clock divider value.
*/
-static
unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
{
int r = rate / 100, rr = real_rate / 100;
@@ -1358,8 +1488,10 @@ static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
u32 hw_plane, u32 hw_videoport,
u32 x, u32 y, u32 layer)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_plane, 4, 1);
+ hw_id, 4, 1);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
x, 17, 6);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
@@ -1370,8 +1502,10 @@ static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
u32 hw_plane, u32 hw_videoport,
u32 x, u32 y, u32 layer)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_plane, 4, 1);
+ hw_id, 4, 1);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
x, 13, 0);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
@@ -1388,6 +1522,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
@@ -2025,7 +2160,7 @@ int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
{
- bool lite = dispc->feat->vid_lite[hw_plane];
+ bool lite = dispc->feat->vid_info[hw_plane].is_lite;
u32 fourcc = state->fb->format->format;
bool need_scaling = state->src_w >> 16 != state->crtc_w ||
state->src_h >> 16 != state->crtc_h;
@@ -2096,7 +2231,7 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
{
- bool lite = dispc->feat->vid_lite[hw_plane];
+ bool lite = dispc->feat->vid_info[hw_plane].is_lite;
u32 fourcc = state->fb->format->format;
u16 cpp = state->fb->format->cpp[0];
u32 fb_width = state->fb->pitches[0] / cpp;
@@ -2210,7 +2345,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
/* MFLAG_START = MFLAGNORMALSTARTMODE */
REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
- for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
u32 thr_low, thr_high;
u32 mflag_low, mflag_high;
@@ -2226,7 +2361,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev,
"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
- dispc->feat->vid_name[hw_plane],
+ dispc->feat->vid_info[hw_plane].name,
size,
thr_high, thr_low,
mflag_high, mflag_low,
@@ -2265,7 +2400,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
/* MFLAG_START = MFLAGNORMALSTARTMODE */
REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
- for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
u32 thr_low, thr_high;
u32 mflag_low, mflag_high;
@@ -2281,7 +2416,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev,
"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
- dispc->feat->vid_name[hw_plane],
+ dispc->feat->vid_info[hw_plane].name,
size,
thr_high, thr_low,
mflag_high, mflag_low,
@@ -2308,6 +2443,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_plane_init(dispc);
@@ -2416,6 +2552,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
break;
@@ -2735,15 +2872,15 @@ static int dispc_iomap_resource(struct platform_device *pdev, const char *name,
static int dispc_init_am65x_oldi_io_ctrl(struct device *dev,
struct dispc_device *dispc)
{
- dispc->oldi_io_ctrl =
+ dispc->am65x_oldi_io_ctrl =
syscon_regmap_lookup_by_phandle(dev->of_node,
"ti,am65x-oldi-io-ctrl");
- if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) {
- dispc->oldi_io_ctrl = NULL;
- } else if (IS_ERR(dispc->oldi_io_ctrl)) {
+ if (PTR_ERR(dispc->am65x_oldi_io_ctrl) == -ENODEV) {
+ dispc->am65x_oldi_io_ctrl = NULL;
+ } else if (IS_ERR(dispc->am65x_oldi_io_ctrl)) {
dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n",
- __func__, PTR_ERR(dispc->oldi_io_ctrl));
- return PTR_ERR(dispc->oldi_io_ctrl);
+ __func__, PTR_ERR(dispc->am65x_oldi_io_ctrl));
+ return PTR_ERR(dispc->am65x_oldi_io_ctrl);
}
return 0;
}
@@ -2898,8 +3035,8 @@ int dispc_init(struct tidss_device *tidss)
if (r)
return r;
- for (i = 0; i < dispc->feat->num_planes; i++) {
- r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i],
+ for (i = 0; i < dispc->feat->num_vids; i++) {
+ r = dispc_iomap_resource(pdev, dispc->feat->vid_info[i].name,
&dispc->base_vid[i]);
if (r)
return r;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 086327d51a90..b8614f62186c 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -46,13 +46,19 @@ struct dispc_features_scaling {
u32 xinc_max;
};
+struct dispc_vid_info {
+ const char *name; /* Should match dt reg names */
+ u32 hw_id;
+ bool is_lite;
+};
+
struct dispc_errata {
bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
};
enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
- DISPC_VP_OLDI, /* OLDI (LVDS) output */
+ DISPC_VP_OLDI_AM65X, /* OLDI (LVDS) output for AM65x DSS */
DISPC_VP_INTERNAL, /* SoC internal routing */
DISPC_VP_TIED_OFF, /* Tied off / Unavailable */
DISPC_VP_MAX_BUS_TYPE,
@@ -61,6 +67,7 @@ enum dispc_vp_bus_type {
enum dispc_dss_subrevision {
DISPC_K2G,
DISPC_AM625,
+ DISPC_AM62L,
DISPC_AM62A7,
DISPC_AM65X,
DISPC_J721E,
@@ -82,18 +89,23 @@ struct dispc_features {
const char *vpclk_name[TIDSS_MAX_PORTS]; /* Should match dt clk names */
const enum dispc_vp_bus_type vp_bus_type[TIDSS_MAX_PORTS];
struct tidss_vp_feat vp_feat;
- u32 num_planes;
- const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
- bool vid_lite[TIDSS_MAX_PLANES];
+ u32 num_vids;
+ struct dispc_vid_info vid_info[TIDSS_MAX_PLANES];
u32 vid_order[TIDSS_MAX_PLANES];
};
extern const struct dispc_features dispc_k2g_feats;
extern const struct dispc_features dispc_am625_feats;
extern const struct dispc_features dispc_am62a7_feats;
+extern const struct dispc_features dispc_am62l_feats;
extern const struct dispc_features dispc_am65x_feats;
extern const struct dispc_features dispc_j721e_feats;
+int tidss_configure_oldi(struct tidss_device *tidss, u32 hw_videoport,
+ u32 oldi_cfg);
+void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport);
+unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate);
+
void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask);
dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
index e88148e44937..50a3f28250ef 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -226,18 +226,35 @@ enum dispc_common_regs {
#define DISPC_VP_DSS_DMA_THREADSIZE 0x170 /* J721E */
#define DISPC_VP_DSS_DMA_THREADSIZE_STATUS 0x174 /* J721E */
+/* OLDI Config Bits (DISPC_VP_DSS_OLDI_CFG) */
+#define OLDI_ENABLE BIT(0)
+#define OLDI_MAP (BIT(1) | BIT(2) | BIT(3))
+#define OLDI_SRC BIT(4)
+#define OLDI_CLONE_MODE BIT(5)
+#define OLDI_MASTERSLAVE BIT(6)
+#define OLDI_DEPOL BIT(7)
+#define OLDI_MSB BIT(8)
+#define OLDI_LBEN BIT(9)
+#define OLDI_LBDATA BIT(10)
+#define OLDI_DUALMODESYNC BIT(11)
+#define OLDI_SOFTRST BIT(12)
+#define OLDI_TPATCFG BIT(13)
+
+/* LVDS format values for the OLDI_MAP field in DISPC_VP_DSS_OLDI_CFG */
+enum oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
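+
+/*
+ * The OLDI_MAP field occupies bits 3:1 of DISPC_VP_DSS_OLDI_CFG, so a mode
+ * value from the enum above is installed with, e.g.:
+ *
+ *	oldi_cfg = (oldi_cfg & ~OLDI_MAP) | (SPWG_24 << 1);
+ */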
+
/*
* OLDI IO_CTRL register offsets. On AM654 the registers are found
* from CTRL_MMR0, there the syscon regmap should map 0x14 bytes from
* CTRLMMR0P1_OLDI_DAT0_IO_CTRL to CTRLMMR0P1_OLDI_CLK_IO_CTRL
* register range.
*/
-#define OLDI_DAT0_IO_CTRL 0x00
-#define OLDI_DAT1_IO_CTRL 0x04
-#define OLDI_DAT2_IO_CTRL 0x08
-#define OLDI_DAT3_IO_CTRL 0x0C
-#define OLDI_CLK_IO_CTRL 0x10
+#define AM65X_OLDI_DAT0_IO_CTRL 0x00
+#define AM65X_OLDI_DAT1_IO_CTRL 0x04
+#define AM65X_OLDI_DAT2_IO_CTRL 0x08
+#define AM65X_OLDI_DAT3_IO_CTRL 0x0C
+#define AM65X_OLDI_CLK_IO_CTRL 0x10
-#define OLDI_PWRDN_TX BIT(8)
+#define AM65X_OLDI_PWRDN_TX BIT(8)
#endif /* __TIDSS_DISPC_REGS_H */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index d4652e8cc28c..a1b12e52aca4 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -24,6 +24,7 @@
#include "tidss_drv.h"
#include "tidss_kms.h"
#include "tidss_irq.h"
+#include "tidss_oldi.h"
/* Power management */
@@ -147,6 +148,10 @@ static int tidss_probe(struct platform_device *pdev)
return ret;
}
+ ret = tidss_oldi_init(tidss);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init OLDI\n");
+
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 1000);
@@ -203,6 +208,8 @@ err_runtime_suspend:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
+ tidss_oldi_deinit(tidss);
+
return ret;
}
@@ -227,6 +234,8 @@ static void tidss_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
+ tidss_oldi_deinit(tidss);
+
/* devm allocated dispc goes away with the dev so mark it NULL */
dispc_remove(tidss);
@@ -242,6 +251,7 @@ static const struct of_device_id tidss_of_table[] = {
{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
{ .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
{ .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, },
+ { .compatible = "ti,am62l-dss", .data = &dispc_am62l_feats, },
{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
{ }
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index 7f4f4282bc04..d14d5d28f0a3 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -11,8 +11,10 @@
#define TIDSS_MAX_PORTS 4
#define TIDSS_MAX_PLANES 4
+#define TIDSS_MAX_OLDI_TXES 2
typedef u32 dispc_irq_t;
+struct tidss_oldi;
struct tidss_device {
struct drm_device ddev; /* DRM device for DSS */
@@ -27,6 +29,9 @@ struct tidss_device {
unsigned int num_planes;
struct drm_plane *planes[TIDSS_MAX_PLANES];
+ unsigned int num_oldis;
+ struct tidss_oldi *oldis[TIDSS_MAX_OLDI_TXES];
+
unsigned int irq;
/* protects the irq masks field and irqenable/irqstatus registers */
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index 17a86bed8054..81a04f767770 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -34,11 +34,12 @@ static inline struct tidss_encoder
}
static int tidss_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
- return drm_bridge_attach(bridge->encoder, t_enc->next_bridge,
+ return drm_bridge_attach(encoder, t_enc->next_bridge,
bridge, flags);
}
@@ -89,14 +90,18 @@ int tidss_encoder_create(struct tidss_device *tidss,
struct drm_connector *connector;
int ret;
- t_enc = drmm_simple_encoder_alloc(&tidss->ddev, struct tidss_encoder,
- encoder, encoder_type);
+ t_enc = devm_drm_bridge_alloc(tidss->dev, struct tidss_encoder,
+ bridge, &tidss_bridge_funcs);
if (IS_ERR(t_enc))
return PTR_ERR(t_enc);
+ ret = drm_simple_encoder_init(&tidss->ddev, &t_enc->encoder,
+ encoder_type);
+ if (ret)
+ return ret;
+
t_enc->tidss = tidss;
t_enc->next_bridge = next_bridge;
- t_enc->bridge.funcs = &tidss_bridge_funcs;
enc = &t_enc->encoder;
enc->possible_crtcs = possible_crtcs;
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index f371518f8697..c34eb90cddbe 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -115,7 +115,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
const struct dispc_features *feat = tidss->feat;
u32 max_vps = feat->num_vps;
- u32 max_planes = feat->num_planes;
+ u32 max_planes = feat->num_vids;
struct pipe pipes[TIDSS_MAX_PORTS];
u32 num_pipes = 0;
@@ -144,7 +144,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
dev_dbg(dev, "Setting up panel for port %d\n", i);
switch (feat->vp_bus_type[i]) {
- case DISPC_VP_OLDI:
+ case DISPC_VP_OLDI_AM65X:
enc_type = DRM_MODE_ENCODER_LVDS;
conn_type = DRM_MODE_CONNECTOR_LVDS;
break;
diff --git a/drivers/gpu/drm/tidss/tidss_oldi.c b/drivers/gpu/drm/tidss/tidss_oldi.c
new file mode 100644
index 000000000000..8f25159d0666
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_oldi.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2025 - Texas Instruments Incorporated
+ *
+ * Aradhya Bhatia <a-bhatia1@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/mfd/syscon.h>
+#include <linux/media-bus-format.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
+
+#include "tidss_dispc.h"
+#include "tidss_dispc_regs.h"
+#include "tidss_oldi.h"
+
+struct tidss_oldi {
+ struct tidss_device *tidss;
+ struct device *dev;
+
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+
+ enum tidss_oldi_link_type link_type;
+ const struct oldi_bus_format *bus_format;
+ u32 oldi_instance;
+ int companion_instance; /* -1 when OLDI TX operates in Single-Link */
+ u32 parent_vp;
+
+ struct clk *serial;
+ struct regmap *io_ctrl;
+};
+
+struct oldi_bus_format {
+ u32 bus_fmt;
+ u32 data_width;
+ enum oldi_mode_reg_val oldi_mode_reg_val;
+ u32 input_bus_fmt;
+};
+
+static const struct oldi_bus_format oldi_bus_formats[] = {
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, SPWG_18, MEDIA_BUS_FMT_RGB666_1X18 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, SPWG_24, MEDIA_BUS_FMT_RGB888_1X24 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, JEIDA_24, MEDIA_BUS_FMT_RGB888_1X24 },
+};
+
+#define OLDI_IDLE_CLK_HZ	25000000 /* 25 MHz */
+
+static inline struct tidss_oldi *
+drm_bridge_to_tidss_oldi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tidss_oldi, bridge);
+}
+
+static int tidss_oldi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+
+ if (!oldi->next_bridge) {
+ dev_err(oldi->dev,
+ "%s: OLDI%u Failure attach next bridge\n",
+ __func__, oldi->oldi_instance);
+ return -ENODEV;
+ }
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ dev_err(oldi->dev,
+ "%s: OLDI%u DRM_BRIDGE_ATTACH_NO_CONNECTOR is mandatory.\n",
+ __func__, oldi->oldi_instance);
+ return -EINVAL;
+ }
+
+ return drm_bridge_attach(encoder, oldi->next_bridge, bridge, flags);
+}
+
+static int
+tidss_oldi_set_serial_clk(struct tidss_oldi *oldi, unsigned long rate)
+{
+ unsigned long new_rate;
+ int ret;
+
+ ret = clk_set_rate(oldi->serial, rate);
+ if (ret) {
+ dev_err(oldi->dev,
+ "OLDI%u: failed to set serial clk rate to %lu Hz\n",
+ oldi->oldi_instance, rate);
+ return ret;
+ }
+
+ new_rate = clk_get_rate(oldi->serial);
+
+ if (dispc_pclk_diff(rate, new_rate) > 5)
+ dev_warn(oldi->dev,
+ "OLDI%u Clock rate %lu differs over 5%% from requested %lu\n",
+ oldi->oldi_instance, new_rate, rate);
+
+ dev_dbg(oldi->dev, "OLDI%u: new rate %lu Hz (requested %lu Hz)\n",
+ oldi->oldi_instance, clk_get_rate(oldi->serial), rate);
+
+ return 0;
+}
+
+static void tidss_oldi_tx_power(struct tidss_oldi *oldi, bool enable)
+{
+ u32 mask;
+
+ /*
+	 * The power control bits are active low and default to 1, i.e. the
+	 * OLDI TXes come up powered off. To power one on, its bit must be
+	 * cleared to 0. Since not all OLDI TXes are necessarily in use, the
+	 * power logic powers them on selectively: building 'mask' from the
+	 * relevant bits ensures that the unused OLDI TXes remain powered off.
+ */
+
+ if (enable) {
+ switch (oldi->link_type) {
+ case OLDI_MODE_SINGLE_LINK:
+ /* Power-on only the required OLDI TX's IO*/
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) | OLDI_PWRDN_BG;
+ break;
+ case OLDI_MODE_CLONE_SINGLE_LINK:
+ case OLDI_MODE_DUAL_LINK:
+ /* Power-on both the OLDI TXes' IOs */
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) |
+ OLDI_PWRDOWN_TX(oldi->companion_instance) |
+ OLDI_PWRDN_BG;
+ break;
+ default:
+ /*
+			 * Execution should never reach here, as any
+			 * OLDI with an unsupported OLDI mode would
+			 * never get registered in the first place.
+			 * However, power off the OLDI in question just
+			 * in case.
+ */
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance);
+ enable = false;
+ break;
+ }
+ } else {
+ switch (oldi->link_type) {
+ case OLDI_MODE_CLONE_SINGLE_LINK:
+ case OLDI_MODE_DUAL_LINK:
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) |
+ OLDI_PWRDOWN_TX(oldi->companion_instance) |
+ OLDI_PWRDN_BG;
+ break;
+ case OLDI_MODE_SINGLE_LINK:
+ default:
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance);
+ break;
+ }
+ }
+
+ regmap_update_bits(oldi->io_ctrl, OLDI_PD_CTRL, mask, enable ? 0 : mask);
+}
+
+static int tidss_oldi_config(struct tidss_oldi *oldi)
+{
+ const struct oldi_bus_format *bus_fmt = NULL;
+ u32 oldi_cfg = 0;
+ int ret;
+
+ bus_fmt = oldi->bus_format;
+
+ /*
+ * MASTERSLAVE and SRC bits of OLDI Config are always set to 0.
+ */
+
+ if (bus_fmt->data_width == 24)
+ oldi_cfg |= OLDI_MSB;
+ else if (bus_fmt->data_width != 18)
+ dev_warn(oldi->dev,
+ "OLDI%u: DSS port width %d not supported\n",
+ oldi->oldi_instance, bus_fmt->data_width);
+
+ oldi_cfg |= OLDI_DEPOL;
+
+ oldi_cfg = (oldi_cfg & (~OLDI_MAP)) | (bus_fmt->oldi_mode_reg_val << 1);
+
+ oldi_cfg |= OLDI_SOFTRST;
+
+ oldi_cfg |= OLDI_ENABLE;
+
+ switch (oldi->link_type) {
+ case OLDI_MODE_SINGLE_LINK:
+ /* All configuration is done for this mode. */
+ break;
+
+ case OLDI_MODE_CLONE_SINGLE_LINK:
+ oldi_cfg |= OLDI_CLONE_MODE;
+ break;
+
+ case OLDI_MODE_DUAL_LINK:
+ /* data-mapping field also indicates dual-link mode */
+ oldi_cfg |= BIT(3);
+ oldi_cfg |= OLDI_DUALMODESYNC;
+ break;
+
+ default:
+ dev_err(oldi->dev, "OLDI%u: Unsupported mode.\n",
+ oldi->oldi_instance);
+ return -EINVAL;
+ }
+
+ ret = tidss_configure_oldi(oldi->tidss, oldi->parent_vp, oldi_cfg);
+ if (ret == -ETIMEDOUT)
+ dev_warn(oldi->dev, "OLDI%u: timeout waiting for OLDI reset done.\n",
+ oldi->oldi_instance);
+
+ return ret;
+}
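+
+/*
+ * Worked example: for a single-link SPWG 24-bit panel, the function above
+ * produces oldi_cfg = OLDI_ENABLE | (SPWG_24 << 1) | OLDI_DEPOL | OLDI_MSB |
+ * OLDI_SOFTRST = 0x1185, with the OLDI_MAP field holding SPWG_24.
+ */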
+
+static void tidss_oldi_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+
+ if (oldi->link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK)
+ return;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ mode = &crtc_state->adjusted_mode;
+
+	/* Configure the OLDI params */
+ tidss_oldi_config(oldi);
+
+ /* Set the OLDI serial clock (7 times the pixel clock) */
+ tidss_oldi_set_serial_clk(oldi, mode->clock * 7 * 1000);
+
+ /* Enable OLDI IO power */
+ tidss_oldi_tx_power(oldi, true);
+}
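+
+/*
+ * For example, a 1080p60 mode with mode->clock == 148500 (kHz) requests an
+ * OLDI serial clock of 148500 * 7 * 1000 = 1.0395 GHz, i.e. 7 serial bits
+ * per pixel clock cycle.
+ */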
+
+static void tidss_oldi_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+
+ if (oldi->link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK)
+ return;
+
+ /* Disable OLDI IO power */
+ tidss_oldi_tx_power(oldi, false);
+
+ /* Set the OLDI serial clock to IDLE Frequency */
+ tidss_oldi_set_serial_clk(oldi, OLDI_IDLE_CLK_HZ);
+
+ /* Clear OLDI Config */
+ tidss_disable_oldi(oldi->tidss, oldi->parent_vp);
+}
+
+#define MAX_INPUT_SEL_FORMATS 1
+
+static u32 *tidss_oldi_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+ u32 *input_fmts;
+ int i;
+
+ *num_input_fmts = 0;
+
+ for (i = 0; i < ARRAY_SIZE(oldi_bus_formats); i++)
+ if (oldi_bus_formats[i].bus_fmt == output_fmt)
+ break;
+
+ if (i == ARRAY_SIZE(oldi_bus_formats))
+ return NULL;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ *num_input_fmts = 1;
+ input_fmts[0] = oldi_bus_formats[i].input_bus_fmt;
+ oldi->bus_format = &oldi_bus_formats[i];
+
+ return input_fmts;
+}
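+
+/*
+ * Example of the negotiation above: if MEDIA_BUS_FMT_RGB888_1X7X4_SPWG is
+ * selected as the OLDI output format, the table maps it to
+ * MEDIA_BUS_FMT_RGB888_1X24 as the input format on the DSS side.
+ */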
+
+static const struct drm_bridge_funcs tidss_oldi_bridge_funcs = {
+ .attach = tidss_oldi_bridge_attach,
+ .atomic_pre_enable = tidss_oldi_atomic_pre_enable,
+ .atomic_post_disable = tidss_oldi_atomic_post_disable,
+ .atomic_get_input_bus_fmts = tidss_oldi_atomic_get_input_bus_fmts,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+static int get_oldi_mode(struct device_node *oldi_tx, int *companion_instance)
+{
+ struct device_node *companion;
+ struct device_node *port0, *port1;
+ u32 companion_reg;
+ bool secondary_oldi = false;
+ int pixel_order;
+
+ /*
+ * Find if the OLDI is paired with another OLDI for combined OLDI
+ * operation (dual-link or clone).
+ */
+ companion = of_parse_phandle(oldi_tx, "ti,companion-oldi", 0);
+ if (!companion)
+ /*
+ * The OLDI TX does not have a companion, nor is it a
+ * secondary OLDI. It will operate independently.
+ */
+ return OLDI_MODE_SINGLE_LINK;
+
+ if (of_property_read_u32(companion, "reg", &companion_reg))
+ return OLDI_MODE_UNSUPPORTED;
+
+ if (companion_reg > (TIDSS_MAX_OLDI_TXES - 1))
+ /* Invalid companion OLDI reg value. */
+ return OLDI_MODE_UNSUPPORTED;
+
+ *companion_instance = (int)companion_reg;
+
+ if (of_property_read_bool(oldi_tx, "ti,secondary-oldi"))
+ secondary_oldi = true;
+
+ /*
+	 * We need to work out whether the sink expects us to function in
+	 * dual-link mode. We do this by looking at the DT port nodes that
+	 * the OLDI TX ports are connected to. If they are marked as expecting
+	 * even pixels and odd pixels, then we need to enable dual-link mode.
+ */
+ port0 = of_graph_get_port_by_id(oldi_tx, 1);
+ port1 = of_graph_get_port_by_id(companion, 1);
+ pixel_order = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
+ of_node_put(port0);
+ of_node_put(port1);
+ of_node_put(companion);
+
+ switch (pixel_order) {
+ case -EINVAL:
+ /*
+ * The dual-link properties were not found in at least
+ * one of the sink nodes. Since 2 OLDI ports are present
+ * in the DT, it can be safely assumed that the required
+ * configuration is Clone Mode.
+ */
+ return (secondary_oldi ? OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK :
+ OLDI_MODE_CLONE_SINGLE_LINK);
+
+ case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
+ /*
+ * Primary OLDI can only support "ODD" pixels. So, from its
+ * perspective, the pixel order has to be ODD-EVEN.
+ */
+ return (secondary_oldi ? OLDI_MODE_UNSUPPORTED :
+ OLDI_MODE_DUAL_LINK);
+
+ case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
+ /*
+ * Secondary OLDI can only support "EVEN" pixels. So, from its
+ * perspective, the pixel order has to be EVEN-ODD.
+ */
+ return (secondary_oldi ? OLDI_MODE_SECONDARY_DUAL_LINK :
+ OLDI_MODE_UNSUPPORTED);
+
+ default:
+ return OLDI_MODE_UNSUPPORTED;
+ }
+}
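+
+/*
+ * Hypothetical devicetree fragment matching the dual-link/clone detection
+ * above (node names and labels are examples only, not a binding reference):
+ *
+ *	oldi-transmitters {
+ *		oldi0: oldi@0 {
+ *			reg = <0>;
+ *			ti,companion-oldi = <&oldi1>;
+ *		};
+ *		oldi1: oldi@1 {
+ *			reg = <1>;
+ *			ti,secondary-oldi;
+ *			ti,companion-oldi = <&oldi0>;
+ *		};
+ *	};
+ */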
+
+static int get_parent_dss_vp(struct device_node *oldi_tx, u32 *parent_vp)
+{
+ struct device_node *ep, *dss_port;
+ int ret;
+
+ ep = of_graph_get_endpoint_by_regs(oldi_tx, OLDI_INPUT_PORT, -1);
+ if (ep) {
+ dss_port = of_graph_get_remote_port(ep);
+ if (!dss_port) {
+ ret = -ENODEV;
+ goto err_return_ep_port;
+ }
+
+ ret = of_property_read_u32(dss_port, "reg", parent_vp);
+
+ of_node_put(dss_port);
+err_return_ep_port:
+ of_node_put(ep);
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static const struct drm_bridge_timings default_tidss_oldi_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+void tidss_oldi_deinit(struct tidss_device *tidss)
+{
+ for (int i = 0; i < tidss->num_oldis; i++) {
+ if (tidss->oldis[i]) {
+ drm_bridge_remove(&tidss->oldis[i]->bridge);
+ tidss->oldis[i] = NULL;
+ }
+ }
+}
+
+int tidss_oldi_init(struct tidss_device *tidss)
+{
+ struct tidss_oldi *oldi;
+ struct device_node *child;
+ struct drm_bridge *bridge;
+ u32 parent_vp, oldi_instance;
+ int companion_instance = -1;
+ enum tidss_oldi_link_type link_type = OLDI_MODE_UNSUPPORTED;
+ struct device_node *oldi_parent;
+ int ret = 0;
+
+ tidss->num_oldis = 0;
+
+ oldi_parent = of_get_child_by_name(tidss->dev->of_node, "oldi-transmitters");
+ if (!oldi_parent)
+ /* Return gracefully */
+ return 0;
+
+ for_each_available_child_of_node(oldi_parent, child) {
+ ret = get_parent_dss_vp(child, &parent_vp);
+ if (ret) {
+ if (ret == -ENODEV) {
+ /*
+ * ENODEV means that this particular OLDI node
+				 * is not connected to the DSS, which is not
+				 * harmful: another OLDI may still be
+				 * connected. Continue the search for that.
+ */
+ ret = 0;
+ continue;
+ }
+ goto err_put_node;
+ }
+
+ ret = of_property_read_u32(child, "reg", &oldi_instance);
+ if (ret)
+ goto err_put_node;
+
+ /*
+		 * Now that the OLDI is confirmed to be connected to the DSS,
+		 * go on to fetch the OLDI sinks and the remaining OLDI
+		 * properties.
+ */
+ bridge = devm_drm_of_get_bridge(tidss->dev, child,
+ OLDI_OUTPUT_PORT, 0);
+ if (IS_ERR(bridge)) {
+ /*
+ * Either there was no OLDI sink in the devicetree, or
+ * the OLDI sink has not been added yet. In any case,
+ * return.
+ * We don't want to have an OLDI node connected to DSS
+ * but not to any sink.
+ */
+ ret = dev_err_probe(tidss->dev, PTR_ERR(bridge),
+ "no panel/bridge for OLDI%u.\n",
+ oldi_instance);
+ goto err_put_node;
+ }
+
+ link_type = get_oldi_mode(child, &companion_instance);
+ if (link_type == OLDI_MODE_UNSUPPORTED) {
+ ret = dev_err_probe(tidss->dev, -EINVAL,
+ "OLDI%u: Unsupported OLDI connection.\n",
+ oldi_instance);
+ goto err_put_node;
+ } else if ((link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) ||
+ (link_type == OLDI_MODE_CLONE_SINGLE_LINK)) {
+ /*
+			 * The OLDI driver cannot properly support OLDI
+			 * clone mode at present.
+			 * Clone mode requires 2 working encoder-bridge
+			 * pipelines fed from the same CRTC. The DRM
+			 * framework does not support this at present.
+			 * If there were, say, 2 OLDI sink bridges, each
+			 * connected to its own OLDI TX, they could not
+			 * both be supported simultaneously.
+			 * This driver still carries some code for OLDI
+			 * clone mode configuration in the DSS hardware,
+			 * for the future when the DRM framework has the
+			 * infrastructure to support 2 encoder-bridge
+			 * pipelines simultaneously.
+			 * Until then, this driver errors out if it
+			 * detects a clone mode configuration.
+ */
+ ret = dev_err_probe(tidss->dev, -EOPNOTSUPP,
+ "The OLDI driver does not support Clone Mode at present.\n");
+ goto err_put_node;
+ } else if (link_type == OLDI_MODE_SECONDARY_DUAL_LINK) {
+ /*
+			 * This is the secondary OLDI node, which serves
+			 * as a companion to the primary OLDI when the
+			 * latter is configured for dual-link mode. Since
+			 * the primary OLDI will be part of the bridge
+			 * chain, this one does not need to be registered
+			 * as well. Continue on to the next OLDI node.
+ */
+ continue;
+ }
+
+ oldi = devm_drm_bridge_alloc(tidss->dev, struct tidss_oldi, bridge,
+ &tidss_oldi_bridge_funcs);
+ if (IS_ERR(oldi)) {
+ ret = PTR_ERR(oldi);
+ goto err_put_node;
+ }
+
+ oldi->parent_vp = parent_vp;
+ oldi->oldi_instance = oldi_instance;
+ oldi->companion_instance = companion_instance;
+ oldi->link_type = link_type;
+ oldi->dev = tidss->dev;
+ oldi->next_bridge = bridge;
+
+ /*
+		 * Only the primary OLDI needs to reference the io-ctrl system
+		 * registers and the serial clock.
+		 * No check is required for a secondary OLDI in dual-link mode,
+		 * because the driver does not create a drm_bridge instance for
+		 * it. The driver will, however, need to create a drm_bridge
+		 * instance for a secondary OLDI in clone mode (once supported).
+ */
+ if (link_type != OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) {
+ oldi->io_ctrl = syscon_regmap_lookup_by_phandle(child,
+ "ti,oldi-io-ctrl");
+ if (IS_ERR(oldi->io_ctrl)) {
+ ret = dev_err_probe(oldi->dev, PTR_ERR(oldi->io_ctrl),
+ "OLDI%u: syscon_regmap_lookup_by_phandle failed.\n",
+ oldi_instance);
+ goto err_put_node;
+ }
+
+ oldi->serial = of_clk_get_by_name(child, "serial");
+ if (IS_ERR(oldi->serial)) {
+ ret = dev_err_probe(oldi->dev, PTR_ERR(oldi->serial),
+ "OLDI%u: Failed to get serial clock.\n",
+ oldi_instance);
+ goto err_put_node;
+ }
+ }
+
+ /* Register the bridge. */
+ oldi->bridge.of_node = child;
+ oldi->bridge.driver_private = oldi;
+ oldi->bridge.timings = &default_tidss_oldi_timings;
+
+ tidss->oldis[tidss->num_oldis++] = oldi;
+ oldi->tidss = tidss;
+
+ drm_bridge_add(&oldi->bridge);
+ }
+
+ of_node_put(child);
+ of_node_put(oldi_parent);
+
+ return 0;
+
+err_put_node:
+ of_node_put(child);
+ of_node_put(oldi_parent);
+ return ret;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_oldi.h b/drivers/gpu/drm/tidss/tidss_oldi.h
new file mode 100644
index 000000000000..8cd535c5ee65
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_oldi.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025 - Texas Instruments Incorporated
+ *
+ * Aradhya Bhatia <a-bhatia1@ti.com>
+ */
+
+#ifndef __TIDSS_OLDI_H__
+#define __TIDSS_OLDI_H__
+
+#include "tidss_drv.h"
+
+struct tidss_oldi;
+
+/* OLDI PORTS */
+#define OLDI_INPUT_PORT 0
+#define OLDI_OUTPUT_PORT 1
+
+/* Control MMR Registers */
+
+/* Register offsets */
+#define OLDI_PD_CTRL 0x100
+#define OLDI_LB_CTRL 0x104
+
+/* Power control bits */
+#define OLDI_PWRDOWN_TX(n) BIT(n)
+
+/* LVDS Bandgap reference Enable/Disable */
+#define OLDI_PWRDN_BG BIT(8)
+
+enum tidss_oldi_link_type {
+ OLDI_MODE_UNSUPPORTED,
+ OLDI_MODE_SINGLE_LINK,
+ OLDI_MODE_CLONE_SINGLE_LINK,
+ OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK,
+ OLDI_MODE_DUAL_LINK,
+ OLDI_MODE_SECONDARY_DUAL_LINK,
+};
+
+int tidss_oldi_init(struct tidss_device *tidss);
+void tidss_oldi_deinit(struct tidss_device *tidss);
+
+#endif /* __TIDSS_OLDI_H__ */
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 719412e6c346..142ae81951a0 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -200,7 +200,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
struct tidss_plane *tplane;
enum drm_plane_type type;
u32 possible_crtcs;
- u32 num_planes = tidss->feat->num_planes;
+ u32 num_planes = tidss->feat->num_vids;
u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709));
u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 54c84c9801c1..06e54694a7f2 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -3,6 +3,7 @@
config DRM_APPLETBDRM
tristate "DRM support for Apple Touch Bars"
depends on DRM && USB && MMU
+ depends on X86 || COMPILE_TEST
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
@@ -37,7 +38,7 @@ config DRM_BOCHS
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
@@ -65,20 +66,6 @@ config DRM_GM12U320
This is a KMS driver for projectors which use the GM12U320 chipset
for video transfer over USB2/3, such as the Acer C120 mini projector.
-config DRM_OFDRM
- tristate "Open Firmware display driver"
- depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for Open Firmware framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the Open Firmware before the kernel boots. Scanout buffer, size,
- and display format must be provided via device tree.
-
config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
@@ -95,24 +82,6 @@ config DRM_PANEL_MIPI_DBI
https://github.com/notro/panel-mipi-dbi/wiki.
To compile this driver as a module, choose M here.
-config DRM_SIMPLEDRM
- tristate "Simple framebuffer driver"
- depends on DRM && MMU
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for simple platform-provided framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the firmware or bootloader before the kernel boots. Scanout
- buffer, size, and display format must be provided via device tree,
- UEFI, VESA, etc.
-
- On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
- to use UEFI and VESA framebuffers.
-
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
@@ -230,32 +199,3 @@ config TINYDRM_SHARP_MEMORY
* 4.40" Sharp Memory LCD (LS044Q7DH01)
If M is selected the module will be called sharp_memory.
-
-config TINYDRM_ST7586
- tristate "DRM support for Sitronix ST7586 display panels"
- depends on DRM && SPI
- select DRM_CLIENT_SELECTION
- select DRM_KMS_HELPER
- select DRM_GEM_DMA_HELPER
- select DRM_MIPI_DBI
- help
- DRM driver for the following Sitronix ST7586 panels:
- * LEGO MINDSTORMS EV3
-
- If M is selected the module will be called st7586.
-
-config TINYDRM_ST7735R
- tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
- depends on DRM && SPI
- select DRM_CLIENT_SELECTION
- select DRM_KMS_HELPER
- select DRM_GEM_DMA_HELPER
- select DRM_MIPI_DBI
- select BACKLIGHT_CLASS_DEVICE
- help
- DRM driver for Sitronix ST7715R/ST7735R with one of the following
- LCDs:
- * Jianda JD-T18003-T01 1.8" 128x160 TFT
- * Okaya RH128128T 1.44" 128x128 TFT
-
- If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 0a3a7837a58b..4a9ff61ec254 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -5,9 +5,7 @@ obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
-obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
-obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
@@ -16,5 +14,3 @@ obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_SHARP_MEMORY) += sharp-memory.o
-obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
-obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c
index 4370ba22dd88..751b05753c94 100644
--- a/drivers/gpu/drm/tiny/appletbdrm.c
+++ b/drivers/gpu/drm/tiny/appletbdrm.c
@@ -45,7 +45,7 @@
#define APPLETBDRM_BULK_MSG_TIMEOUT 1000
#define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm)
-#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev))
+#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface((adev)->drm.dev))
struct appletbdrm_msg_request_header {
__le16 unk_00;
@@ -123,8 +123,6 @@ struct appletbdrm_fb_request_response {
} __packed;
struct appletbdrm_device {
- struct device *dmadev;
-
unsigned int in_ep;
unsigned int out_ep;
@@ -214,7 +212,7 @@ retry:
}
if (response->msg != expected_response) {
- drm_err(drm, "Unexpected response from device (expected %p4cc found %p4cc)\n",
+ drm_err(drm, "Unexpected response from device (expected %p4cl found %p4cl)\n",
&expected_response, &response->msg);
return -EIO;
}
@@ -288,7 +286,7 @@ static int appletbdrm_get_information(struct appletbdrm_device *adev)
}
if (pixel_format != APPLETBDRM_PIXEL_FORMAT) {
- drm_err(drm, "Encountered unknown pixel format (%p4cc)\n", &pixel_format);
+ drm_err(drm, "Encountered unknown pixel format (%p4cl)\n", &pixel_format);
ret = -EINVAL;
goto free_info;
}
@@ -612,22 +610,10 @@ static const struct drm_encoder_funcs appletbdrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct appletbdrm_device *adev = drm_to_adev(dev);
-
- if (!adev->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops);
static const struct drm_driver appletbdrm_drm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = appletbdrm_driver_gem_prime_import,
.name = "appletbdrm",
.desc = "Apple Touch Bar DRM Driver",
.major = 1,
@@ -747,6 +733,7 @@ static int appletbdrm_probe(struct usb_interface *intf,
struct device *dev = &intf->dev;
struct appletbdrm_device *adev;
struct drm_device *drm = NULL;
+ struct device *dma_dev;
int ret;
ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL);
@@ -761,12 +748,19 @@ static int appletbdrm_probe(struct usb_interface *intf,
adev->in_ep = bulk_in->bEndpointAddress;
adev->out_ep = bulk_out->bEndpointAddress;
- adev->dmadev = dev;
drm = &adev->drm;
usb_set_intfdata(intf, adev);
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ drm_warn(drm, "buffer sharing not supported"); /* not an error */
+ }
+
ret = appletbdrm_get_information(adev);
if (ret) {
drm_err(drm, "Failed to get display information\n");
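
The appletbdrm, gm12u320 and udl hunks in this series all replace the same
per-driver gem_prime_import workaround with drm_dev_set_dma_dev(). A
condensed sketch of the new probe-time pattern (identifiers assumed):

	#include <drm/drm_drv.h>
	#include <drm/drm_print.h>
	#include <linux/usb.h>

	/*
	 * Sketch: hand the USB interface's DMA-capable parent to the DRM
	 * core so the default PRIME import path can use it. A missing DMA
	 * device only disables buffer sharing; it is not a probe failure.
	 */
	static void example_setup_dma_dev(struct drm_device *drm,
					  struct usb_interface *intf)
	{
		struct device *dma_dev = usb_intf_get_dma_device(intf);

		if (dma_dev) {
			drm_dev_set_dma_dev(drm, dma_dev);
			put_device(dma_dev); /* the DRM device keeps its own ref */
		} else {
			drm_warn(drm, "buffer sharing not supported\n");
		}
	}
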
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 8706763af8fb..8d3b7c4fa6a4 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -19,6 +19,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
+#include <drm/drm_panic.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -469,10 +470,28 @@ static void bochs_primary_plane_helper_atomic_update(struct drm_plane *plane,
bochs_hw_setformat(bochs, fb->format);
}
+static int bochs_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct bochs_device *bochs = to_bochs_device(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(bochs->fb_map);
+
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
+ }
+ return -ENODEV;
+}
+
static const struct drm_plane_helper_funcs bochs_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = bochs_primary_plane_helper_atomic_check,
.atomic_update = bochs_primary_plane_helper_atomic_update,
+ .get_scanout_buffer = bochs_primary_plane_helper_get_scanout_buffer,
};
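
For context, the panic handler consumes the struct drm_scanout_buffer
filled in above roughly as follows; this is a simplified sketch, not the
actual drm_panic drawing code:

	#include <drm/drm_fourcc.h>
	#include <drm/drm_panic.h>
	#include <linux/iosys-map.h>

	/* Sketch: paint one pixel into a scanout buffer description. */
	static void example_set_pixel(struct drm_scanout_buffer *sb,
				      unsigned int x, unsigned int y, u32 color)
	{
		unsigned int cpp = sb->format->cpp[0];
		size_t offset = (size_t)y * sb->pitch[0] + (size_t)x * cpp;

		/* iosys_map handles both system and I/O memory mappings. */
		iosys_map_memcpy_to(&sb->map[0], offset, &color, cpp);
	}
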
static const struct drm_plane_funcs bochs_primary_plane_funcs = {
diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 52ec1e4ea9e5..97a93adc5669 100644
--- a/drivers/gpu/drm/tiny/cirrus-qemu.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -70,20 +70,6 @@ struct cirrus_device {
#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
-struct cirrus_primary_plane_state {
- struct drm_shadow_plane_state base;
-
- /* HW scanout buffer */
- const struct drm_format_info *format;
- unsigned int pitch;
-};
-
-static inline struct cirrus_primary_plane_state *
-to_cirrus_primary_plane_state(struct drm_plane_state *plane_state)
-{
- return container_of(plane_state, struct cirrus_primary_plane_state, base.base);
-};
-
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
@@ -144,37 +130,6 @@ static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
}
-static const struct drm_format_info *cirrus_convert_to(struct drm_framebuffer *fb)
-{
- if (fb->format->format == DRM_FORMAT_XRGB8888 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
- if (fb->width * 3 <= CIRRUS_MAX_PITCH)
- /* convert from XR24 to RG24 */
- return drm_format_info(DRM_FORMAT_RGB888);
- else
- /* convert from XR24 to RG16 */
- return drm_format_info(DRM_FORMAT_RGB565);
- }
- return NULL;
-}
-
-static const struct drm_format_info *cirrus_format(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return format;
- return fb->format;
-}
-
-static int cirrus_pitch(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return drm_format_info_min_pitch(format, 0, fb->width);
- return fb->pitches[0];
-}
-
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
u32 addr;
@@ -318,7 +273,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch)
/* Enable extended blanking and pitch bits, and enable full memory */
cr1b = 0x22;
cr1b |= (pitch >> 7) & 0x10;
- cr1b |= (pitch >> 6) & 0x40;
wreg_crt(cirrus, 0x1b, cr1b);
cirrus_set_start_address(cirrus, 0);
@@ -342,13 +296,10 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *new_primary_plane_state =
- to_cirrus_primary_plane_state(new_plane_state);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
int ret;
- unsigned int pitch;
if (new_crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
@@ -362,17 +313,12 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
- pitch = cirrus_pitch(fb);
-
/* validate size constraints */
- if (pitch > CIRRUS_MAX_PITCH)
+ if (fb->pitches[0] > CIRRUS_MAX_PITCH)
return -EINVAL;
- else if (pitch * fb->height > CIRRUS_VRAM_SIZE)
+ else if (fb->pitches[0] > CIRRUS_VRAM_SIZE / fb->height)
return -EINVAL;
- new_primary_plane_state->format = cirrus_format(fb);
- new_primary_plane_state->pitch = pitch;
-
return 0;
}
@@ -381,15 +327,10 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
{
struct cirrus_device *cirrus = to_cirrus(plane->dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
- const struct drm_format_info *format = primary_plane_state->format;
- unsigned int pitch = primary_plane_state->pitch;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct cirrus_primary_plane_state *old_primary_plane_state =
- to_cirrus_primary_plane_state(old_plane_state);
+ struct drm_framebuffer *old_fb = old_plane_state->fb;
struct iosys_map vaddr = IOSYS_MAP_INIT_VADDR_IOMEM(cirrus->vram);
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
@@ -401,18 +342,17 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(&cirrus->dev, &idx))
return;
- if (old_primary_plane_state->format != format)
- cirrus_format_set(cirrus, format);
- if (old_primary_plane_state->pitch != pitch)
- cirrus_pitch_set(cirrus, pitch);
+ if (!old_fb || old_fb->format != fb->format)
+ cirrus_format_set(cirrus, fb->format);
+ if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
+ cirrus_pitch_set(cirrus, fb->pitches[0]);
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
- unsigned int offset = drm_fb_clip_offset(pitch, format, &damage);
+ unsigned int offset = drm_fb_clip_offset(fb->pitches[0], fb->format, &damage);
struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
- drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
+ drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage);
}
drm_dev_exit(idx);
@@ -424,62 +364,11 @@ static const struct drm_plane_helper_funcs cirrus_primary_plane_helper_funcs = {
.atomic_update = cirrus_primary_plane_helper_atomic_update,
};
-static struct drm_plane_state *
-cirrus_primary_plane_atomic_duplicate_state(struct drm_plane *plane)
-{
- struct drm_plane_state *plane_state = plane->state;
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
- struct cirrus_primary_plane_state *new_primary_plane_state;
- struct drm_shadow_plane_state *new_shadow_plane_state;
-
- if (!plane_state)
- return NULL;
-
- new_primary_plane_state = kzalloc(sizeof(*new_primary_plane_state), GFP_KERNEL);
- if (!new_primary_plane_state)
- return NULL;
- new_shadow_plane_state = &new_primary_plane_state->base;
-
- __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
- new_primary_plane_state->format = primary_plane_state->format;
- new_primary_plane_state->pitch = primary_plane_state->pitch;
-
- return &new_shadow_plane_state->base;
-}
-
-static void cirrus_primary_plane_atomic_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
-{
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
-
- __drm_gem_destroy_shadow_plane_state(&primary_plane_state->base);
- kfree(primary_plane_state);
-}
-
-static void cirrus_reset_primary_plane(struct drm_plane *plane)
-{
- struct cirrus_primary_plane_state *primary_plane_state;
-
- if (plane->state) {
- cirrus_primary_plane_atomic_destroy_state(plane, plane->state);
- plane->state = NULL; /* must be set to NULL here */
- }
-
- primary_plane_state = kzalloc(sizeof(*primary_plane_state), GFP_KERNEL);
- if (!primary_plane_state)
- return;
- __drm_gem_reset_shadow_plane(plane, &primary_plane_state->base);
-}
-
static const struct drm_plane_funcs cirrus_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = cirrus_reset_primary_plane,
- .atomic_duplicate_state = cirrus_primary_plane_atomic_duplicate_state,
- .atomic_destroy_state = cirrus_primary_plane_atomic_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int cirrus_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
@@ -614,9 +503,17 @@ static enum drm_mode_status cirrus_mode_config_mode_valid(struct drm_device *dev
const struct drm_display_mode *mode)
{
const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888);
- uint64_t pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ u64 pitch;
- if (pitch * mode->vdisplay > CIRRUS_VRAM_SIZE)
+ if (drm_WARN_ON_ONCE(dev, !format))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > CIRRUS_MAX_PITCH)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > CIRRUS_VRAM_SIZE / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
@@ -681,7 +578,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, DRIVER_NAME);
+ ret = pcim_request_all_regions(pdev, DRIVER_NAME);
if (ret)
return ret;
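
The rewritten size checks avoid a 32-bit multiply overflow rather than
changing the limit. A worked sketch of the failure mode the division form
prevents (helper name is illustrative):

	/*
	 * With 32-bit math, pitch * height can wrap: e.g. pitch = 16384
	 * and height = 262144 gives 2^32, which truncates to 0 and would
	 * pass a naive "pitch * height > vram_size" rejection test.
	 * Comparing pitch against vram_size / height cannot wrap.
	 */
	static bool example_fits_vram(u32 pitch, u32 height, u32 vram_size)
	{
		return pitch <= vram_size / height; /* height != 0 for a valid mode */
	}
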
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 41e9bfb2e2ff..fb0004166f4a 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -86,7 +86,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
struct gm12u320_device {
struct drm_device dev;
- struct device *dmadev;
struct drm_simple_display_pipe pipe;
struct drm_connector conn;
unsigned char *cmd_buf;
@@ -602,22 +601,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
-
- if (!gm12u320->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static const struct drm_driver gm12u320_drm_driver = {
@@ -630,7 +613,6 @@ static const struct drm_driver gm12u320_drm_driver = {
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gm12u320_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
};
@@ -645,6 +627,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
{
struct gm12u320_device *gm12u320;
struct drm_device *dev;
+ struct device *dma_dev;
int ret;
/*
@@ -660,16 +643,20 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
return PTR_ERR(gm12u320);
dev = &gm12u320->dev;
- gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!gm12u320->dmadev)
+ dma_dev = usb_intf_get_dma_device(interface);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
ret = drmm_mode_config_init(dev);
if (ret)
- goto err_put_device;
+ return ret;
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
@@ -679,15 +666,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_conn_init(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -697,31 +684,24 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
- goto err_put_device;
+ return ret;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_put_device;
+ return ret;
drm_client_setup(dev, NULL);
return 0;
-
-err_put_device:
- put_device(gm12u320->dmadev);
- return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
- put_device(gm12u320->dmadev);
- gm12u320->dmadev = NULL;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
}
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index 0460ecaef4bd..23914a9f7fd3 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -390,7 +390,10 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_client_setup(drm, NULL);
+ if (bpp == 16)
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
+ else
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index f8f20d2f6174..6c77550c51af 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -175,7 +175,7 @@ struct signal_timer {
static void signal_for_ttm_bo_reserve(struct timer_list *t)
{
- struct signal_timer *s_timer = from_timer(s_timer, t, timer);
+ struct signal_timer *s_timer = timer_container_of(s_timer, t, timer);
struct task_struct *task = s_timer->ctx->task;
do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
@@ -201,7 +201,7 @@ static int threaded_ttm_bo_reserve(void *arg)
err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
timer_delete_sync(&s_timer.timer);
- destroy_timer_on_stack(&s_timer.timer);
+ timer_destroy_on_stack(&s_timer.timer);
ww_acquire_fini(&ctx);
@@ -340,7 +340,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+ KUNIT_ASSERT_NOT_NULL(test, resv);
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
index 3148f5d3dbd6..1bcc67977f48 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -542,14 +542,15 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
bo->ttm = old_tt;
}
- err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
- KUNIT_EXPECT_EQ(test, err, 0);
- KUNIT_ASSERT_EQ(test, man->usage, size);
-
placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, placement);
ttm_bo_reserve(bo, false, false, NULL);
+
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, man->usage, size);
+
err = ttm_bo_validate(bo, placement, &ctx);
ttm_bo_unreserve(bo);
@@ -757,56 +758,6 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
-static void ttm_bo_validate_swapout(struct kunit *test)
-{
- unsigned long size_big, size = ALIGN(BO_SIZE, PAGE_SIZE);
- enum ttm_bo_type bo_type = ttm_bo_type_device;
- struct ttm_buffer_object *bo_small, *bo_big;
- struct ttm_test_devices *priv = test->priv;
- struct ttm_operation_ctx ctx = { };
- struct ttm_placement *placement;
- u32 mem_type = TTM_PL_TT;
- struct ttm_place *place;
- struct sysinfo si;
- int err;
-
- si_meminfo(&si);
- size_big = ALIGN(((u64)si.totalram * si.mem_unit / 2), PAGE_SIZE);
-
- ttm_mock_manager_init(priv->ttm_dev, mem_type, size_big + size);
-
- place = ttm_place_kunit_init(test, mem_type, 0);
- placement = ttm_placement_kunit_init(test, place, 1);
-
- bo_small = kunit_kzalloc(test, sizeof(*bo_small), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, bo_small);
-
- drm_gem_private_object_init(priv->drm, &bo_small->base, size);
-
- err = ttm_bo_init_reserved(priv->ttm_dev, bo_small, bo_type, placement,
- PAGE_SIZE, &ctx, NULL, NULL,
- &dummy_ttm_bo_destroy);
- KUNIT_EXPECT_EQ(test, err, 0);
- dma_resv_unlock(bo_small->base.resv);
-
- bo_big = ttm_bo_kunit_init(test, priv, size_big, NULL);
-
- dma_resv_lock(bo_big->base.resv, NULL);
- err = ttm_bo_validate(bo_big, placement, &ctx);
- dma_resv_unlock(bo_big->base.resv);
-
- KUNIT_EXPECT_EQ(test, err, 0);
- KUNIT_EXPECT_NOT_NULL(test, bo_big->resource);
- KUNIT_EXPECT_EQ(test, bo_big->resource->mem_type, mem_type);
- KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, TTM_PL_SYSTEM);
- KUNIT_EXPECT_TRUE(test, bo_small->ttm->page_flags & TTM_TT_FLAG_SWAPPED);
-
- ttm_bo_put(bo_big);
- ttm_bo_put(bo_small);
-
- ttm_mock_manager_fini(priv->ttm_dev, mem_type);
-}
-
static void ttm_bo_validate_happy_evict(struct kunit *test)
{
u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
@@ -1201,7 +1152,6 @@ static struct kunit_case ttm_bo_validate_test_cases[] = {
KUNIT_CASE(ttm_bo_validate_move_fence_signaled),
KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled,
ttm_bo_validate_wait_gen_params),
- KUNIT_CASE(ttm_bo_validate_swapout),
KUNIT_CASE(ttm_bo_validate_happy_evict),
KUNIT_CASE(ttm_bo_validate_all_pinned_evict),
KUNIT_CASE(ttm_bo_validate_allowed_only_evict),
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
index b91c13f46225..7aaf0d1395ff 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -2,6 +2,9 @@
/*
* Copyright © 2023 Intel Corporation
*/
+
+#include <linux/export.h>
+
#include <drm/ttm/ttm_tt.h>
#include "ttm_kunit_helpers.h"
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
index f6d1c8a2845d..d7eb6471f2ed 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
@@ -2,6 +2,9 @@
/*
* Copyright © 2023 Intel Corporation
*/
+
+#include <linux/export.h>
+
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index d27691f2e451..fca0a1a3c6fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/agp_backend.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index 93c007f18855..32530c75f038 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -4,24 +4,12 @@
*/
#include <drm/ttm/ttm_backup.h>
+
+#include <linux/export.h>
#include <linux/page-flags.h>
#include <linux/swap.h>
/*
- * Casting from randomized struct file * to struct ttm_backup * is fine since
- * struct ttm_backup is never defined nor dereferenced.
- */
-static struct file *ttm_backup_to_file(struct ttm_backup *backup)
-{
- return (void *)backup;
-}
-
-static struct ttm_backup *ttm_file_to_backup(struct file *file)
-{
- return (void *)file;
-}
-
-/*
* Need to map shmem indices to handle since a handle value
* of 0 means error, following the swp_entry_t convention.
*/
@@ -40,12 +28,12 @@ static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
* @backup: The struct backup pointer used to obtain the handle
* @handle: The handle obtained from the @backup_page function.
*/
-void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
+void ttm_backup_drop(struct file *backup, pgoff_t handle)
{
loff_t start = ttm_backup_handle_to_shmem_idx(handle);
start <<= PAGE_SHIFT;
- shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
+ shmem_truncate_range(file_inode(backup), start,
start + PAGE_SIZE - 1);
}
@@ -55,16 +43,15 @@ void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
* @backup: The struct backup pointer used to back up the page.
* @dst: The struct page to copy into.
* @handle: The handle returned when the page was backed up.
- * @intr: Try to perform waits interruptable or at least killable.
+ * @intr: Try to perform waits interruptible or at least killable.
*
* Return: 0 on success, Negative error code on failure, notably
* -EINTR if @intr was set to true and a signal is pending.
*/
-int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
pgoff_t handle, bool intr)
{
- struct file *filp = ttm_backup_to_file(backup);
- struct address_space *mapping = filp->f_mapping;
+ struct address_space *mapping = backup->f_mapping;
struct folio *from_folio;
pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
@@ -106,12 +93,11 @@ int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
 * the folio size and usage.
*/
s64
-ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ttm_backup_backup_page(struct file *backup, struct page *page,
bool writeback, pgoff_t idx, gfp_t page_gfp,
gfp_t alloc_gfp)
{
- struct file *filp = ttm_backup_to_file(backup);
- struct address_space *mapping = filp->f_mapping;
+ struct address_space *mapping = backup->f_mapping;
unsigned long handle = 0;
struct folio *to_folio;
int ret;
@@ -128,21 +114,14 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
if (writeback && !folio_mapped(to_folio) &&
folio_clear_dirty_for_io(to_folio)) {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- .nr_to_write = SWAP_CLUSTER_MAX,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .for_reclaim = 1,
- };
folio_set_reclaim(to_folio);
- ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
+ ret = shmem_writeout(to_folio, NULL, NULL);
if (!folio_test_writeback(to_folio))
folio_clear_reclaim(to_folio);
/*
- * If writepage succeeds, it unlocks the folio.
- * writepage() errors are otherwise dropped, since writepage()
- * is only best effort here.
+		 * If writeout succeeds, it unlocks the folio. Errors
+		 * are otherwise dropped, since writeout is only best
+		 * effort here.
*/
if (ret)
folio_unlock(to_folio);
@@ -161,9 +140,9 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
*
* After a call to this function, it's illegal to use the @backup pointer.
*/
-void ttm_backup_fini(struct ttm_backup *backup)
+void ttm_backup_fini(struct file *backup)
{
- fput(ttm_backup_to_file(backup));
+ fput(backup);
}
/**
@@ -194,14 +173,10 @@ EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
*
* Create a backup utilizing shmem objects.
*
- * Return: A pointer to a struct ttm_backup on success,
+ * Return: A pointer to a struct file on success,
* an error pointer on error.
*/
-struct ttm_backup *ttm_backup_shmem_create(loff_t size)
+struct file *ttm_backup_shmem_create(loff_t size)
{
- struct file *filp;
-
- filp = shmem_file_setup("ttm shmem backup", size, 0);
-
- return ttm_file_to_backup(filp);
+ return shmem_file_setup("ttm shmem backup", size, 0);
}
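
With struct ttm_backup gone, the backup object is a plain shmem-backed
struct file. A condensed life-cycle sketch against the signatures above
(index and gfp choices are illustrative):

	#include <drm/ttm/ttm_backup.h>
	#include <linux/err.h>
	#include <linux/gfp.h>

	/* Sketch: back one page up to shmem, restore it, drop the copy. */
	static int example_backup_roundtrip(struct page *page, struct page *dst)
	{
		struct file *backup;
		s64 handle;
		int ret;

		backup = ttm_backup_shmem_create(PAGE_SIZE);
		if (IS_ERR(backup))
			return PTR_ERR(backup);

		handle = ttm_backup_backup_page(backup, page, false, 0,
						GFP_KERNEL, GFP_KERNEL);
		if (handle < 0) {
			ret = handle;
			goto out;
		}

		ret = ttm_backup_copy_page(backup, dst, handle, true);
		ttm_backup_drop(backup, handle);
	out:
		ttm_backup_fini(backup);
		return ret;
	}
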
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5bf3c969907c..f4d9e68b21e7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -35,6 +35,7 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
+#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -46,6 +47,7 @@
#include <linux/dma-resv.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
@@ -235,7 +237,7 @@ static void ttm_bo_delayed_delete(struct work_struct *work)
bo = container_of(work, typeof(*bo), delayed_delete);
- dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_cleanup_memtype_use(bo);
@@ -270,7 +272,7 @@ static void ttm_bo_release(struct kref *kref)
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, bo->resource);
- if (!dma_resv_test_signaled(bo->base.resv,
+ if (!dma_resv_test_signaled(&bo->base._resv,
DMA_RESV_USAGE_BOOKKEEP) ||
(want_init_on_free() && (bo->ttm != NULL)) ||
bo->type == ttm_bo_type_sg ||
@@ -524,11 +526,11 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *
return 0;
if (bo->deleted) {
- lret = ttm_bo_wait_ctx(bo, walk->ctx);
+ lret = ttm_bo_wait_ctx(bo, walk->arg.ctx);
if (!lret)
ttm_bo_cleanup_memtype_use(bo);
} else {
- lret = ttm_bo_evict(bo, walk->ctx);
+ lret = ttm_bo_evict(bo, walk->arg.ctx);
}
if (lret)
@@ -564,8 +566,10 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
struct ttm_bo_evict_walk evict_walk = {
.walk = {
.ops = &ttm_evict_walk_ops,
- .ctx = ctx,
- .ticket = ticket,
+ .arg = {
+ .ctx = ctx,
+ .ticket = ticket,
+ }
},
.place = place,
.evictor = evictor,
@@ -574,7 +578,7 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
};
s64 lret;
- evict_walk.walk.trylock_only = true;
+ evict_walk.walk.arg.trylock_only = true;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
/* One more attempt if we hit low limit? */
@@ -588,12 +592,12 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
/* Reset low limit */
evict_walk.try_low = evict_walk.hit_low = false;
/* If ticket-locking, repeat while making progress. */
- evict_walk.walk.trylock_only = false;
+ evict_walk.walk.arg.trylock_only = false;
retry:
do {
/* The walk may clear the evict_walk.walk.ticket field */
- evict_walk.walk.ticket = ticket;
+ evict_walk.walk.arg.ticket = ticket;
evict_walk.evicted = 0;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
} while (!lret && evict_walk.evicted);
@@ -1104,7 +1108,7 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
struct ttm_place place = {.mem_type = bo->resource->mem_type};
struct ttm_bo_swapout_walk *swapout_walk =
container_of(walk, typeof(*swapout_walk), walk);
- struct ttm_operation_ctx *ctx = walk->ctx;
+ struct ttm_operation_ctx *ctx = walk->arg.ctx;
s64 ret;
/*
@@ -1215,8 +1219,10 @@ s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
struct ttm_bo_swapout_walk swapout_walk = {
.walk = {
.ops = &ttm_swap_ops,
- .ctx = ctx,
- .trylock_only = true,
+ .arg = {
+ .ctx = ctx,
+ .trylock_only = true,
+ },
},
.gfp_flags = gfp_flags,
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h
new file mode 100644
index 000000000000..9d8b747a34db
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+#ifndef _TTM_BO_INTERNAL_H_
+#define _TTM_BO_INTERNAL_H_
+
+#include <drm/ttm/ttm_bo.h>
+
+/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object is
+ * removed from the lookup structure by its destructor, and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+ if (!kref_get_unless_zero(&bo->kref))
+ return NULL;
+ return bo;
+}
+
+#endif
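
A short sketch of the lookup pattern ttm_bo_get_unless_zero() exists for;
the lookup structure itself is hypothetical:

	#include <drm/ttm/ttm_bo.h>

	#include "ttm_bo_internal.h"

	/*
	 * Sketch: take a reference during a lookup that can race with
	 * destruction, e.g. an LRU list walked under a spinlock from
	 * which the destructor removes the bo.
	 */
	static struct ttm_buffer_object *
	example_find_bo(struct ttm_buffer_object *candidate)
	{
		struct ttm_buffer_object *bo = ttm_bo_get_unless_zero(candidate);

		if (!bo)
			return NULL;	/* already on its way to destruction */

		return bo;		/* caller drops with ttm_bo_put() */
	}
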
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15cab9bda17f..acbbca9d5c92 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,6 +28,8 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+
+#include <linux/export.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
@@ -37,6 +39,8 @@
#include <drm/drm_cache.h>
+#include "ttm_bo_internal.h"
+
struct ttm_transfer_obj {
struct ttm_buffer_object base;
struct ttm_buffer_object *bo;
@@ -254,6 +258,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
+ if (ret) {
+ dma_resv_unlock(&fbo->base.base._resv);
+ kfree(fbo);
+ return ret;
+ }
+
if (fbo->base.resource) {
ttm_resource_set_bo(fbo->base.resource, &fbo->base);
bo->resource = NULL;
@@ -262,12 +273,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.bulk_move = NULL;
}
- ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
- if (ret) {
- kfree(fbo);
- return ret;
- }
-
ttm_bo_get(bo);
fbo->bo = bo;
@@ -378,6 +383,32 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
}
/**
+ * ttm_bo_kmap_try_from_panic
+ *
+ * @bo: The buffer object
+ * @page: The page to map
+ *
+ * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
+ * This should only be called from the panic handler, and only after making
+ * sure the bo is the one being displayed, so that it is properly allocated
+ * and protected.
+ *
+ * Returns the vaddr that you can use to write to the bo, and that you should
+ * pass to kunmap_local() when you're done with this page, or NULL if the bo
+ * is in iomem.
+ */
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
+{
+ if (page + 1 > PFN_UP(bo->resource->size))
+ return NULL;
+
+ if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
+ return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
+
+/**
* ttm_bo_kmap
*
* @bo: The buffer object.
@@ -769,14 +800,15 @@ error_destroy_tt:
return ret;
}
-static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
- struct ttm_buffer_object *bo,
- bool *needs_unlock)
+static bool ttm_lru_walk_trylock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
{
- *needs_unlock = false;
+ struct ttm_operation_ctx *ctx = curs->arg->ctx;
+
+ curs->needs_unlock = false;
if (dma_resv_trylock(bo->base.resv)) {
- *needs_unlock = true;
+ curs->needs_unlock = true;
return true;
}
@@ -788,27 +820,27 @@ static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
return false;
}
-static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
- struct ttm_buffer_object *bo,
- bool *needs_unlock)
+static int ttm_lru_walk_ticketlock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
{
+ struct ttm_lru_walk_arg *arg = curs->arg;
struct dma_resv *resv = bo->base.resv;
int ret;
- if (walk->ctx->interruptible)
- ret = dma_resv_lock_interruptible(resv, walk->ticket);
+ if (arg->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, arg->ticket);
else
- ret = dma_resv_lock(resv, walk->ticket);
+ ret = dma_resv_lock(resv, arg->ticket);
if (!ret) {
- *needs_unlock = true;
+ curs->needs_unlock = true;
/*
* Only a single ticketlock per loop. Ticketlocks are prone
* to return -EDEADLK causing the eviction to fail, so
* after waiting for the ticketlock, revert back to
* trylocking for this walk.
*/
- walk->ticket = NULL;
+ arg->ticket = NULL;
} else if (ret == -EDEADLK) {
/* Caller needs to exit the ww transaction. */
ret = -ENOSPC;
@@ -817,12 +849,6 @@ static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
return ret;
}
-static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
-{
- if (locked)
- dma_resv_unlock(bo->base.resv);
-}
-
/**
* ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
* valid items.
@@ -857,64 +883,21 @@ static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
struct ttm_resource_manager *man, s64 target)
{
- struct ttm_resource_cursor cursor;
- struct ttm_resource *res;
+ struct ttm_bo_lru_cursor cursor;
+ struct ttm_buffer_object *bo;
s64 progress = 0;
s64 lret;
- spin_lock(&bdev->lru_lock);
- ttm_resource_cursor_init(&cursor, man);
- ttm_resource_manager_for_each_res(&cursor, res) {
- struct ttm_buffer_object *bo = res->bo;
- bool bo_needs_unlock = false;
- bool bo_locked = false;
- int mem_type;
-
- /*
- * Attempt a trylock before taking a reference on the bo,
- * since if we do it the other way around, and the trylock fails,
- * we need to drop the lru lock to put the bo.
- */
- if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
- bo_locked = true;
- else if (!walk->ticket || walk->ctx->no_wait_gpu ||
- walk->trylock_only)
- continue;
-
- if (!ttm_bo_get_unless_zero(bo)) {
- ttm_lru_walk_unlock(bo, bo_needs_unlock);
- continue;
- }
-
- mem_type = res->mem_type;
- spin_unlock(&bdev->lru_lock);
-
- lret = 0;
- if (!bo_locked)
- lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
-
- /*
- * Note that in between the release of the lru lock and the
- * ticketlock, the bo may have switched resource,
- * and also memory type, since the resource may have been
- * freed and allocated again with a different memory type.
- * In that case, just skip it.
- */
- if (!lret && bo->resource && bo->resource->mem_type == mem_type)
- lret = walk->ops->process_bo(walk, bo);
-
- ttm_lru_walk_unlock(bo, bo_needs_unlock);
- ttm_bo_put(bo);
+ ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &walk->arg, bo) {
+ lret = walk->ops->process_bo(walk, bo);
if (lret == -EBUSY || lret == -EALREADY)
lret = 0;
progress = (lret < 0) ? lret : progress + lret;
-
- spin_lock(&bdev->lru_lock);
if (progress < 0 || progress >= target)
break;
}
- ttm_resource_cursor_fini(&cursor);
- spin_unlock(&bdev->lru_lock);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
return progress;
}
@@ -952,44 +935,87 @@ EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
* ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
* @curs: The ttm_bo_lru_cursor to initialize.
* @man: The ttm resource_manager whose LRU lists to iterate over.
- * @ctx: The ttm_operation_ctx to govern the locking.
+ * @arg: The ttm_lru_walk_arg to govern the walk.
*
- * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking
- * or prelocked buffer objects are available as detailed by
- * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
- * supported.
+ * Initialize a struct ttm_bo_lru_cursor.
*
* Return: Pointer to @curs. The function does not fail.
*/
struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
struct ttm_resource_manager *man,
- struct ttm_operation_ctx *ctx)
+ struct ttm_lru_walk_arg *arg)
{
memset(curs, 0, sizeof(*curs));
ttm_resource_cursor_init(&curs->res_curs, man);
- curs->ctx = ctx;
+ curs->arg = arg;
return curs;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
static struct ttm_buffer_object *
-ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
+__ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
- struct ttm_buffer_object *bo = res->bo;
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_resource *res = NULL;
+ struct ttm_buffer_object *bo;
+ struct ttm_lru_walk_arg *arg = curs->arg;
+ bool first = !curs->bo;
- if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
- return NULL;
+ ttm_bo_lru_cursor_cleanup_bo(curs);
- if (!ttm_bo_get_unless_zero(bo)) {
- if (curs->needs_unlock)
- dma_resv_unlock(bo->base.resv);
- return NULL;
+ spin_lock(lru_lock);
+ for (;;) {
+ int mem_type, ret = 0;
+ bool bo_locked = false;
+
+ if (first) {
+ res = ttm_resource_manager_first(&curs->res_curs);
+ first = false;
+ } else {
+ res = ttm_resource_manager_next(&curs->res_curs);
+ }
+ if (!res)
+ break;
+
+ bo = res->bo;
+ if (ttm_lru_walk_trylock(curs, bo))
+ bo_locked = true;
+ else if (!arg->ticket || arg->ctx->no_wait_gpu || arg->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(lru_lock);
+ if (!bo_locked)
+ ret = ttm_lru_walk_ticketlock(curs, bo);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ curs->bo = bo;
+ if (!ret && bo->resource && bo->resource->mem_type == mem_type)
+ return bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ if (ret && ret != -EALREADY)
+ return ERR_PTR(ret);
+
+ spin_lock(lru_lock);
}
- curs->bo = bo;
- return bo;
+ spin_unlock(lru_lock);
+ return res ? bo : NULL;
}
/**
@@ -1003,25 +1029,7 @@ ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *cur
*/
struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
- spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
- struct ttm_resource *res = NULL;
- struct ttm_buffer_object *bo;
-
- ttm_bo_lru_cursor_cleanup_bo(curs);
-
- spin_lock(lru_lock);
- for (;;) {
- res = ttm_resource_manager_next(&curs->res_curs);
- if (!res)
- break;
-
- bo = ttm_bo_from_res_reserved(res, curs);
- if (bo)
- break;
- }
-
- spin_unlock(lru_lock);
- return res ? bo : NULL;
+ return __ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
@@ -1035,21 +1043,8 @@ EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
*/
struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
{
- spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
- struct ttm_buffer_object *bo;
- struct ttm_resource *res;
-
- spin_lock(lru_lock);
- res = ttm_resource_manager_first(&curs->res_curs);
- if (!res) {
- spin_unlock(lru_lock);
- return NULL;
- }
-
- bo = ttm_bo_from_res_reserved(res, curs);
- spin_unlock(lru_lock);
-
- return bo ? bo : ttm_bo_lru_cursor_next(curs);
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ return __ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
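
The trylock/ticketlock loop removed here is now available to all walkers
through the guarded iterator. A minimal consumer sketch, assuming a
resource manager @man and an operation context @ctx:

	#include <drm/ttm/ttm_bo.h>

	/*
	 * Sketch: visit every resident bo on a manager's LRU lists, with
	 * locking, reference counting and lock-drop restarts handled by
	 * the cursor machinery.
	 */
	static s64 example_count_lru_bos(struct ttm_resource_manager *man,
					 struct ttm_operation_ctx *ctx)
	{
		struct ttm_lru_walk_arg arg = { .ctx = ctx, .trylock_only = true };
		struct ttm_bo_lru_cursor cursor;
		struct ttm_buffer_object *bo;
		s64 count = 0;

		ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &arg, bo)
			count++;	/* bo is resv-locked and referenced here */

		return IS_ERR(bo) ? PTR_ERR(bo) : count;
	}
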
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a194db83421d..b47020fca199 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,6 +31,8 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/export.h>
+
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
@@ -220,7 +222,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
- .force_alloc = true
};
ttm = bo->ttm;
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 02e797fd1891..c3e2fcbdd2cc 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) "[TTM DEVICE] " fmt
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo.h>
@@ -36,6 +37,7 @@
#include <drm/ttm/ttm_placement.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
/*
* ttm_global_mutex - protecting the global state
@@ -123,6 +125,28 @@ out:
return ret;
}
+/**
+ * ttm_device_prepare_hibernation - move GTT BOs to shmem for hibernation.
+ *
+ * @bdev: A pointer to a struct ttm_device to prepare hibernation for.
+ *
+ * Return: 0 on success, negative number on failure.
+ */
+int ttm_device_prepare_hibernation(struct ttm_device *bdev)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ };
+ int ret;
+
+ do {
+ ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL);
+ } while (ret > 0);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_device_prepare_hibernation);
+
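
A driver would call this from its power-management path before the
hibernation image is written. A hedged sketch of such a hook (the callback
shape is assumed, not prescribed by the patch):

	#include <drm/ttm/ttm_device.h>

	/* Sketch: flush GTT-backed pages to shmem so they land in the image. */
	static int example_pm_prepare(struct ttm_device *bdev)
	{
		int ret = ttm_device_prepare_hibernation(bdev);

		if (ret)
			return ret;	/* swapout failed; abort hibernation */
		return 0;
	}
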
/*
* A buffer object shrink method that tries to swap out the first
* buffer object on the global::swap_lru list.
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index f1c60fa80c2d..bc7a83a9fe44 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -26,6 +26,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 83b10706ba89..baf27c70a419 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -31,6 +31,7 @@
* cause they are rather slow compared to alloc_pages+map.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
@@ -506,7 +507,7 @@ static void ttm_pool_allocated_page_commit(struct page *allocated,
* if successful, populate the page-table and dma-address arrays.
*/
static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
- struct ttm_backup *backup,
+ struct file *backup,
const struct ttm_operation_ctx *ctx,
struct ttm_pool_alloc_state *alloc)
@@ -655,7 +656,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = &tt->pages[start_page];
- struct ttm_backup *backup = tt->backup;
+ struct file *backup = tt->backup;
pgoff_t i, nr;
for (i = start_page; i < end_page; i += nr, pages += nr) {
@@ -963,7 +964,7 @@ void ttm_pool_drop_backed_up(struct ttm_tt *tt)
long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
const struct ttm_backup_flags *flags)
{
- struct ttm_backup *backup = tt->backup;
+ struct file *backup = tt->backup;
struct page *page;
unsigned long handle;
gfp_t alloc_gfp;
@@ -1132,7 +1133,9 @@ void ttm_pool_fini(struct ttm_pool *pool)
}
EXPORT_SYMBOL(ttm_pool_fini);
-/* As long as pages are available make sure to release at least one */
+/* Free average pool number of pages. */
+#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)
+
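
To put a number on the batch size (values are config-dependent; shown for
a typical build where MAX_PAGE_ORDER == 10):

	NR_PAGE_ORDERS     = MAX_PAGE_ORDER + 1    = 11
	TTM_SHRINKER_BATCH = (1 << (10 / 2)) * 11  = 32 * 11 = 352 pages
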
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -1140,9 +1143,12 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
do
num_freed += ttm_pool_shrink();
- while (!num_freed && atomic_long_read(&allocated_pages));
+ while (num_freed < sc->nr_to_scan &&
+ atomic_long_read(&allocated_pages));
+
+ sc->nr_scanned = num_freed;
- return num_freed;
+ return num_freed ?: SHRINK_STOP;
}
/* Return the number of pages available or SHRINK_EMPTY if we have none */
@@ -1233,7 +1239,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- if (!pool->use_dma_alloc) {
+ if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) {
seq_puts(m, "unused\n");
return 0;
}
@@ -1242,7 +1248,12 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
- seq_puts(m, "DMA ");
+ if (!ttm_pool_select_type(pool, i, 0))
+ continue;
+ if (pool->use_dma_alloc)
+ seq_puts(m, "DMA ");
+ else
+ seq_printf(m, "N%d ", pool->nid);
switch (i) {
case ttm_cached:
seq_puts(m, "\t:");
@@ -1266,10 +1277,15 @@ EXPORT_SYMBOL(ttm_pool_debugfs);
/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
- struct shrink_control sc = { .gfp_mask = GFP_NOFS };
+ struct shrink_control sc = {
+ .gfp_mask = GFP_NOFS,
+ .nr_to_scan = TTM_SHRINKER_BATCH,
+ };
+ unsigned long count;
fs_reclaim_acquire(GFP_KERNEL);
- seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(mm_shrinker, &sc),
+ count = ttm_pool_shrinker_count(mm_shrinker, &sc);
+ seq_printf(m, "%lu/%lu\n", count,
ttm_pool_shrinker_scan(mm_shrinker, &sc));
fs_reclaim_release(GFP_KERNEL);
@@ -1324,6 +1340,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
mm_shrinker->count_objects = ttm_pool_shrinker_count;
mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
+ mm_shrinker->batch = TTM_SHRINKER_BATCH;
mm_shrinker->seeks = 1;
shrinker_register(mm_shrinker);
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index ae11d07eb63a..db854b581d83 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -34,6 +34,8 @@
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/drm_mm.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7e5a60c55813..e2c82ad07eb4 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -23,6 +23,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
@@ -548,7 +549,6 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = true
};
struct dma_fence *fence;
int ret;
@@ -558,6 +558,9 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
cond_resched();
} while (!ret);
+ if (ret && ret != -ENOENT)
+ return ret;
+
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index df0aa6c4b8b8..506e257dfba8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -33,6 +33,7 @@
#include <linux/cc_platform.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -544,7 +545,7 @@ EXPORT_SYMBOL(ttm_tt_pages_limit);
*/
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
- struct ttm_backup *backup =
+ struct file *backup =
ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 05b3a152cc33..1922988625eb 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -22,13 +22,14 @@ static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
int ret;
ret = drm_mode_config_helper_suspend(dev);
if (ret)
return ret;
- udl_sync_pending_urbs(dev);
+ udl_sync_pending_urbs(udl);
return 0;
}
@@ -49,22 +50,6 @@ static int udl_usb_reset_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct udl_device *udl = to_udl(dev);
-
- if (!udl->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static const struct drm_driver driver = {
@@ -73,7 +58,6 @@ static const struct drm_driver driver = {
/* GEM hooks */
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = udl_driver_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
@@ -126,10 +110,10 @@ static int udl_usb_probe(struct usb_interface *interface,
static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
- drm_kms_helper_poll_fini(dev);
- udl_drop_usb(dev);
drm_dev_unplug(dev);
+ udl_drop_usb(udl);
}
/*
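
Running drm_dev_unplug() before the USB teardown matters because it flips
the device into the unplugged state that all I/O paths are expected to
check. A generic sketch of the guard pattern (not udl code):

	#include <drm/drm_drv.h>

	/* Sketch: an I/O path racing with disconnect bails out cleanly. */
	static int example_do_io(struct drm_device *drm)
	{
		int idx, ret = 0;

		if (!drm_dev_enter(drm, &idx))
			return -ENODEV;	/* device already unplugged */

		/* ... touch the hardware here ... */

		drm_dev_exit(idx);
		return ret;
	}
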
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index be00dc1d87a1..145bb95ccc48 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -50,18 +50,14 @@ struct urb_list {
struct udl_device {
struct drm_device drm;
- struct device *dev;
- struct device *dmadev;
+
+ unsigned long sku_pixel_limit;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
- struct mutex gem_lock;
-
- int sku_pixel_limit;
-
struct urb_list urbs;
};
@@ -73,22 +69,22 @@ static inline struct usb_device *udl_to_usb_device(struct udl_device *udl)
}
/* modeset */
-int udl_modeset_init(struct drm_device *dev);
+int udl_modeset_init(struct udl_device *udl);
struct drm_connector *udl_connector_init(struct drm_device *dev);
-struct urb *udl_get_urb(struct drm_device *dev);
+struct urb *udl_get_urb(struct udl_device *udl);
-int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
-void udl_sync_pending_urbs(struct drm_device *dev);
+int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len);
+void udl_sync_pending_urbs(struct udl_device *udl);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
-int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
+int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width);
-int udl_drop_usb(struct drm_device *dev);
+int udl_drop_usb(struct udl_device *udl);
int udl_select_std_channel(struct udl_device *udl);
#endif
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 3ebe2ce55dfd..bc58991a6f14 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -8,6 +8,8 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
+#include <linux/unaligned.h>
+
#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -23,72 +25,99 @@
#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
+#define UDL_SKU_PIXEL_LIMIT_DEFAULT 2080000
+
static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
+/*
+ * Try to make sense of whatever we parse: on any error, return @end to
+ * stop the walk gracefully rather than failing hard.
+ */
+static const u8 *udl_parse_key_value_pair(struct udl_device *udl, const u8 *pos, const u8 *end)
+{
+ u16 key;
+ u8 len;
+
+ /* read key */
+ if (pos >= end - 2)
+ return end;
+ key = get_unaligned_le16(pos);
+ pos += 2;
+
+ /* read value length */
+ if (pos >= end - 1)
+ return end;
+ len = *pos++;
+
+ /* read value */
+ if (pos >= end - len)
+ return end;
+ switch (key) {
+ case 0x0200: { /* maximum number of pixels */
+ unsigned int sku_pixel_limit;
+
+ if (len < sizeof(__le32))
+ break;
+ sku_pixel_limit = get_unaligned_le32(pos);
+ if (sku_pixel_limit >= 16 * UDL_SKU_PIXEL_LIMIT_DEFAULT)
+ break; /* almost 100 MiB, so probably bogus */
+ udl->sku_pixel_limit = sku_pixel_limit;
+ break;
+ }
+ default:
+ break;
+ }
+ pos += len;
+
+ return pos;
+}
+
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
+ struct drm_device *dev = &udl->drm;
struct usb_device *udev = udl_to_usb_device(udl);
- char *desc;
- char *buf;
- char *desc_end;
-
- u8 total_len = 0;
+ bool detected = false;
+ void *buf;
+ int ret;
+ unsigned int len;
+ const u8 *desc;
+ const u8 *desc_end;
buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
if (!buf)
- return false;
+ return -ENOMEM;
+
+ ret = usb_get_descriptor(udev, 0x5f, /* vendor specific */
+ 0, buf, MAX_VENDOR_DESCRIPTOR_SIZE);
+ if (ret < 0)
+ goto out;
+ len = ret;
+
+ if (len < 5)
+ goto out;
+
desc = buf;
+ desc_end = desc + len;
- total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */
- 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
- if (total_len > 5) {
- DRM_INFO("vendor descriptor length:%x data:%11ph\n",
- total_len, desc);
-
- if ((desc[0] != total_len) || /* descriptor length */
- (desc[1] != 0x5f) || /* vendor descriptor type */
- (desc[2] != 0x01) || /* version (2 bytes) */
- (desc[3] != 0x00) ||
- (desc[4] != total_len - 2)) /* length after type */
- goto unrecognized;
-
- desc_end = desc + total_len;
- desc += 5; /* the fixed header we've already parsed */
-
- while (desc < desc_end) {
- u8 length;
- u16 key;
-
- key = le16_to_cpu(*((u16 *) desc));
- desc += sizeof(u16);
- length = *desc;
- desc++;
-
- switch (key) {
- case 0x0200: { /* max_area */
- u32 max_area;
- max_area = le32_to_cpu(*((u32 *)desc));
- DRM_DEBUG("DL chip limited to %d pixel modes\n",
- max_area);
- udl->sku_pixel_limit = max_area;
- break;
- }
- default:
- break;
- }
- desc += length;
- }
- }
+ if ((desc[0] != len) || /* descriptor length */
+ (desc[1] != 0x5f) || /* vendor descriptor type */
+ (desc[2] != 0x01) || /* version (2 bytes) */
+ (desc[3] != 0x00) ||
+ (desc[4] != len - 2)) /* length after type */
+ goto out;
+ desc += 5;
- goto success;
+ detected = true;
-unrecognized:
- /* allow udlfb to load for now even if firmware unrecognized */
- DRM_ERROR("Unrecognized vendor firmware descriptor\n");
+ while (desc < desc_end)
+ desc = udl_parse_key_value_pair(udl, desc, desc_end);
-success:
+out:
+ if (!detected)
+ drm_warn(dev, "Unrecognized vendor firmware descriptor\n");
kfree(buf);
- return true;
+
+ return 0;
}
/*
@@ -145,9 +174,8 @@ void udl_urb_completion(struct urb *urb)
wake_up(&udl->urbs.sleep);
}
-static void udl_free_urb_list(struct drm_device *dev)
+static void udl_free_urb_list(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
struct urb_node *unode;
struct urb *urb;
@@ -172,9 +200,8 @@ static void udl_free_urb_list(struct drm_device *dev)
wake_up_all(&udl->urbs.sleep);
}
-static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
+static int udl_alloc_urb_list(struct udl_device *udl, int count, size_t size)
{
- struct udl_device *udl = to_udl(dev);
struct urb *urb;
struct urb_node *unode;
char *buf;
@@ -210,7 +237,7 @@ retry:
usb_free_urb(urb);
if (size > PAGE_SIZE) {
size /= 2;
- udl_free_urb_list(dev);
+ udl_free_urb_list(udl);
goto retry;
}
break;
@@ -259,9 +286,8 @@ static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
}
#define GET_URB_TIMEOUT HZ
-struct urb *udl_get_urb(struct drm_device *dev)
+struct urb *udl_get_urb(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
struct urb *urb;
spin_lock_irq(&udl->urbs.lock);
@@ -270,9 +296,8 @@ struct urb *udl_get_urb(struct drm_device *dev)
return urb;
}
-int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
+int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len)
{
- struct udl_device *udl = to_udl(dev);
int ret;
if (WARN_ON(len > udl->urbs.size)) {
@@ -290,9 +315,9 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
}
/* wait until all pending URBs have been processed */
-void udl_sync_pending_urbs(struct drm_device *dev)
+void udl_sync_pending_urbs(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
+ struct drm_device *dev = &udl->drm;
spin_lock_irq(&udl->urbs.lock);
/* 2 seconds as a sane timeout */
@@ -308,53 +333,55 @@ int udl_init(struct udl_device *udl)
{
struct drm_device *dev = &udl->drm;
int ret = -ENOMEM;
+ struct device *dma_dev;
DRM_DEBUG("\n");
- udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!udl->dmadev)
+ dma_dev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
- mutex_init(&udl->gem_lock);
+ /*
+ * Not all devices provide a vendor descriptor with device
+ * information, so initialize with a default value observed on
+ * real-world devices. It is just enough memory for Full HD.
+ */
+ udl->sku_pixel_limit = UDL_SKU_PIXEL_LIMIT_DEFAULT;
- if (!udl_parse_vendor_descriptor(udl)) {
- ret = -ENODEV;
- DRM_ERROR("firmware not recognized. Assume incompatible device\n");
+ ret = udl_parse_vendor_descriptor(udl);
+ if (ret)
goto err;
- }
if (udl_select_std_channel(udl))
DRM_ERROR("Selecting channel failed\n");
- if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ if (!udl_alloc_urb_list(udl, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
DRM_ERROR("udl_alloc_urb_list failed\n");
+ ret = -ENOMEM;
goto err;
}
DRM_DEBUG("\n");
- ret = udl_modeset_init(dev);
+ ret = udl_modeset_init(udl);
if (ret)
goto err;
- drm_kms_helper_poll_init(dev);
-
return 0;
err:
if (udl->urbs.count)
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
+ udl_free_urb_list(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
-int udl_drop_usb(struct drm_device *dev)
+int udl_drop_usb(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
-
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
- udl->dmadev = NULL;
+ udl_free_urb_list(udl);
return 0;
}
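The new udl_parse_key_value_pair() walks a key/length/value stream with a bounds check before each read. A standalone, runnable sketch of the same walk; the buffer contents are made up, and the trailing pad byte satisfies the deliberately strict value-bounds check:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p) { return p[0] | (p[1] << 8); }
static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static const uint8_t *parse_pair(const uint8_t *pos, const uint8_t *end,
				 uint32_t *pixel_limit)
{
	uint16_t key;
	uint8_t len;

	if (pos >= end - 2)		/* need 2 bytes for the key */
		return end;
	key = get_le16(pos);
	pos += 2;

	if (pos >= end - 1)		/* need 1 byte for the length */
		return end;
	len = *pos++;

	if (pos >= end - len)		/* need len bytes for the value */
		return end;
	if (key == 0x0200 && len >= 4)	/* maximum number of pixels */
		*pixel_limit = get_le32(pos);
	return pos + len;
}

int main(void)
{
	/* one pair: key 0x0200, len 4, value 2080000 (0x1fbd00), plus a pad byte */
	const uint8_t buf[] = { 0x00, 0x02, 0x04, 0x00, 0xbd, 0x1f, 0x00, 0x00 };
	const uint8_t *pos = buf, *end = buf + sizeof(buf);
	uint32_t limit = 0;

	while (pos < end)
		pos = parse_pair(pos, end, &limit);
	printf("pixel limit: %u\n", limit);	/* prints 2080000 */
	return 0;
}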
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index bbb04f98886a..231e829bd709 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -205,6 +205,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
struct drm_device *dev = fb->dev;
+ struct udl_device *udl = to_udl(dev);
void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
int i, ret;
char *cmd;
@@ -216,7 +217,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
return -ENOMEM;
cmd = urb->transfer_buffer;
@@ -226,7 +227,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
const int byte_offset = line_offset + (clip->x1 << log_bpp);
const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp;
const int byte_width = drm_rect_width(clip) << log_bpp;
- ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
+ ret = udl_render_hline(udl, log_bpp, &urb, (char *)vaddr,
&cmd, byte_offset, dev_byte_offset,
byte_width);
if (ret)
@@ -239,7 +240,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
*cmd++ = UDL_MSG_BULK;
len = cmd - (char *)urb->transfer_buffer;
- ret = udl_submit_urb(dev, urb, len);
+ ret = udl_submit_urb(udl, urb, len);
} else {
udl_urb_completion(urb);
}
@@ -330,6 +331,7 @@ static const struct drm_plane_funcs udl_primary_plane_funcs = {
static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = to_udl(dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *mode = &crtc_state->mode;
struct urb *urb;
@@ -339,7 +341,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom
if (!drm_dev_enter(dev, &idx))
return;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
goto out;
@@ -355,7 +357,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
- udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+ udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
@@ -364,6 +366,7 @@ out:
static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
char *buf;
int idx;
@@ -371,7 +374,7 @@ static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
if (!drm_dev_enter(dev, &idx))
return;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
goto out;
@@ -381,7 +384,7 @@ static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
- udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+ udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
@@ -476,9 +479,9 @@ static const struct drm_mode_config_funcs udl_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int udl_modeset_init(struct drm_device *dev)
+int udl_modeset_init(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
+ struct drm_device *dev = &udl->drm;
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
@@ -535,6 +538,7 @@ int udl_modeset_init(struct drm_device *dev)
return ret;
drm_mode_config_reset(dev);
+ drmm_kms_helper_poll_init(dev);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 62224992988f..7d670b3a5293 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -170,7 +170,7 @@ static void udl_compress_hline16(
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
-int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
+int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
u32 byte_width)
@@ -199,10 +199,10 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
- int ret = udl_submit_urb(dev, urb, len);
+ int ret = udl_submit_urb(udl, urb, len);
if (ret)
return ret;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
return -EAGAIN;
*urb_ptr = urb;
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index bb7815599435..c41476ddde68 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -16,7 +16,6 @@
*/
#include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
#include <linux/vmalloc.h>
#include "v3d_drv.h"
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 76816f2551c1..7e789e181af0 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -21,74 +21,74 @@ struct v3d_reg_def {
};
static const struct v3d_reg_def v3d_hub_reg_defs[] = {
- REGDEF(33, 42, V3D_HUB_AXICFG),
- REGDEF(33, 71, V3D_HUB_UIFCFG),
- REGDEF(33, 71, V3D_HUB_IDENT0),
- REGDEF(33, 71, V3D_HUB_IDENT1),
- REGDEF(33, 71, V3D_HUB_IDENT2),
- REGDEF(33, 71, V3D_HUB_IDENT3),
- REGDEF(33, 71, V3D_HUB_INT_STS),
- REGDEF(33, 71, V3D_HUB_INT_MSK_STS),
-
- REGDEF(33, 71, V3D_MMU_CTL),
- REGDEF(33, 71, V3D_MMU_VIO_ADDR),
- REGDEF(33, 71, V3D_MMU_VIO_ID),
- REGDEF(33, 71, V3D_MMU_DEBUG_INFO),
-
- REGDEF(71, 71, V3D_GMP_STATUS(71)),
- REGDEF(71, 71, V3D_GMP_CFG(71)),
- REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO),
+
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_STATUS(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_CFG(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_VIO_ADDR(71)),
};
static const struct v3d_reg_def v3d_gca_reg_defs[] = {
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN),
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK),
};
static const struct v3d_reg_def v3d_core_reg_defs[] = {
- REGDEF(33, 71, V3D_CTL_IDENT0),
- REGDEF(33, 71, V3D_CTL_IDENT1),
- REGDEF(33, 71, V3D_CTL_IDENT2),
- REGDEF(33, 71, V3D_CTL_MISCCFG),
- REGDEF(33, 71, V3D_CTL_INT_STS),
- REGDEF(33, 71, V3D_CTL_INT_MSK_STS),
- REGDEF(33, 71, V3D_CLE_CT0CS),
- REGDEF(33, 71, V3D_CLE_CT0CA),
- REGDEF(33, 71, V3D_CLE_CT0EA),
- REGDEF(33, 71, V3D_CLE_CT1CS),
- REGDEF(33, 71, V3D_CLE_CT1CA),
- REGDEF(33, 71, V3D_CLE_CT1EA),
-
- REGDEF(33, 71, V3D_PTB_BPCA),
- REGDEF(33, 71, V3D_PTB_BPCS),
-
- REGDEF(33, 42, V3D_GMP_STATUS(33)),
- REGDEF(33, 42, V3D_GMP_CFG(33)),
- REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
-
- REGDEF(33, 71, V3D_ERR_FDBGO),
- REGDEF(33, 71, V3D_ERR_FDBGB),
- REGDEF(33, 71, V3D_ERR_FDBGS),
- REGDEF(33, 71, V3D_ERR_STAT),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_STATUS(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_CFG(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_VIO_ADDR(33)),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT),
};
static const struct v3d_reg_def v3d_csd_reg_defs[] = {
- REGDEF(41, 71, V3D_CSD_STATUS),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)),
- REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7),
+ REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG0(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG1(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG2(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG3(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG4(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG5(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG6(41)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG0(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG1(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG2(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG3(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG4(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG5(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG6(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7),
};
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
@@ -164,7 +164,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
seq_printf(m, "TFU: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, "TSY: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
}
@@ -196,11 +196,11 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
seq_printf(m, " QPUs: %d\n", nslc * qups);
seq_printf(m, " Semaphores: %d\n",
V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, " BCG int: %d\n",
(ident2 & V3D_IDENT2_BCG_INT) != 0);
}
- if (v3d->ver < 40) {
+ if (v3d->ver < V3D_GEN_41) {
seq_printf(m, " Override TMU: %d\n",
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
@@ -234,7 +234,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int core = 0;
int measure_ms = 1000;
- if (v3d->ver >= 40) {
+ if (v3d->ver >= V3D_GEN_41) {
int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
V3D_SET_FIELD_VER(cycle_count_reg,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 852015214e97..5e997ae8bc9c 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched/clock.h>
@@ -92,7 +93,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
args->value = 1;
return 0;
case DRM_V3D_PARAM_SUPPORTS_PERFMON:
- args->value = (v3d->ver >= 40);
+ args->value = (v3d->ver >= V3D_GEN_41);
return 0;
case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
args->value = 1;
@@ -254,14 +255,44 @@ static const struct drm_driver v3d_drm_driver = {
};
static const struct of_device_id v3d_of_match[] = {
- { .compatible = "brcm,2711-v3d" },
- { .compatible = "brcm,2712-v3d" },
- { .compatible = "brcm,7268-v3d" },
- { .compatible = "brcm,7278-v3d" },
+ { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 },
+ { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 },
+ { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 },
+ { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 },
{},
};
MODULE_DEVICE_TABLE(of, v3d_of_match);
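The match table now encodes the V3D generation directly in .data, which v3d_platform_drm_probe() reads back below via of_device_get_match_data(). A minimal sketch of that idiom with hypothetical names; not compile-tested:

#include <linux/of.h>
#include <linux/platform_device.h>

enum foo_gen { FOO_GEN_1 = 1, FOO_GEN_2 = 2 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = (void *)FOO_GEN_1 },
	{ .compatible = "vendor,foo-v2", .data = (void *)FOO_GEN_2 },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* cast through uintptr_t to avoid a pointer-to-int warning */
	enum foo_gen gen = (uintptr_t)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "generation %d\n", gen);
	return 0;
}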
+static void
+v3d_idle_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_CLEAR_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_IDLE), 100)) {
+ DRM_ERROR("Failed to power up SMS\n");
+ }
+
+ v3d_reset_sms(v3d);
+}
+
+static void
+v3d_power_off_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_POWER_OFF_STATE), 100)) {
+ DRM_ERROR("Failed to power off SMS\n");
+ }
+}
+
static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
@@ -274,6 +305,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct drm_device *drm;
struct v3d_dev *v3d;
+ enum v3d_gen gen;
int ret;
u32 mmu_debug;
u32 ident1, ident3;
@@ -287,6 +319,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drm);
+ gen = (uintptr_t)of_device_get_match_data(dev);
+ v3d->ver = gen;
+
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
return ret;
@@ -295,6 +330,12 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (v3d->ver >= V3D_GEN_71) {
+ ret = map_regs(v3d, &v3d->sms_regs, "sms");
+ if (ret)
+ return ret;
+ }
+
v3d->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(v3d->clk))
return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
@@ -305,6 +346,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return ret;
}
+ v3d_idle_sms(v3d);
+
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
ret = dma_set_mask_and_coherent(dev, mask);
@@ -316,6 +359,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ident1 = V3D_READ(V3D_HUB_IDENT1);
v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
+ /* Make sure that the V3D tech version retrieved from the HW is equal
+ * to the one advertised by the device tree.
+ */
+ WARN_ON(v3d->ver != gen);
+
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
@@ -340,7 +388,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
}
}
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
goto clk_disable;
@@ -400,6 +448,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
+ v3d_power_off_sms(v3d);
+
clk_disable_unprepare(v3d->clk);
}
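v3d_idle_sms() and v3d_power_off_sms() above follow a request-then-poll shape: write a command bit, then poll the state field until it reaches the target or a timeout fires. A generic sketch of that shape, assuming only standard kernel timing helpers:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll @read_state() until it returns @target or @ms milliseconds pass. */
static int poll_state(u32 (*read_state)(void), u32 target, unsigned int ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	for (;;) {
		if (read_state() == target)
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(100, 200);	/* don't hammer the register */
	}
}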
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9deaefa0f95b..411e47702f8a 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -94,23 +94,39 @@ struct v3d_perfmon {
u64 values[] __counted_by(ncounters);
};
+enum v3d_gen {
+ V3D_GEN_33 = 33,
+ V3D_GEN_41 = 41,
+ V3D_GEN_42 = 42,
+ V3D_GEN_71 = 71,
+};
+
+enum v3d_irq {
+ V3D_CORE_IRQ,
+ V3D_HUB_IRQ,
+ V3D_MAX_IRQS,
+};
+
struct v3d_dev {
struct drm_device drm;
/* Short representation (e.g. 33, 41) of the V3D tech version */
- int ver;
+ enum v3d_gen ver;
/* Short representation (e.g. 5, 6) of the V3D tech revision */
int rev;
bool single_irq_line;
+ int irq[V3D_MAX_IRQS];
+
struct v3d_perfmon_info perfmon_info;
void __iomem *hub_regs;
void __iomem *core_regs[3];
void __iomem *bridge_regs;
void __iomem *gca_regs;
+ void __iomem *sms_regs;
struct clk *clk;
struct reset_control *reset;
@@ -199,7 +215,7 @@ to_v3d_dev(struct drm_device *dev)
static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
- return v3d->ver >= 41;
+ return v3d->ver >= V3D_GEN_41;
}
#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
@@ -261,6 +277,15 @@ to_v3d_fence(struct dma_fence *fence)
#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)
+#define V3D_SMS_IDLE 0x0
+#define V3D_SMS_ISOLATING_FOR_RESET 0xa
+#define V3D_SMS_RESETTING 0xb
+#define V3D_SMS_ISOLATING_FOR_POWER_OFF 0xc
+#define V3D_SMS_POWER_OFF_STATE 0xd
+
+#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset))
+#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset))
+
#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
@@ -539,6 +564,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset_sms(struct v3d_dev *v3d);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b1e681630ded..37bf5eecdd2c 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -25,7 +25,7 @@ v3d_init_core(struct v3d_dev *v3d, int core)
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
- if (v3d->ver < 40)
+ if (v3d->ver < V3D_GEN_41)
V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
@@ -58,7 +58,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int core)
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
- if (v3d->ver >= 41)
+ if (v3d->ver >= V3D_GEN_41)
return;
V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
@@ -105,6 +105,22 @@ v3d_reset_v3d(struct v3d_dev *v3d)
}
void
+v3d_reset_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_REE_CS, V3D_SET_FIELD(0x4, V3D_SMS_STATE));
+
+ if (wait_for(!(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_ISOLATING_FOR_RESET) &&
+ !(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_RESETTING), 100)) {
+ DRM_ERROR("Failed to wait for SMS reset\n");
+ }
+}
+
+void
v3d_reset(struct v3d_dev *v3d)
{
struct drm_device *dev = &v3d->drm;
@@ -118,7 +134,10 @@ v3d_reset(struct v3d_dev *v3d)
if (false)
v3d_idle_axi(v3d, 0);
+ v3d_irq_disable(v3d);
+
v3d_idle_gca(v3d);
+ v3d_reset_sms(v3d);
v3d_reset_v3d(v3d);
v3d_mmu_set_page_table(v3d);
@@ -132,13 +151,13 @@ v3d_reset(struct v3d_dev *v3d)
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
- if (v3d->ver < 33) {
+ if (v3d->ver < V3D_GEN_33) {
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
}
@@ -151,7 +170,7 @@ v3d_flush_l3(struct v3d_dev *v3d)
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
- if (v3d->ver > 32)
+ if (v3d->ver >= V3D_GEN_33)
return;
V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c
index 4c5e18590a5c..8ec6ed82b3d9 100644
--- a/drivers/gpu/drm/v3d/v3d_gemfs.c
+++ b/drivers/gpu/drm/v3d/v3d_gemfs.c
@@ -3,14 +3,21 @@
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include "v3d_drv.h"
+static int add_param(struct fs_context *fc, const char *key, const char *val)
+{
+ return vfs_parse_fs_string(fc, key, val, strlen(val));
+}
+
void v3d_gemfs_init(struct v3d_dev *v3d)
{
- char huge_opt[] = "huge=within_size";
struct file_system_type *type;
+ struct fs_context *fc;
struct vfsmount *gemfs;
+ int ret;
/*
* By creating our own shmemfs mountpoint, we can pass in
@@ -28,8 +35,16 @@ void v3d_gemfs_init(struct v3d_dev *v3d)
if (!type)
goto err;
- gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt);
- if (IS_ERR(gemfs))
+ fc = fs_context_for_mount(type, SB_KERNMOUNT);
+ if (IS_ERR(fc))
+ goto err;
+ ret = add_param(fc, "source", "tmpfs");
+ if (!ret)
+ ret = add_param(fc, "huge", "within_size");
+ if (!ret)
+ gemfs = fc_mount_longterm(fc);
+ put_fs_context(fc);
+ if (ret)
goto err;
v3d->gemfs = gemfs;
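The gemfs hunk replaces the option-string vfs_kern_mount() call with the fs_context API: build a context, feed it parameters, mount. A sketch of the same sequence using fc_mount() (the fc_mount_longterm() variant used above additionally marks the mount as long-lived and kernel-internal); not compile-tested:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/string.h>

static struct vfsmount *mount_huge_tmpfs(void)
{
	struct file_system_type *type = get_fs_type("tmpfs");
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret;

	if (!type)
		return ERR_PTR(-ENODEV);

	fc = fs_context_for_mount(type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ret = vfs_parse_fs_string(fc, "source", "tmpfs", strlen("tmpfs"));
	if (!ret)
		ret = vfs_parse_fs_string(fc, "huge", "within_size",
					  strlen("within_size"));
	mnt = ret ? ERR_PTR(ret) : fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}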
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 72b6a119412f..a515a301e480 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -143,7 +143,7 @@ v3d_irq(int irq, void *arg)
/* We shouldn't be triggering these if we have GMP in
* always-allowed mode.
*/
- if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
+ if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV))
dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we &
@@ -186,27 +186,59 @@ v3d_hub_irq(int irq, void *arg)
u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
(v3d->va_width - 32));
- static const char *const v3d41_axi_ids[] = {
- "L2T",
- "PTB",
- "PSE",
- "TLB",
- "CLE",
- "TFU",
- "MMU",
- "GMP",
+ static const struct {
+ u32 begin;
+ u32 end;
+ const char *client;
+ } v3d41_axi_ids[] = {
+ {0x00, 0x20, "L2T"},
+ {0x20, 0x21, "PTB"},
+ {0x40, 0x41, "PSE"},
+ {0x60, 0x80, "TLB"},
+ {0x80, 0x88, "CLE"},
+ {0xA0, 0xA1, "TFU"},
+ {0xC0, 0xE0, "MMU"},
+ {0xE0, 0xE1, "GMP"},
+ }, v3d71_axi_ids[] = {
+ {0x00, 0x30, "L2T"},
+ {0x30, 0x38, "CLE"},
+ {0x38, 0x39, "PTB"},
+ {0x39, 0x3A, "PSE"},
+ {0x3A, 0x3B, "CSD"},
+ {0x40, 0x60, "TLB"},
+ {0x60, 0x70, "MMU"},
+ {0x7C, 0x7E, "TFU"},
+ {0x7F, 0x80, "GMP"},
};
const char *client = "?";
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
- if (v3d->ver >= 41) {
- axi_id = axi_id >> 5;
- if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
- client = v3d41_axi_ids[axi_id];
+ if (v3d->ver >= V3D_GEN_71) {
+ size_t i;
+
+ axi_id = axi_id & 0x7F;
+ for (i = 0; i < ARRAY_SIZE(v3d71_axi_ids); i++) {
+ if (axi_id >= v3d71_axi_ids[i].begin &&
+ axi_id < v3d71_axi_ids[i].end) {
+ client = v3d71_axi_ids[i].client;
+ break;
+ }
+ }
+ } else if (v3d->ver >= V3D_GEN_41) {
+ size_t i;
+
+ axi_id = axi_id & 0xFF;
+ for (i = 0; i < ARRAY_SIZE(v3d41_axi_ids); i++) {
+ if (axi_id >= v3d41_axi_ids[i].begin &&
+ axi_id < v3d41_axi_ids[i].end) {
+ client = v3d41_axi_ids[i].client;
+ break;
+ }
+ }
}
- dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+ dev_err(v3d->drm.dev, "MMU error from client %s (0x%x) at 0x%llx%s%s%s\n",
client, axi_id, (long long)vio_addr,
((intsts & V3D_HUB_INT_MMU_WRV) ?
", write violation" : ""),
@@ -217,7 +249,7 @@ v3d_hub_irq(int irq, void *arg)
status = IRQ_HANDLED;
}
- if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
+ if (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
dev_err(v3d->drm.dev, "GMP Violation\n");
status = IRQ_HANDLED;
}
@@ -228,7 +260,7 @@ v3d_hub_irq(int irq, void *arg)
int
v3d_irq_init(struct v3d_dev *v3d)
{
- int irq1, ret, core;
+ int irq, ret, core;
INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
@@ -239,17 +271,24 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver));
- irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
- if (irq1 == -EPROBE_DEFER)
- return irq1;
- if (irq1 > 0) {
- ret = devm_request_irq(v3d->drm.dev, irq1,
+ irq = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
+ if (irq == -EPROBE_DEFER)
+ return irq;
+ if (irq > 0) {
+ v3d->irq[V3D_CORE_IRQ] = irq;
+
+ ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
v3d_irq, IRQF_SHARED,
"v3d_core0", v3d);
if (ret)
goto fail;
- ret = devm_request_irq(v3d->drm.dev,
- platform_get_irq(v3d_to_pdev(v3d), 0),
+
+ irq = platform_get_irq(v3d_to_pdev(v3d), 0);
+ if (irq < 0)
+ return irq;
+ v3d->irq[V3D_HUB_IRQ] = irq;
+
+ ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_HUB_IRQ],
v3d_hub_irq, IRQF_SHARED,
"v3d_hub", v3d);
if (ret)
@@ -257,8 +296,12 @@ v3d_irq_init(struct v3d_dev *v3d)
} else {
v3d->single_irq_line = true;
- ret = devm_request_irq(v3d->drm.dev,
- platform_get_irq(v3d_to_pdev(v3d), 0),
+ irq = platform_get_irq(v3d_to_pdev(v3d), 0);
+ if (irq < 0)
+ return irq;
+ v3d->irq[V3D_CORE_IRQ] = irq;
+
+ ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
v3d_irq, IRQF_SHARED,
"v3d", v3d);
if (ret)
@@ -299,6 +342,12 @@ v3d_irq_disable(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);
+ /* Finish any interrupt handler still in flight. */
+ for (int i = 0; i < V3D_MAX_IRQS; i++) {
+ if (v3d->irq[i])
+ synchronize_irq(v3d->irq[i]);
+ }
+
/* Clear any pending interrupts we might have left. */
for (core = 0; core < v3d->cores; core++)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
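Recording the requested IRQ numbers lets v3d_irq_disable() enforce a safe teardown order: mask all sources, synchronize_irq() each line so no handler is still running, then clear stale status. Condensed sketch with hypothetical parameters:

#include <linux/interrupt.h>
#include <linux/io.h>

/* @mask_set_reg, @irqs and @nirqs are hypothetical parameters. */
static void quiesce_irqs(void __iomem *mask_set_reg, int *irqs, int nirqs)
{
	writel(~0, mask_set_reg);		/* 1. mask every source */

	for (int i = 0; i < nirqs; i++)
		if (irqs[i])
			synchronize_irq(irqs[i]);	/* 2. drain handlers in flight */

	/* 3. only now is it safe to clear pending status */
}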
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 3ebda2fa46fc..9a3fe5255874 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -200,10 +200,10 @@ void v3d_perfmon_init(struct v3d_dev *v3d)
const struct v3d_perf_counter_desc *counters = NULL;
unsigned int max = 0;
- if (v3d->ver >= 71) {
+ if (v3d->ver >= V3D_GEN_71) {
counters = v3d_v71_performance_counters;
max = ARRAY_SIZE(v3d_v71_performance_counters);
- } else if (v3d->ver >= 42) {
+ } else if (v3d->ver >= V3D_GEN_42) {
counters = v3d_v42_performance_counters;
max = ARRAY_SIZE(v3d_v42_performance_counters);
}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6da3c69082bd..c1870265eaee 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -515,4 +515,30 @@
# define V3D_ERR_VPAERGS BIT(1)
# define V3D_ERR_VPAEABB BIT(0)
+#define V3D_SMS_REE_CS 0x00000
+#define V3D_SMS_TEE_CS 0x00400
+# define V3D_SMS_INTERRUPT BIT(31)
+# define V3D_SMS_POWER_OFF BIT(30)
+# define V3D_SMS_CLEAR_POWER_OFF BIT(29)
+# define V3D_SMS_LOCK BIT(28)
+# define V3D_SMS_CLEAR_LOCK BIT(27)
+# define V3D_SMS_SVP_MODE_EXIT BIT(26)
+# define V3D_SMS_CLEAR_SVP_MODE_EXIT BIT(25)
+# define V3D_SMS_SVP_MODE_ENTER BIT(24)
+# define V3D_SMS_CLEAR_SVP_MODE_ENTER BIT(23)
+# define V3D_SMS_THEIR_MODE_EXIT BIT(22)
+# define V3D_SMS_THEIR_MODE_ENTER BIT(21)
+# define V3D_SMS_OUR_MODE_EXIT BIT(20)
+# define V3D_SMS_CLEAR_OUR_MODE_EXIT BIT(19)
+# define V3D_SMS_SEQ_PC_MASK V3D_MASK(16, 10)
+# define V3D_SMS_SEQ_PC_SHIFT 10
+# define V3D_SMS_HUBCORE_STATUS_MASK V3D_MASK(9, 8)
+# define V3D_SMS_HUBCORE_STATUS_SHIFT 8
+# define V3D_SMS_NEW_MODE_MASK V3D_MASK(7, 6)
+# define V3D_SMS_NEW_MODE_SHIFT 6
+# define V3D_SMS_OLD_MODE_MASK V3D_MASK(5, 4)
+# define V3D_SMS_OLD_MODE_SHIFT 4
+# define V3D_SMS_STATE_MASK V3D_MASK(3, 0)
+# define V3D_SMS_STATE_SHIFT 0
+
#endif /* V3D_REGS_H */
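The new V3D_SMS_* definitions follow the driver's usual mask/shift convention. A runnable userspace sketch of the field extraction they enable, with local reimplementations of the V3D_MASK()/V3D_GET_FIELD() helpers:

#include <stdint.h>
#include <stdio.h>

#define MASK(high, low)		(((1u << ((high) - (low) + 1)) - 1) << (low))
#define GET_FIELD(word, m, s)	(((word) & (m)) >> (s))

#define SMS_STATE_MASK	MASK(3, 0)
#define SMS_STATE_SHIFT	0

int main(void)
{
	uint32_t cs = 0x0000002d;	/* made-up register value */

	/* low nibble is the state field: 0xd == power-off state */
	printf("state %#x\n", GET_FIELD(cs, SMS_STATE_MASK, SMS_STATE_SHIFT));
	return 0;
}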
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 4a7701a33cf8..cb9df8822472 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -199,7 +199,6 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
struct v3d_dev *v3d = job->v3d;
struct v3d_file_priv *file = job->file->driver_priv;
struct v3d_stats *global_stats = &v3d->queue[queue].stats;
- struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
unsigned long flags;
@@ -209,7 +208,12 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
else
preempt_disable();
- v3d_stats_update(local_stats, now);
+ /* Don't update the local stats if the file context has already been closed */
+ if (file)
+ v3d_stats_update(&file->stats[queue], now);
+ else
+ drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n");
+
v3d_stats_update(global_stats, now);
if (IS_ENABLED(CONFIG_LOCKDEP))
@@ -357,11 +361,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
- if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
+ if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
@@ -412,7 +416,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
*
* XXX: Set the CFG7 register
*/
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);
/* CFG0 write kicks off the job. */
@@ -741,14 +745,9 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
mutex_unlock(&v3d->reset_lock);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
-/* If the current address or return address have changed, then the GPU
- * has probably made progress and we should delay the reset. This
- * could fail if the GPU got in an infinite loop in the CL, but that
- * is pretty unlikely outside of an i-g-t testcase.
- */
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
@@ -758,10 +757,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
+ /* If the current address or return address have changed, then the GPU
+ * has probably made progress and we should delay the reset. This
+ * could fail if the GPU got in an infinite loop in the CL, but that
+ * is pretty unlikely outside of an i-g-t testcase.
+ */
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
- return DRM_GPU_SCHED_STAT_NOMINAL;
+
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
return v3d_gpu_reset_for_timeout(v3d, sched_job);
@@ -800,12 +805,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
struct v3d_dev *v3d = job->base.v3d;
u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
- /* If we've made progress, skip reset and let the timer get
- * rearmed.
+ /* If we've made progress, skip reset, add the job to the pending
+ * list, and let the timer get rearmed.
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
- return DRM_GPU_SCHED_STAT_NOMINAL;
+
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
return v3d_gpu_reset_for_timeout(v3d, sched_job);
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 4ff5de46fb22..5171ffe9012d 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -169,7 +169,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->file = file_priv;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- 1, v3d_priv);
+ 1, v3d_priv, file_priv->client_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
index e70d7c3076ac..577d9a956369 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
@@ -61,6 +61,19 @@ static const struct drm_display_mode default_mode = {
DRM_SIMPLE_MODE(640, 480, 64, 48)
};
+/**
+ * vc4_mock_atomic_add_output() - Enables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to enable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds an output CRTC and connector to a state, and enables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_add_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -75,30 +88,49 @@ int vc4_mock_atomic_add_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state->active = true;
return 0;
}
+/**
+ * vc4_mock_atomic_del_output() - Disables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to disable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds an output CRTC and connector to a state, and disables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_del_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -113,26 +145,32 @@ int vc4_mock_atomic_del_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
crtc_state->active = false;
ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 992e8f5c5c6e..d1f694029169 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -20,7 +20,6 @@
struct pv_muxing_priv {
struct vc4_dev *vc4;
- struct drm_atomic_state *state;
};
static bool check_fifo_conflict(struct kunit *test,
@@ -677,18 +676,41 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test,
@@ -700,33 +722,61 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
KUNIT_EXPECT_TRUE(test, check_channel_for_encoder(test, state, enc_type,
params->check_fn));
}
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
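The -EDEADLK dance above (clear the state, back off, retry from the allocation label) repeats in every test below. One way it could be factored into a helper, sketched here with a hypothetical build() callback; this helper is not part of the patch:

#include <drm/drm_atomic.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modeset_lock.h>

static int build_and_check(struct kunit *test, struct drm_device *drm,
			   int (*build)(struct kunit *test,
					struct drm_atomic_state *state))
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		goto out;
	}

	ret = build(test, state);		/* may return -EDEADLK */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);	/* drop the half-built state */
		if (!drm_modeset_backoff(&ctx))	/* wait on the contended lock */
			goto retry;
	}
out:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}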
static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static int vc4_pv_muxing_test_init(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
- struct drm_modeset_acquire_ctx ctx;
struct pv_muxing_priv *priv;
- struct drm_device *drm;
struct vc4_dev *vc4;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
@@ -737,15 +787,6 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
priv->vc4 = vc4;
- drm_modeset_acquire_init(&ctx, 0);
-
- drm = &vc4->base;
- priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
return 0;
}
@@ -800,13 +841,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -823,13 +877,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -874,16 +941,35 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -908,13 +994,26 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -968,25 +1067,50 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
-
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 7125773889f1..4aaa587be3a5 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -679,7 +679,7 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo)
static void vc4_bo_cache_time_timer(struct timer_list *t)
{
- struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
+ struct vc4_dev *vc4 = timer_container_of(vc4, t, bo_cache.time_timer);
schedule_work(&vc4->bo_cache.time_work);
}
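from_timer() is replaced here by timer_container_of(), which keeps the same container_of() semantics: recover the enclosing object from its embedded timer_list. Minimal sketch with made-up names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	int pings;
};

static void foo_timer_fn(struct timer_list *t)
{
	/* recover the struct foo this timer_list is embedded in */
	struct foo *foo = timer_container_of(foo, t, timer);

	foo->pings++;
	mod_timer(&foo->timer, jiffies + HZ);	/* re-arm in one second */
}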
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 779b22efe27b..458e5d987964 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -552,8 +552,6 @@ struct vc4_dsi {
struct vc4_encoder encoder;
struct mipi_dsi_host dsi_host;
- struct kref kref;
-
struct platform_device *pdev;
struct drm_bridge *out_bridge;
@@ -1160,12 +1158,13 @@ static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
}
static int vc4_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge,
&dsi->bridge, flags);
}
@@ -1621,29 +1620,11 @@ static void vc4_dsi_dma_chan_release(void *ptr)
dsi->reg_dma_chan = NULL;
}
-static void vc4_dsi_release(struct kref *kref)
-{
- struct vc4_dsi *dsi =
- container_of(kref, struct vc4_dsi, kref);
-
- kfree(dsi);
-}
-
-static void vc4_dsi_get(struct vc4_dsi *dsi)
-{
- kref_get(&dsi->kref);
-}
-
-static void vc4_dsi_put(struct vc4_dsi *dsi)
-{
- kref_put(&dsi->kref, &vc4_dsi_release);
-}
-
static void vc4_dsi_release_action(struct drm_device *drm, void *ptr)
{
struct vc4_dsi *dsi = ptr;
- vc4_dsi_put(dsi);
+ drm_bridge_put(&dsi->bridge);
}
static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
@@ -1654,7 +1635,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder = &dsi->encoder.base;
int ret;
- vc4_dsi_get(dsi);
+ drm_bridge_get(&dsi->bridge);
ret = drmm_add_action_or_reset(drm, vc4_dsi_release_action, dsi);
if (ret)
@@ -1809,15 +1790,12 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi;
- dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct vc4_dsi, bridge, &vc4_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dev_set_drvdata(dev, dsi);
- kref_init(&dsi->kref);
-
dsi->pdev = pdev;
- dsi->bridge.funcs = &vc4_dsi_bridge_funcs;
#ifdef CONFIG_OF
dsi->bridge.of_node = dev->of_node;
#endif
@@ -1835,7 +1813,6 @@ static void vc4_dsi_dev_remove(struct platform_device *pdev)
struct vc4_dsi *dsi = dev_get_drvdata(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
- vc4_dsi_put(dsi);
}
struct platform_driver vc4_dsi_driver = {
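Dropping the private kref works because devm_drm_bridge_alloc() hands out a bridge whose lifetime the DRM bridge core refcounts; drm_bridge_get()/drm_bridge_put() replace the bespoke helpers. Sketch of the allocation pattern (my_bridge_funcs and the surrounding error handling are hypothetical; not compile-tested):

#include <drm/drm_bridge.h>
#include <linux/platform_device.h>

struct my_dsi {
	struct drm_bridge bridge;	/* refcounted by the bridge core */
	/* ... further driver state ... */
};

static int my_probe(struct platform_device *pdev)
{
	struct my_dsi *dsi;

	/* allocation and refcount setup in one devm-managed call */
	dsi = devm_drm_bridge_alloc(&pdev->dev, struct my_dsi, bridge,
				    &my_bridge_funcs);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	drm_bridge_get(&dsi->bridge);	/* hold a ref while bound */
	/* ... */
	drm_bridge_put(&dsi->bridge);	/* last put frees the object */
	return 0;
}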
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 8125f87edc60..255e5817618e 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -326,7 +326,7 @@ vc4_reset_work(struct work_struct *work)
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
- struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
+ struct vc4_dev *vc4 = timer_container_of(vc4, t, hangcheck.timer);
struct drm_device *dev = &vc4->base;
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 37238a12baa5..07c91b450f93 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -51,6 +51,7 @@
#include <linux/reset.h>
#include <sound/dmaengine_pcm.h>
#include <sound/hdmi-codec.h>
+#include <sound/jack.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -372,13 +373,13 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
* the lock for now.
*/
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
+
if (status == connector_status_disconnected) {
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return;
}
- drm_atomic_helper_connector_hdmi_hotplug(connector, status);
-
cec_s_phys_addr(vc4_hdmi->cec_adap,
connector->display_info.source_physical_address, false);
@@ -561,7 +562,7 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
ret = drm_connector_hdmi_audio_init(connector, dev->dev,
&vc4_hdmi_audio_funcs,
- 8, false, -1);
+ 8, 0, false, -1);
if (ret)
return ret;
@@ -2175,6 +2176,22 @@ static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = {
.shutdown = vc4_hdmi_audio_shutdown,
};
+static int vc4_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct vc4_hdmi *vc4_hdmi = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component;
+ int ret;
+
+ ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT,
+ &vc4_hdmi->hdmi_jack);
+ if (ret) {
+ dev_err(rtd->dev, "HDMI Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_component_set_jack(component, &vc4_hdmi->hdmi_jack, NULL);
+}
+
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2274,6 +2291,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
return ret;
}
+ ret = drm_connector_hdmi_audio_init(&vc4_hdmi->connector, dev,
+ &vc4_hdmi_audio_funcs, 8, 0, false,
+ -1);
+ if (ret)
+ return ret;
+
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
@@ -2288,6 +2311,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dai_link->cpus->dai_name = dev_name(dev);
dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev);
dai_link->platforms->name = dev_name(dev);
+ dai_link->init = vc4_hdmi_codec_init;
card->dai_link = dai_link;
card->num_links = 1;
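Note: the new vc4_hdmi_codec_init() only creates the jack and hands it to the hdmi-codec component via snd_soc_component_set_jack(); the component then takes over reporting plug/unplug events. The extra 0 passed to drm_connector_hdmi_audio_init() fills the i2s_formats argument added to its signature, with 0 keeping the default format set. For reference, jack reporting itself boils down to the following (a sketch, not code from this driver):

/* Plug detected: set the LINEOUT bit within the given mask. */
snd_soc_jack_report(&vc4_hdmi->hdmi_jack, SND_JACK_LINEOUT,
		    SND_JACK_LINEOUT);

/* Unplug: clear the same bits. */
snd_soc_jack_report(&vc4_hdmi->hdmi_jack, 0, SND_JACK_LINEOUT);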
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index e3d989ca302b..a31157c99bee 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -4,6 +4,7 @@
#include <drm/drm_connector.h>
#include <media/cec.h>
#include <sound/dmaengine_pcm.h>
+#include <sound/hdmi-codec.h>
#include <sound/soc.h>
#include "vc4_drv.h"
@@ -211,6 +212,12 @@ struct vc4_hdmi {
* KMS hooks. Protected by @mutex.
*/
enum hdmi_colorspace output_format;
+
+ /**
+ * @hdmi_jack: Represents the connection state of the HDMI plug, for
+ * ALSA jack detection.
+ */
+ struct snd_soc_jack hdmi_jack;
};
#define connector_to_vc4_hdmi(_connector) \
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index f5b167417428..8f983edb81ff 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -530,6 +530,7 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -568,7 +569,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
mode_cmd = &mode_cmd_local;
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c5e84d3494d2..056d344c5411 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -2080,7 +2080,7 @@ static int vc6_plane_mode_set(struct drm_plane *plane,
/* HPPF plane 1 */
vc4_dlist_write(vc4_state, kernel);
/* VPPF plane 1 */
- vc4_dlist_write(vc4_state, kernel);
+ vc4_dlist_write(vc4_state, kernel);
}
}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 2752ab4f1c97..260c64733972 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -32,7 +32,7 @@
#include <linux/dma-buf.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
@@ -52,7 +52,7 @@
static struct vgem_device {
struct drm_device drm;
- struct platform_device *platform;
+ struct faux_device *faux_dev;
} *vgem_device;
static int vgem_open(struct drm_device *dev, struct drm_file *file)
@@ -127,27 +127,27 @@ static const struct drm_driver vgem_driver = {
static int __init vgem_init(void)
{
int ret;
- struct platform_device *pdev;
+ struct faux_device *fdev;
- pdev = platform_device_register_simple("vgem", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ fdev = faux_device_create("vgem", NULL, NULL);
+ if (!fdev)
+ return -ENODEV;
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_unregister;
}
- dma_coerce_mask_and_coherent(&pdev->dev,
+ dma_coerce_mask_and_coherent(&fdev->dev,
DMA_BIT_MASK(64));
- vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver,
+ vgem_device = devm_drm_dev_alloc(&fdev->dev, &vgem_driver,
struct vgem_device, drm);
if (IS_ERR(vgem_device)) {
ret = PTR_ERR(vgem_device);
goto out_devres;
}
- vgem_device->platform = pdev;
+ vgem_device->faux_dev = fdev;
/* Final step: expose the device/driver to userspace */
ret = drm_dev_register(&vgem_device->drm, 0);
@@ -157,19 +157,19 @@ static int __init vgem_init(void)
return 0;
out_devres:
- devres_release_group(&pdev->dev, NULL);
+ devres_release_group(&fdev->dev, NULL);
out_unregister:
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
return ret;
}
static void __exit vgem_exit(void)
{
- struct platform_device *pdev = vgem_device->platform;
+ struct faux_device *fdev = vgem_device->faux_dev;
drm_dev_unregister(&vgem_device->drm);
- devres_release_group(&pdev->dev, NULL);
- platform_device_unregister(pdev);
+ devres_release_group(&fdev->dev, NULL);
+ faux_device_destroy(fdev);
}
module_init(vgem_init);
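Note: faux_device_create() gives single-instance virtual drivers like vgem a real struct device without platform-bus boilerplate. Mind the differing error convention: it returns NULL on failure rather than an ERR_PTR, hence the -ENODEV fallback above. The lifecycle, sketched with an illustrative device name:

#include <linux/device/faux.h>

static struct faux_device *fdev;

static int __init my_init(void)
{
	/* NULL parent places the device under the faux root; NULL ops
	 * are fine when no probe/remove callbacks are needed. */
	fdev = faux_device_create("my-virtual-dev", NULL, NULL);
	if (!fdev)
		return -ENODEV;
	return 0;
}

static void __exit my_exit(void)
{
	faux_device_destroy(fdev);
}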
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 37bb1fb58cf9..fd76730fd38c 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -53,30 +53,15 @@ static void vgem_fence_release(struct dma_fence *base)
dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
-{
- snprintf(str, size, "%llu", fence->seqno);
-}
-
-static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
- int size)
-{
- snprintf(str, size, "%llu",
- dma_fence_is_signaled(fence) ? fence->seqno : 0);
-}
-
static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.release = vgem_fence_release,
-
- .fence_value_str = vgem_fence_value_str,
- .timeline_value_str = vgem_fence_timeline_value_str,
};
static void vgem_fence_timeout(struct timer_list *t)
{
- struct vgem_fence *fence = from_timer(fence, t, timer);
+ struct vgem_fence *fence = timer_container_of(fence, t, timer);
dma_fence_signal(&fence->base);
}
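Note: the .fence_value_str/.timeline_value_str hooks can go because the dma_fence debug output falls back to printing the plain seqno when they are absent, which is essentially what vgem's implementations produced anyway. A minimal dma_fence_ops after this cleanup (names illustrative):

static const char *my_fence_get_driver_name(struct dma_fence *f)
{
	return "my-driver";
}

static const char *my_fence_get_timeline_name(struct dma_fence *f)
{
	return "my-timeline";
}

static const struct dma_fence_ops my_fence_ops = {
	/* value_str hooks omitted: the core prints fence->seqno itself */
	.get_driver_name = my_fence_get_driver_name,
	.get_timeline_name = my_fence_get_timeline_name,
};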
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 59a45e74a641..e5805ca646c7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -66,6 +66,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
static int
virtio_gpu_framebuffer_init(struct drm_device *dev,
struct virtio_gpu_framebuffer *vgfb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
@@ -73,7 +74,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
vgfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vgfb->base, info, mode_cmd);
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
@@ -293,6 +294,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
static struct drm_framebuffer *
virtio_gpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj = NULL;
@@ -314,7 +316,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
+ ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, info, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index e32e680c7197..71c6ccad4b99 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -130,10 +130,10 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
static void virtio_gpu_shutdown(struct virtio_device *vdev)
{
- /*
- * drm does its own synchronization on shutdown.
- * Do nothing here, opt out of device reset.
- */
+ struct drm_device *dev = vdev->priv;
+
+ /* stop talking to the device */
+ drm_dev_unplug(dev);
}
static void virtio_gpu_config_changed(struct virtio_device *vdev)
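Note: drm_dev_unplug() marks the DRM device as gone, so pending and future ioctls fail cleanly instead of touching virtio queues the transport is about to reset. Code paths that can race with shutdown bracket their hardware access like this (a generic sketch):

int idx;

if (!drm_dev_enter(dev, &idx))
	return -ENODEV;	/* unplugged: skip all hardware access */

/* ... safe to talk to the device here ... */

drm_dev_exit(idx);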
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..44c1d8ef3c4d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f)
return false;
}
-static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
-{
- snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
-}
-
-static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
- int size)
-{
- struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
-
- snprintf(str, size, "%llu",
- (u64)atomic64_read(&fence->drv->last_fence_id));
-}
-
static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name = virtio_gpu_get_timeline_name,
.signaled = virtio_gpu_fence_signaled,
- .fence_value_str = virtio_gpu_fence_value_str,
- .timeline_value_str = virtio_gpu_timeline_value_str,
};
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 87e584add042..698ea7adb951 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -366,7 +366,7 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
obj = new_state->fb->obj[0];
- if (bo->dumb || obj->import_attach) {
+ if (bo->dumb || drm_gem_is_imported(obj)) {
vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
vgdev->fence_drv.context,
0);
@@ -374,7 +374,7 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return -ENOMEM;
}
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
if (ret)
goto err_fence;
@@ -417,7 +417,7 @@ static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
}
obj = state->fb->obj[0];
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
virtio_gpu_cleanup_imported_obj(obj);
}
@@ -508,11 +508,19 @@ static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
- /* Only support mapped shmem bo */
- if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
+ if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base))
return -ENODEV;
- iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ if (bo->base.vaddr) {
+ iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ } else {
+ struct drm_gem_shmem_object *shmem = &bo->base;
+
+ if (!shmem->pages)
+ return -ENODEV;
+ /* map scanout buffer later */
+ sb->pages = shmem->pages;
+ }
sb->format = plane->state->fb->format;
sb->height = plane->state->fb->height;
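Note: drm_gem_is_imported() replaces the open-coded obj->import_attach tests, so callers no longer depend on how imports are tracked internally. The pattern, sketched with an illustrative callback:

#include <drm/drm_gem.h>

static int my_prepare(struct drm_gem_object *obj)
{
	/* Imported (PRIME) buffers are owned by their exporter and may
	 * need a different mapping path than locally allocated BOs. */
	if (drm_gem_is_imported(obj))
		return -EOPNOTSUPP;	/* illustrative policy */

	return 0;
}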
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 4de2a63ccd18..ce49282198cb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -75,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
- .cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = virtgpu_gem_map_dma_buf,
@@ -207,7 +206,7 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct dma_buf_attachment *attach = obj->import_attach;
- if (attach) {
+ if (drm_gem_is_imported(obj)) {
struct dma_buf *dmabuf = attach->dmabuf;
dma_resv_lock(dmabuf->resv, NULL);
diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig
index 9def079f685b..3c02f928ffe6 100644
--- a/drivers/gpu/drm/vkms/Kconfig
+++ b/drivers/gpu/drm/vkms/Kconfig
@@ -14,3 +14,18 @@ config DRM_VKMS
a VKMS.
If M is selected the module will be called vkms.
+
+config DRM_VKMS_KUNIT_TEST
+ tristate "KUnit tests for VKMS" if !KUNIT_ALL_TESTS
+ depends on DRM_VKMS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for VKMS. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on VKMS.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 1b28a6a32948..d657865e573f 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -6,6 +6,9 @@ vkms-y := \
vkms_formats.o \
vkms_crtc.o \
vkms_composer.o \
- vkms_writeback.o
+ vkms_writeback.o \
+ vkms_connector.o \
+ vkms_config.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/vkms/tests/.kunitconfig b/drivers/gpu/drm/vkms/tests/.kunitconfig
new file mode 100644
index 000000000000..6a2d87068edc
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_VKMS=y
+CONFIG_DRM_VKMS_KUNIT_TEST=y
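Note: with this .kunitconfig in place, the suites added below can be run under UML through the standard wrapper, e.g. ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/vkms/tests, the usual invocation described in Documentation/dev-tools/kunit/.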
diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile
new file mode 100644
index 000000000000..5750f0bd9d40
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+vkms-kunit-tests-y := \
+ vkms_config_test.o \
+ vkms_format_test.o
+
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms-kunit-tests.o
diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
new file mode 100644
index 000000000000..ff4566cf9925
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
@@ -0,0 +1,951 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include "../vkms_config.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static size_t vkms_config_get_num_planes(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_encoders(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_connectors(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ count++;
+
+ return count;
+}
+
+static struct vkms_config_plane *get_first_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ return plane_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_crtc *get_first_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ return crtc_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_encoder *get_first_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ return encoder_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_connector *get_first_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ return connector_cfg;
+
+ return NULL;
+}
+
+struct default_config_case {
+ bool enable_cursor;
+ bool enable_writeback;
+ bool enable_overlay;
+};
+
+static void vkms_config_test_empty_config(struct kunit *test)
+{
+ struct vkms_config *config;
+ const char *dev_name = "test";
+
+ config = vkms_config_create(dev_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* The dev_name string and the config have different lifetimes */
+ dev_name = NULL;
+ KUNIT_EXPECT_STREQ(test, vkms_config_get_device_name(config), "test");
+
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static struct default_config_case default_config_cases[] = {
+ { false, false, false },
+ { true, false, false },
+ { true, true, false },
+ { true, false, true },
+ { false, true, false },
+ { false, true, true },
+ { false, false, true },
+ { true, true, true },
+};
+
+KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL);
+
+static void vkms_config_test_default_config(struct kunit *test)
+{
+ const struct default_config_case *params = test->param_value;
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int n_primaries = 0;
+ int n_cursors = 0;
+ int n_overlays = 0;
+
+ config = vkms_config_default_create(params->enable_cursor,
+ params->enable_writeback,
+ params->enable_overlay);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Planes */
+ vkms_config_for_each_plane(config, plane_cfg) {
+ switch (vkms_config_plane_get_type(plane_cfg)) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ n_primaries++;
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ n_cursors++;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ n_overlays++;
+ break;
+ default:
+ KUNIT_FAIL_AND_ABORT(test, "Unknown plane type");
+ }
+ }
+ KUNIT_EXPECT_EQ(test, n_primaries, 1);
+ KUNIT_EXPECT_EQ(test, n_cursors, params->enable_cursor ? 1 : 0);
+ KUNIT_EXPECT_EQ(test, n_overlays, params->enable_overlay ? 8 : 0);
+
+ /* CRTCs */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+
+ crtc_cfg = get_first_crtc(config);
+ KUNIT_EXPECT_EQ(test, vkms_config_crtc_get_writeback(crtc_cfg),
+ params->enable_writeback);
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ int n_possible_crtcs = 0;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ KUNIT_EXPECT_PTR_EQ(test, crtc_cfg, possible_crtc);
+ n_possible_crtcs++;
+ }
+ KUNIT_EXPECT_EQ(test, n_possible_crtcs, 1);
+ }
+
+ /* Encoders */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 1);
+
+ /* Connectors */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 1);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_planes(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ int n_planes = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ n_planes++;
+ KUNIT_ASSERT_EQ(test, n_planes, 0);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+ n_planes = 0;
+
+ plane_cfg2 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 2);
+ n_planes = 0;
+
+ vkms_config_destroy_plane(plane_cfg1);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ int n_encoders = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ n_encoders++;
+ KUNIT_ASSERT_EQ(test, n_encoders, 0);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_destroy_encoder(config, encoder_cfg2);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_connectors(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int n_connectors = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ n_connectors++;
+ KUNIT_ASSERT_EQ(test, n_connectors, 0);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ connector_cfg2 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1 &&
+ connector_cfg != connector_cfg2)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 2);
+ n_connectors = 0;
+
+ vkms_config_destroy_connector(connector_cfg2);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_plane_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No planes */
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many planes */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_plane(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_type(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: No primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple primary planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple cursor planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary and one cursor plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without primary plane */
+ crtc_cfg = vkms_config_create_crtc(config);
+ encoder_cfg = vkms_config_create_encoder(config);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with a primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: Primary plane without a possible CRTC */
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_crtc_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No CRTCs */
+ crtc_cfg = get_first_crtc(config);
+ vkms_config_destroy_crtc(config, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many CRTCs */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_crtc(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_encoder_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No encoders */
+ encoder_cfg = get_first_encoder(config);
+ vkms_config_destroy_encoder(config, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many encoders */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_encoder(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ crtc_cfg1 = get_first_crtc(config);
+
+ /* Invalid: Encoder without a possible CRTC */
+ encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with shared encoder */
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without encoders */
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: First CRTC with two possible encoders */
+ vkms_config_destroy_plane(plane_cfg);
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_connector_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No connectors */
+ connector_cfg = get_first_connector(config);
+ vkms_config_destroy_connector(connector_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many connectors */
+ for (n = 0; n <= 32; n++)
+ connector_cfg = vkms_config_create_connector(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_connector_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg = get_first_encoder(config);
+ connector_cfg = get_first_connector(config);
+
+ /* Invalid: Connector without a possible encoder */
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_attach_different_configs(struct kunit *test)
+{
+ struct vkms_config *config1, *config2;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int err;
+
+ config1 = vkms_config_create("test1");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config1);
+
+ config2 = vkms_config_create("test2");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config2);
+
+ plane_cfg1 = vkms_config_create_plane(config1);
+ crtc_cfg1 = vkms_config_create_crtc(config1);
+ encoder_cfg1 = vkms_config_create_encoder(config1);
+ connector_cfg1 = vkms_config_create_connector(config1);
+
+ plane_cfg2 = vkms_config_create_plane(config2);
+ crtc_cfg2 = vkms_config_create_crtc(config2);
+ encoder_cfg2 = vkms_config_create_encoder(config2);
+ connector_cfg2 = vkms_config_create_connector(config2);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
+
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ vkms_config_destroy(config1);
+ vkms_config_destroy(config2);
+}
+
+static void vkms_config_test_plane_attach_crtc(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *overlay_cfg;
+ struct vkms_config_plane *primary_cfg;
+ struct vkms_config_plane *cursor_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ overlay_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY);
+ primary_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY);
+ cursor_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+
+ /* No primary or cursor planes */
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Overlay plane, but no primary or cursor planes */
+ err = vkms_config_plane_attach_crtc(overlay_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary plane, attaching it twice must fail */
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_NE(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary and cursor planes */
+ err = vkms_config_plane_attach_crtc(cursor_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_cursor_plane(config, crtc_cfg),
+ cursor_cfg);
+
+ /* Detach primary and destroy cursor plane */
+ vkms_config_plane_detach_crtc(overlay_cfg, crtc_cfg);
+ vkms_config_plane_detach_crtc(primary_cfg, crtc_cfg);
+ vkms_config_destroy_plane(cursor_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ plane_cfg2 = vkms_config_create_plane(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and 2 */
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and plane 2 to CRTC 2 */
+ vkms_config_plane_detach_crtc(plane_cfg1, crtc_cfg2);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and 2 */
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and encoder 2 to CRTC 2 */
+ vkms_config_encoder_detach_crtc(encoder_cfg1, crtc_cfg2);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_connector_get_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ int n_encoders = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ connector_cfg2 = vkms_config_create_connector(config);
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+
+ /* No possible encoders */
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoders 1 and 2 */
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1 &&
+ possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoder 1 and connector 2 to encoder 2 */
+ vkms_config_connector_detach_encoder(connector_cfg1, encoder_cfg2);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+
+ vkms_config_destroy(config);
+}
+
+static struct kunit_case vkms_config_test_cases[] = {
+ KUNIT_CASE(vkms_config_test_empty_config),
+ KUNIT_CASE_PARAM(vkms_config_test_default_config,
+ default_config_gen_params),
+ KUNIT_CASE(vkms_config_test_get_planes),
+ KUNIT_CASE(vkms_config_test_get_crtcs),
+ KUNIT_CASE(vkms_config_test_get_encoders),
+ KUNIT_CASE(vkms_config_test_get_connectors),
+ KUNIT_CASE(vkms_config_test_invalid_plane_number),
+ KUNIT_CASE(vkms_config_test_valid_plane_type),
+ KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_crtc_number),
+ KUNIT_CASE(vkms_config_test_invalid_encoder_number),
+ KUNIT_CASE(vkms_config_test_valid_encoder_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_connector_number),
+ KUNIT_CASE(vkms_config_test_valid_connector_possible_encoders),
+ KUNIT_CASE(vkms_config_test_attach_different_configs),
+ KUNIT_CASE(vkms_config_test_plane_attach_crtc),
+ KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_connector_get_possible_encoders),
+ {}
+};
+
+static struct kunit_suite vkms_config_test_suite = {
+ .name = "vkms-config",
+ .test_cases = vkms_config_test_cases,
+};
+
+kunit_test_suite(vkms_config_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kunit test for vkms config utility");
diff --git a/drivers/gpu/drm/vkms/tests/vkms_format_test.c b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
new file mode 100644
index 000000000000..2e1daef94831
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_fixed.h>
+#include <drm/drm_fourcc.h>
+
+#include "../../drm_crtc_internal.h"
+
+#include "../vkms_formats.h"
+
+#define TEST_BUFF_SIZE 50
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+/**
+ * struct pixel_yuv_u8 - Internal representation of a pixel color.
+ * @y: Luma value, stored in 8 bits, without padding, using
+ * machine endianness
+ * @u: Blue difference chroma value, stored in 8 bits, without padding, using
+ * machine endianness
+ * @v: Red difference chroma value, stored in 8 bits, without padding, using
+ * machine endianness
+ */
+struct pixel_yuv_u8 {
+ u8 y, u, v;
+};
+
+/*
+ * struct yuv_u8_to_argb_u16_case - Reference values to test the color
+ * conversions in VKMS from YUV to ARGB
+ *
+ * @encoding: Encoding used to convert RGB to YUV
+ * @range: Range used to convert RGB to YUV
+ * @n_colors: Count of test colors in this case
+ * @format_pair.name: Name used for this color conversion, used to
+ * clarify the test results
+ * @format_pair.argb: ARGB color tested
+ * @format_pair.yuv: Same color as @format_pair.argb, but converted to
+ * YUV using @encoding and @range.
+ */
+struct yuv_u8_to_argb_u16_case {
+ enum drm_color_encoding encoding;
+ enum drm_color_range range;
+ size_t n_colors;
+ struct format_pair {
+ char *name;
+ struct pixel_yuv_u8 yuv;
+ struct pixel_argb_u16 argb;
+ } colors[TEST_BUFF_SIZE];
+};
+
+/*
+ * The YUV color representations were acquired via the colour Python framework.
+ * Below are the function calls used to generate each case.
+ *
+ * For more information, see the docs:
+ * https://colour.readthedocs.io/en/master/generated/colour.RGB_to_YCbCr.html
+ */
+static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ *
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT601 full range using the ITU-R BT.601 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT601,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4c, 0x55, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x96, 0x2c, 0x15 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d, 0xff, 0x6b }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT601 limited range using the ITU-R BT.601 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT601,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x51, 0x5a, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x91, 0x36, 0x22 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x29, 0xf0, 0x6e }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT709 full range using the ITU-R BT.709 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT709,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x36, 0x63, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xb6, 0x1e, 0x0c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x12, 0xff, 0x74 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT709 limited range using the ITU-R BT.709 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT709,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x3f, 0x66, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad, 0x2a, 0x1a }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x20, 0xf0, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT2020 full range using the ITU-R BT.2020 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT2020,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x43, 0x5c, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad, 0x24, 0x0b }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x0f, 0xff, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT2020 limited range using the ITU-R BT.2020 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT2020,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4a, 0x61, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xa4, 0x2f, 0x19 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d, 0xf0, 0x77 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+};
+
+/*
+ * vkms_format_test_yuv_u8_to_argb_u16 - Test the conversion from YUV
+ * colors to ARGB colors in VKMS
+ *
+ * This test will use the functions get_conversion_matrix_to_argb_u16 and
+ * argb_u16_from_yuv888 to convert YUV colors (stored in
+ * yuv_u8_to_argb_u16_cases) into ARGB colors.
+ *
+ * The conversion between YUV and RGB is not fully reversible, so there may be
+ * some difference between the expected value and the result.
+ * In addition, there may be some rounding error, as the input color is 8 bits
+ * and the output color is 16 bits.
+ */
+static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
+{
+ const struct yuv_u8_to_argb_u16_case *param = test->param_value;
+ struct pixel_argb_u16 argb;
+
+ for (size_t i = 0; i < param->n_colors; i++) {
+ const struct format_pair *color = &param->colors[i];
+ struct conversion_matrix matrix;
+
+ get_conversion_matrix_to_argb_u16
+ (DRM_FORMAT_NV12, param->encoding, param->range, &matrix);
+
+ argb = argb_u16_from_yuv888(color->yuv.y, color->yuv.u, color->yuv.v, &matrix);
+
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.a, color->argb.a), 0x1ff,
+ "On the A channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.a, argb.a);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.r, color->argb.r), 0x1ff,
+ "On the R channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.r, argb.r);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.g, color->argb.g), 0x1ff,
+ "On the G channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.g, argb.g);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.b, color->argb.b), 0x1ff,
+ "On the B channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.b, argb.b);
+ }
+}
+
+static void vkms_format_test_yuv_u8_to_argb_u16_case_desc(struct yuv_u8_to_argb_u16_case *t,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s - %s",
+ drm_get_color_encoding_name(t->encoding), drm_get_color_range_name(t->range));
+}
+
+KUNIT_ARRAY_PARAM(yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_cases,
+ vkms_format_test_yuv_u8_to_argb_u16_case_desc
+);
+
+static struct kunit_case vkms_format_test_cases[] = {
+ KUNIT_CASE_PARAM(vkms_format_test_yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_gen_params),
+ {}
+};
+
+static struct kunit_suite vkms_format_test_suite = {
+ .name = "vkms-format",
+ .test_cases = vkms_format_test_cases,
+};
+
+kunit_test_suite(vkms_format_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kunit test for vkms format conversion");
diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c
new file mode 100644
index 000000000000..a1df5659b0fb
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/slab.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
+#include <kunit/visibility.h>
+
+#include "vkms_config.h"
+
+struct vkms_config *vkms_config_create(const char *dev_name)
+{
+ struct vkms_config *config;
+
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return ERR_PTR(-ENOMEM);
+
+ config->dev_name = kstrdup_const(dev_name, GFP_KERNEL);
+ if (!config->dev_name) {
+ kfree(config);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&config->planes);
+ INIT_LIST_HEAD(&config->crtcs);
+ INIT_LIST_HEAD(&config->encoders);
+ INIT_LIST_HEAD(&config->connectors);
+
+ return config;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create);
+
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_create(DEFAULT_DEVICE_NAME);
+ if (IS_ERR(config))
+ return config;
+
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+ if (IS_ERR(crtc_cfg))
+ goto err_alloc;
+ vkms_config_crtc_set_writeback(crtc_cfg, enable_writeback);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+
+ if (enable_overlay) {
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg,
+ DRM_PLANE_TYPE_OVERLAY);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+ }
+
+ if (enable_cursor) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+
+ encoder_cfg = vkms_config_create_encoder(config);
+ if (IS_ERR(encoder_cfg))
+ goto err_alloc;
+
+ if (vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg))
+ goto err_alloc;
+
+ connector_cfg = vkms_config_create_connector(config);
+ if (IS_ERR(connector_cfg))
+ goto err_alloc;
+
+ if (vkms_config_connector_attach_encoder(connector_cfg, encoder_cfg))
+ goto err_alloc;
+
+ return config;
+
+err_alloc:
+ vkms_config_destroy(config);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create);
+
+void vkms_config_destroy(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg, *plane_tmp;
+ struct vkms_config_crtc *crtc_cfg, *crtc_tmp;
+ struct vkms_config_encoder *encoder_cfg, *encoder_tmp;
+ struct vkms_config_connector *connector_cfg, *connector_tmp;
+
+ list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link)
+ vkms_config_destroy_plane(plane_cfg);
+
+ list_for_each_entry_safe(crtc_cfg, crtc_tmp, &config->crtcs, link)
+ vkms_config_destroy_crtc(config, crtc_cfg);
+
+ list_for_each_entry_safe(encoder_cfg, encoder_tmp, &config->encoders, link)
+ vkms_config_destroy_encoder(config, encoder_cfg);
+
+ list_for_each_entry_safe(connector_cfg, connector_tmp, &config->connectors, link)
+ vkms_config_destroy_connector(connector_cfg);
+
+ kfree_const(config->dev_name);
+ kfree(config);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy);
+
+static bool valid_plane_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_planes;
+
+ n_planes = list_count_nodes((struct list_head *)&config->planes);
+	if (n_planes == 0 || n_planes >= 32) {
+ drm_info(dev, "The number of planes must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_planes_for_crtc(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+ bool has_primary_plane = false;
+ bool has_cursor_plane = false;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc != crtc_cfg)
+ continue;
+
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ if (has_primary_plane) {
+ drm_info(dev, "Multiple primary planes\n");
+ return false;
+ }
+
+ has_primary_plane = true;
+ } else if (type == DRM_PLANE_TYPE_CURSOR) {
+ if (has_cursor_plane) {
+ drm_info(dev, "Multiple cursor planes\n");
+ return false;
+ }
+
+ has_cursor_plane = true;
+ }
+ }
+ }
+
+ if (!has_primary_plane) {
+ drm_info(dev, "Primary plane not found\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_plane_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ if (xa_empty(&plane_cfg->possible_crtcs)) {
+ drm_info(dev, "All planes must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_crtc_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_crtcs;
+
+ n_crtcs = list_count_nodes((struct list_head *)&config->crtcs);
+	if (n_crtcs == 0 || n_crtcs >= 32) {
+ drm_info(dev, "The number of CRTCs must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_encoders;
+
+ n_encoders = list_count_nodes((struct list_head *)&config->encoders);
+	if (n_encoders == 0 || n_encoders >= 32) {
+ drm_info(dev, "The number of encoders must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ if (xa_empty(&encoder_cfg->possible_crtcs)) {
+ drm_info(dev, "All encoders must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ bool crtc_has_encoder = false;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg,
+ idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ crtc_has_encoder = true;
+ }
+ }
+
+ if (!crtc_has_encoder) {
+ drm_info(dev, "All CRTCs must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_connector_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_connectors;
+
+ n_connectors = list_count_nodes((struct list_head *)&config->connectors);
+	if (n_connectors == 0 || n_connectors >= 32) {
+ drm_info(dev, "The number of connectors must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_connector_possible_encoders(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg) {
+ if (xa_empty(&connector_cfg->possible_encoders)) {
+ drm_info(dev,
+ "All connectors must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool vkms_config_is_valid(const struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ if (!valid_plane_number(config))
+ return false;
+
+ if (!valid_crtc_number(config))
+ return false;
+
+ if (!valid_encoder_number(config))
+ return false;
+
+ if (!valid_connector_number(config))
+ return false;
+
+ if (!valid_plane_possible_crtcs(config))
+ return false;
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (!valid_planes_for_crtc(config, crtc_cfg))
+ return false;
+ }
+
+ if (!valid_encoder_possible_crtcs(config))
+ return false;
+
+ if (!valid_connector_possible_encoders(config))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid);
+
+static int vkms_config_show(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ const char *dev_name;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config);
+ seq_printf(m, "dev_name=%s\n", dev_name);
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ seq_puts(m, "plane:\n");
+ seq_printf(m, "\ttype=%d\n",
+ vkms_config_plane_get_type(plane_cfg));
+ }
+
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ seq_puts(m, "crtc:\n");
+ seq_printf(m, "\twriteback=%d\n",
+ vkms_config_crtc_get_writeback(crtc_cfg));
+ }
+
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg)
+ seq_puts(m, "encoder\n");
+
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg)
+ seq_puts(m, "connector\n");
+
+ return 0;
+}
+
+static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
+ { "vkms_config", vkms_config_show, 0 },
+};
+
+void vkms_config_register_debugfs(struct vkms_device *vkms_device)
+{
+ drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
+ ARRAY_SIZE(vkms_config_debugfs_list));
+}
+
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ plane_cfg = kzalloc(sizeof(*plane_cfg), GFP_KERNEL);
+ if (!plane_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ plane_cfg->config = config;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&plane_cfg->link, &config->planes);
+
+ return plane_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_plane);
+
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg)
+{
+ xa_destroy(&plane_cfg->possible_crtcs);
+ list_del(&plane_cfg->link);
+ kfree(plane_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_plane);
+
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (plane_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&plane_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_attach_crtc);
+
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&plane_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_detach_crtc);
+
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ crtc_cfg = kzalloc(sizeof(*crtc_cfg), GFP_KERNEL);
+ if (!crtc_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_cfg->config = config;
+ vkms_config_crtc_set_writeback(crtc_cfg, false);
+
+ list_add_tail(&crtc_cfg->link, &config->crtcs);
+
+ return crtc_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_crtc);
+
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg);
+
+ list_del(&crtc_cfg->link);
+ kfree(crtc_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_crtc);
+
+/**
+ * vkms_config_crtc_get_plane() - Return the first plane of a specific type
+ * attached to a CRTC
+ * @config: Configuration containing the CRTC and the plane
+ * @crtc_cfg: Only find planes attached to this CRTC
+ * @type: Plane type to search
+ *
+ * Returns:
+ * The first plane found attached to @crtc_cfg with the type @type.
+ */
+static struct vkms_config_plane *vkms_config_crtc_get_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg,
+ enum drm_plane_type type)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *possible_crtc;
+ enum drm_plane_type current_type;
+ unsigned long idx = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ current_type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg && current_type == type)
+ return plane_cfg;
+ }
+ }
+
+ return NULL;
+}
+
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_PRIMARY);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_primary_plane);
+
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_CURSOR);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_cursor_plane);
+
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ encoder_cfg = kzalloc(sizeof(*encoder_cfg), GFP_KERNEL);
+ if (!encoder_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ encoder_cfg->config = config;
+ xa_init_flags(&encoder_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&encoder_cfg->link, &config->encoders);
+
+ return encoder_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_encoder);
+
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+
+ xa_destroy(&encoder_cfg->possible_crtcs);
+ list_del(&encoder_cfg->link);
+ kfree(encoder_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_encoder);
+
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (encoder_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&encoder_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_attach_crtc);
+
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&encoder_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_detach_crtc);
+
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ connector_cfg = kzalloc(sizeof(*connector_cfg), GFP_KERNEL);
+ if (!connector_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ connector_cfg->config = config;
+ xa_init_flags(&connector_cfg->possible_encoders, XA_FLAGS_ALLOC);
+
+ list_add_tail(&connector_cfg->link, &config->connectors);
+
+ return connector_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_connector);
+
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg)
+{
+ xa_destroy(&connector_cfg->possible_encoders);
+ list_del(&connector_cfg->link);
+ kfree(connector_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_connector);
+
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ u32 encoder_idx = 0;
+
+ if (connector_cfg->config != encoder_cfg->config)
+ return -EINVAL;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&connector_cfg->possible_encoders, &encoder_idx,
+ encoder_cfg, xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_attach_encoder);
+
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ xa_erase(&connector_cfg->possible_encoders, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_detach_encoder);
diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h
new file mode 100644
index 000000000000..0118e3f99706
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONFIG_H_
+#define _VKMS_CONFIG_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_config - General configuration for VKMS driver
+ *
+ * @dev_name: Name of the device
+ * @planes: List of planes configured for the device
+ * @crtcs: List of CRTCs configured for the device
+ * @encoders: List of encoders configured for the device
+ * @connectors: List of connectors configured for the device
+ * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
+ */
+struct vkms_config {
+ const char *dev_name;
+ struct list_head planes;
+ struct list_head crtcs;
+ struct list_head encoders;
+ struct list_head connectors;
+ struct vkms_device *dev;
+};
+
+/**
+ * struct vkms_config_plane
+ *
+ * @link: Link to the other planes in vkms_config
+ * @config: The vkms_config this plane belongs to
+ * @type: Type of the plane. The creator of the configuration needs to ensure
+ * that at least one primary plane is present.
+ * @possible_crtcs: Array of CRTCs that can be used with this plane
+ * @plane: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS plane during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_plane {
+ struct list_head link;
+ struct vkms_config *config;
+
+ enum drm_plane_type type;
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct vkms_plane *plane;
+};
+
+/**
+ * struct vkms_config_crtc
+ *
+ * @link: Link to the other CRTCs in vkms_config
+ * @config: The vkms_config this CRTC belongs to
+ * @writeback: If true, a writeback buffer can be attached to the CRTC
+ * @crtc: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS CRTC during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_crtc {
+ struct list_head link;
+ struct vkms_config *config;
+
+ bool writeback;
+
+ /* Internal usage */
+ struct vkms_output *crtc;
+};
+
+/**
+ * struct vkms_config_encoder
+ *
+ * @link: Link to the other encoders in vkms_config
+ * @config: The vkms_config this encoder belongs to
+ * @possible_crtcs: Array of CRTCs that can be used with this encoder
+ * @encoder: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS encoder
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_encoder {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct drm_encoder *encoder;
+};
+
+/**
+ * struct vkms_config_connector
+ *
+ * @link: Link to the other connectors in vkms_config
+ * @config: The vkms_config this connector belongs to
+ * @possible_encoders: Array of encoders that can be used with this connector
+ * @connector: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS connector
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_connector {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_encoders;
+
+ /* Internal usage */
+ struct vkms_connector *connector;
+};
+
+/**
+ * vkms_config_for_each_plane - Iterate over the vkms_config planes
+ * @config: &struct vkms_config pointer
+ * @plane_cfg: &struct vkms_config_plane pointer used as cursor
+ */
+#define vkms_config_for_each_plane(config, plane_cfg) \
+ list_for_each_entry((plane_cfg), &(config)->planes, link)
+
+/**
+ * vkms_config_for_each_crtc - Iterate over the vkms_config CRTCs
+ * @config: &struct vkms_config pointer
+ * @crtc_cfg: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_for_each_crtc(config, crtc_cfg) \
+ list_for_each_entry((crtc_cfg), &(config)->crtcs, link)
+
+/**
+ * vkms_config_for_each_encoder - Iterate over the vkms_config encoders
+ * @config: &struct vkms_config pointer
+ * @encoder_cfg: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_for_each_encoder(config, encoder_cfg) \
+ list_for_each_entry((encoder_cfg), &(config)->encoders, link)
+
+/**
+ * vkms_config_for_each_connector - Iterate over the vkms_config connectors
+ * @config: &struct vkms_config pointer
+ * @connector_cfg: &struct vkms_config_connector pointer used as cursor
+ */
+#define vkms_config_for_each_connector(config, connector_cfg) \
+ list_for_each_entry((connector_cfg), &(config)->connectors, link)
+
+/**
+ * vkms_config_plane_for_each_possible_crtc - Iterate over the vkms_config_plane
+ * possible CRTCs
+ * @plane_cfg: &struct vkms_config_plane pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) \
+ xa_for_each(&(plane_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_encoder_for_each_possible_crtc - Iterate over the
+ * vkms_config_encoder possible CRTCs
+ * @encoder_cfg: &struct vkms_config_encoder pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) \
+ xa_for_each(&(encoder_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_connector_for_each_possible_encoder - Iterate over the
+ * vkms_config_connector possible encoders
+ * @connector_cfg: &struct vkms_config_connector pointer
+ * @idx: Index of the cursor
+ * @possible_encoder: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, possible_encoder) \
+ xa_for_each(&(connector_cfg)->possible_encoders, idx, (possible_encoder))
+
+/**
+ * vkms_config_create() - Create a new VKMS configuration
+ * @dev_name: Name of the device
+ *
+ * Returns:
+ * The new vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
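+ *
+ * A minimal usage sketch (illustrative, error handling elided):
+ *
+ *	struct vkms_config *config;
+ *
+ *	config = vkms_config_create("vkms-example");
+ *	if (IS_ERR(config))
+ *		return PTR_ERR(config);
+ *	... add planes, CRTCs, encoders and connectors ...
+ *	vkms_config_destroy(config);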
+ */
+struct vkms_config *vkms_config_create(const char *dev_name);
+
+/**
+ * vkms_config_default_create() - Create the configuration for the default device
+ * @enable_cursor: Whether to create a cursor plane
+ * @enable_writeback: Whether to create a writeback connector
+ * @enable_overlay: Whether to create overlay planes
+ *
+ * Returns:
+ * The default vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
+ */
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay);
+
+/**
+ * vkms_config_destroy() - Free a VKMS configuration
+ * @config: vkms_config to free
+ */
+void vkms_config_destroy(struct vkms_config *config);
+
+/**
+ * vkms_config_get_device_name() - Return the name of the device
+ * @config: Configuration to get the device name from
+ *
+ * Returns:
+ * The device name. Only valid while @config is valid.
+ */
+static inline const char *
+vkms_config_get_device_name(struct vkms_config *config)
+{
+ return config->dev_name;
+}
+
+/**
+ * vkms_config_get_num_crtcs() - Return the number of CRTCs in the configuration
+ * @config: Configuration to get the number of CRTCs from
+ */
+static inline size_t vkms_config_get_num_crtcs(struct vkms_config *config)
+{
+ return list_count_nodes(&config->crtcs);
+}
+
+/**
+ * vkms_config_is_valid() - Validate a configuration
+ * @config: Configuration to validate
+ *
+ * Returns:
+ * Whether the configuration is valid or not.
+ * For example, a configuration without a primary plane is not valid.
+ */
+bool vkms_config_is_valid(const struct vkms_config *config);
+
+/**
+ * vkms_config_register_debugfs() - Register a debugfs file to show the device's
+ * configuration
+ * @vkms_device: Device to register
+ */
+void vkms_config_register_debugfs(struct vkms_device *vkms_device);
+
+/**
+ * vkms_config_create_plane() - Add a new plane configuration
+ * @config: Configuration to add the plane to
+ *
+ * Returns:
+ * The new plane configuration or an error. Call vkms_config_destroy_plane() to
+ * free the returned plane configuration.
+ */
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_plane() - Remove and free a plane configuration
+ * @plane_cfg: Plane configuration to destroy
+ */
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg);
+
+/**
+ * vkms_config_plane_get_type() - Return the plane type
+ * @plane_cfg: Plane to get the type from
+ */
+static inline enum drm_plane_type
+vkms_config_plane_get_type(struct vkms_config_plane *plane_cfg)
+{
+ return plane_cfg->type;
+}
+
+/**
+ * vkms_config_plane_set_type() - Set the plane type
+ * @plane_cfg: Plane to set the type to
+ * @type: New plane type
+ */
+static inline void
+vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg,
+ enum drm_plane_type type)
+{
+ plane_cfg->type = type;
+}
+
+/**
+ * vkms_config_plane_attach_crtc() - Attach a plane to a CRTC
+ * @plane_cfg: Plane to attach
+ * @crtc_cfg: CRTC to attach @plane_cfg to
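+ *
+ * Returns:
+ * 0 on success, -EINVAL if the plane and the CRTC belong to different
+ * configurations, -EEXIST if the plane is already attached to the CRTC, or a
+ * negative error code if storing the new entry fails.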
+ */
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_plane_detach_crtc() - Detach a plane from a CRTC
+ * @plane_cfg: Plane to detach
+ * @crtc_cfg: CRTC to detach @plane_cfg from
+ */
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_crtc() - Add a new CRTC configuration
+ * @config: Configuration to add the CRTC to
+ *
+ * Returns:
+ * The new CRTC configuration or an error. Call vkms_config_destroy_crtc() to
+ * free the returned CRTC configuration.
+ */
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_crtc() - Remove and free a CRTC configuration
+ * @config: Configuration to remove the CRTC from
+ * @crtc_cfg: CRTC configuration to destroy
+ */
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_get_writeback() - Return whether a writeback connector will be created
+ * @crtc_cfg: CRTC with or without a writeback connector
+ */
+static inline bool
+vkms_config_crtc_get_writeback(struct vkms_config_crtc *crtc_cfg)
+{
+ return crtc_cfg->writeback;
+}
+
+/**
+ * vkms_config_crtc_set_writeback() - Set whether a writeback connector will be created
+ * @crtc_cfg: Target CRTC
+ * @writeback: Enable or disable the writeback connector
+ */
+static inline void
+vkms_config_crtc_set_writeback(struct vkms_config_crtc *crtc_cfg,
+ bool writeback)
+{
+ crtc_cfg->writeback = writeback;
+}
+
+/**
+ * vkms_config_crtc_primary_plane() - Return the primary plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple primary planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The primary plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_cursor_plane() - Return the cursor plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple cursor planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The cursor plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_encoder() - Add a new encoder configuration
+ * @config: Configuration to add the encoder to
+ *
+ * Returns:
+ * The new encoder configuration or an error. Call vkms_config_destroy_encoder()
+ * to free the returned encoder configuration.
+ */
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_encoder() - Remove and free an encoder configuration
+ * @config: Configuration to remove the encoder from
+ * @encoder_cfg: Encoder configuration to destroy
+ */
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_encoder_attach_crtc() - Attach an encoder to a CRTC
+ * @encoder_cfg: Encoder to attach
+ * @crtc_cfg: CRTC to attach @encoder_cfg to
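+ *
+ * Returns:
+ * 0 on success, -EINVAL if the encoder and the CRTC belong to different
+ * configurations, -EEXIST if the encoder is already attached to the CRTC, or
+ * a negative error code if storing the new entry fails.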
+ */
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_encoder_detach_crtc() - Detach an encoder from a CRTC
+ * @encoder_cfg: Encoder to detach
+ * @crtc_cfg: CRTC to detach @encoder_cfg from
+ */
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_connector() - Add a new connector configuration
+ * @config: Configuration to add the connector to
+ *
+ * Returns:
+ * The new connector configuration or an error. Call
+ * vkms_config_destroy_connector() to free the returned connector configuration.
+ */
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_connector() - Remove and free a connector configuration
+ * @connector_cfg: Connector configuration to destroy
+ */
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg);
+
+/**
+ * vkms_config_connector_attach_encoder() - Attach a connector to an encoder
+ * @connector_cfg: Connector to attach
+ * @encoder_cfg: Encoder to attach @connector_cfg to
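+ *
+ * Returns:
+ * 0 on success, -EINVAL if the connector and the encoder belong to different
+ * configurations, -EEXIST if the connector is already attached to the
+ * encoder, or a negative error code if storing the new entry fails.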
+ */
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_connector_detach_encoder() - Detach a connector from an encoder
+ * @connector_cfg: Connector to detach
+ * @encoder_cfg: Encoder to detach @connector_cfg from
+ */
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
+
+#endif /* _VKMS_CONFIG_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c
new file mode 100644
index 000000000000..48b10cba322a
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+
+#include "vkms_connector.h"
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ /* Use the default modes list from DRM */
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return count;
+}
+
+static struct drm_encoder *vkms_conn_best_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+ .best_encoder = vkms_conn_best_encoder,
+};
+
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ struct vkms_connector *connector;
+ int ret;
+
+ connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drmm_connector_init(dev, &connector->base, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(&connector->base, &vkms_conn_helper_funcs);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h
new file mode 100644
index 000000000000..c9149c1b7af0
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONNECTOR_H_
+#define _VKMS_CONNECTOR_H_
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_connector - VKMS custom type wrapping around the DRM connector
+ *
+ * @base: Base DRM connector
+ */
+struct vkms_connector {
+ struct drm_connector base;
+};
+
+/**
+ * vkms_connector_init() - Initialize a connector
+ * @vkmsdev: VKMS device containing the connector
+ *
+ * Returns:
+ * The connector or an error on failure.
+ */
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev);
+
+#endif /* _VKMS_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 12034ec12029..e60573e0f3e9 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -194,7 +194,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
i++;
}
- vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
+ vkms_state->active_planes = kcalloc(i, sizeof(*vkms_state->active_planes), GFP_KERNEL);
if (!vkms_state->active_planes)
return -ENOMEM;
vkms_state->num_active_planes = i;
@@ -302,8 +302,6 @@ struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *pri
vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0);
if (IS_ERR(vkms_out->composer_workq))
return ERR_CAST(vkms_out->composer_workq);
- if (!vkms_out->composer_workq)
- return ERR_PTR(-ENOMEM);
return vkms_out;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index b6de91134a22..e8472d9b6e3b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -10,7 +10,7 @@
*/
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/dma-mapping.h>
#include <drm/clients/drm_client_setup.h>
@@ -27,11 +27,9 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
+#include "vkms_config.h"
#include "vkms_drv.h"
-#include <drm/drm_print.h>
-#include <drm/drm_debugfs.h>
-
#define DRIVER_NAME "vkms"
#define DRIVER_DESC "Virtual Kernel Mode Setting"
#define DRIVER_MAJOR 1
@@ -81,23 +79,6 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
}
-static int vkms_config_show(struct seq_file *m, void *data)
-{
- struct drm_debugfs_entry *entry = m->private;
- struct drm_device *dev = entry->dev;
- struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
-
- seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback);
- seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor);
- seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay);
-
- return 0;
-}
-
-static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
- { "vkms_config", vkms_config_show, 0 },
-};
-
static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.fops = &vkms_driver_fops,
@@ -168,25 +149,27 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
static int vkms_create(struct vkms_config *config)
{
int ret;
- struct platform_device *pdev;
+ struct faux_device *fdev;
struct vkms_device *vkms_device;
+ const char *dev_name;
- pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ dev_name = vkms_config_get_device_name(config);
+ fdev = faux_device_create(dev_name, NULL, NULL);
+ if (!fdev)
+ return -ENODEV;
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_unregister;
}
- vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver,
+ vkms_device = devm_drm_dev_alloc(&fdev->dev, &vkms_driver,
struct vkms_device, drm);
if (IS_ERR(vkms_device)) {
ret = PTR_ERR(vkms_device);
goto out_devres;
}
- vkms_device->platform = pdev;
+ vkms_device->faux_dev = fdev;
vkms_device->config = config;
config->dev = vkms_device;
@@ -198,7 +181,8 @@ static int vkms_create(struct vkms_config *config)
goto out_devres;
}
- ret = drm_vblank_init(&vkms_device->drm, 1);
+ ret = drm_vblank_init(&vkms_device->drm,
+ vkms_config_get_num_crtcs(config));
if (ret) {
DRM_ERROR("Failed to vblank\n");
goto out_devres;
@@ -208,8 +192,7 @@ static int vkms_create(struct vkms_config *config)
if (ret)
goto out_devres;
- drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
- ARRAY_SIZE(vkms_config_debugfs_list));
+ vkms_config_register_debugfs(vkms_device);
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
@@ -220,9 +203,9 @@ static int vkms_create(struct vkms_config *config)
return 0;
out_devres:
- devres_release_group(&pdev->dev, NULL);
+ devres_release_group(&fdev->dev, NULL);
out_unregister:
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
return ret;
}
@@ -231,17 +214,13 @@ static int __init vkms_init(void)
int ret;
struct vkms_config *config;
- config = kmalloc(sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- config->cursor = enable_cursor;
- config->writeback = enable_writeback;
- config->overlay = enable_overlay;
+ config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay);
+ if (IS_ERR(config))
+ return PTR_ERR(config);
ret = vkms_create(config);
if (ret) {
- kfree(config);
+ vkms_config_destroy(config);
return ret;
}
@@ -252,19 +231,19 @@ static int __init vkms_init(void)
static void vkms_destroy(struct vkms_config *config)
{
- struct platform_device *pdev;
+ struct faux_device *fdev;
if (!config->dev) {
DRM_INFO("vkms_device is NULL.\n");
return;
}
- pdev = config->dev->platform;
+ fdev = config->dev->faux_dev;
drm_dev_unregister(&config->dev->drm);
drm_atomic_helper_shutdown(&config->dev->drm);
- devres_release_group(&pdev->dev, NULL);
- platform_device_unregister(pdev);
+ devres_release_group(&fdev->dev, NULL);
+ faux_device_destroy(fdev);
config->dev = NULL;
}
@@ -275,7 +254,7 @@ static void __exit vkms_exit(void)
return;
vkms_destroy(default_config);
- kfree(default_config);
+ vkms_config_destroy(default_config);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index abbb652be2b5..8013c31efe3b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -12,6 +12,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_writeback.h>
+#define DEFAULT_DEVICE_NAME "vkms"
+
#define XRES_MIN 10
#define YRES_MIN 10
@@ -43,6 +45,23 @@ struct vkms_frame_info {
unsigned int rotation;
};
+/**
+ * struct pixel_argb_u16 - Internal representation of a pixel color.
+ * @a: Alpha component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @r: Red component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @g: Green component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @b: Blue component value, stored in 16 bits, without padding, using
+ * machine endianness
+ *
+ * The goal of this structure is to keep enough precision to ensure
+ * correct composition results in VKMS and to simplify color
+ * manipulation by splitting each component into its own field.
+ * Caution: the byte ordering of this structure is machine-dependent;
+ * it cannot be cast directly to AR48 or xR48.
+ */
struct pixel_argb_u16 {
u16 a, r, g, b;
};
@@ -101,16 +120,34 @@ typedef void (*pixel_read_line_t)(const struct vkms_plane_state *plane, int x_st
struct pixel_argb_u16 out_pixel[]);
/**
+ * struct conversion_matrix - Matrix to use for a specific encoding and range
+ *
+ * @matrix: Conversion matrix from YUV to RGB. The matrix is stored in a row-major manner and is
+ * used to compute RGB values from YUV values:
+ * [[r],[g],[b]] = @matrix * [[y],[u],[v]]
+ * OR for YVU formats:
+ * [[r],[g],[b]] = @matrix * [[y],[v],[u]]
+ * The values of the matrix are signed fixed-point values with a 32-bit fractional part.
+ * @y_offset: Offset subtracted from the Y value before the conversion.
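+ *
+ * For example, 1.0 is stored as 1 << 32 == 4294967296, following the s64
+ * 32.32 fixed-point representation used by drm_fixed.h.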
+ */
+struct conversion_matrix {
+ s64 matrix[3][3];
+ int y_offset;
+};
+
+/**
* struct vkms_plane_state - Driver specific plane state
* @base: base plane state
* @frame_info: data required for composing computation
* @pixel_read_line: function to read a pixel line in this plane. The creator of a
* struct vkms_plane_state must ensure that this pointer is valid
+ * @conversion_matrix: matrix used to convert YUV values to RGB
*/
struct vkms_plane_state {
struct drm_shadow_plane_state base;
struct vkms_frame_info *frame_info;
pixel_read_line_t pixel_read_line;
+ struct conversion_matrix conversion_matrix;
};
struct vkms_plane {
@@ -189,32 +226,19 @@ struct vkms_output {
spinlock_t composer_lock;
};
-/**
- * struct vkms_config - General configuration for VKMS driver
- *
- * @writeback: If true, a writeback buffer can be attached to the CRTC
- * @cursor: If true, a cursor plane is created in the VKMS device
- * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device
- * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
- */
-struct vkms_config {
- bool writeback;
- bool cursor;
- bool overlay;
- struct vkms_device *dev;
-};
+struct vkms_config;
/**
* struct vkms_device - Description of a VKMS device
*
* @drm - Base device in DRM
- * @platform - Associated platform device
+ * @faux_dev - Associated faux device
* @output - Configuration and sub-components of the VKMS device
* @config: Configuration used in this VKMS device
*/
struct vkms_device {
struct drm_device drm;
- struct platform_device *platform;
+ struct faux_device *faux_dev;
const struct vkms_config *config;
};
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 30a64ecca87c..6d0227c6635a 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -7,6 +7,8 @@
#include <drm/drm_rect.h>
#include <drm/drm_fixed.h>
+#include <kunit/visibility.h>
+
#include "vkms_formats.h"
/**
@@ -140,6 +142,51 @@ static void packed_pixels_addr_1x1(const struct vkms_frame_info *frame_info,
*addr = (u8 *)frame_info->map[0].vaddr + offset;
}
+/**
+ * get_subsampling() - Get the subsampling divisor value in a specific direction
+ *
+ * @format: format to extract the subsampling from
+ * @direction: direction of the subsampling requested
+ */
+static int get_subsampling(const struct drm_format_info *format,
+ enum pixel_read_direction direction)
+{
+ switch (direction) {
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ return format->vsub;
+ case READ_RIGHT_TO_LEFT:
+ case READ_LEFT_TO_RIGHT:
+ return format->hsub;
+ }
+ WARN_ONCE(true, "Invalid direction for pixel reading: %d\n", direction);
+ return 1;
+}
+
+/**
+ * get_subsampling_offset() - Compute an offset to keep the chroma siting consistent regardless
+ * of the x_start and y_start values
+ *
+ * @direction: direction of the reading to properly compute this offset
+ * @x_start: x coordinate of the starting point of the line being read
+ * @y_start: y coordinate of the starting point of the line being read
+ */
+static int get_subsampling_offset(enum pixel_read_direction direction, int x_start, int y_start)
+{
+ switch (direction) {
+ case READ_BOTTOM_TO_TOP:
+ return -y_start - 1;
+ case READ_TOP_TO_BOTTOM:
+ return y_start;
+ case READ_RIGHT_TO_LEFT:
+ return -x_start - 1;
+ case READ_LEFT_TO_RIGHT:
+ return x_start;
+ }
+ WARN_ONCE(true, "Invalid direction for pixel reading: %d\n", direction);
+ return 0;
+}
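+
+/*
+ * For example (illustrative): for a format with hsub == 2 read from
+ * x_start == 3 in READ_LEFT_TO_RIGHT, this returns 3, so the chroma pointer
+ * in the read_line helpers advances after the first pixel (x == 3 shares its
+ * chroma sample with x == 2) and then after every second pixel.
+ */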
+
/*
* The following functions take pixel data (a, r, g, b, pixel, ...) and convert them to
* &struct pixel_argb_u16
@@ -202,11 +249,54 @@ static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
return out_pixel;
}
+static struct pixel_argb_u16 argb_u16_from_gray8(u8 gray)
+{
+ return argb_u16_from_u8888(255, gray, gray, gray);
+}
+
+static struct pixel_argb_u16 argb_u16_from_grayu16(u16 gray)
+{
+ return argb_u16_from_u16161616(0xFFFF, gray, gray, gray);
+}
+
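+/*
+ * Convert one 8-bit YUV sample triplet to a struct pixel_argb_u16 using the
+ * given conversion matrix. After subtracting the Y offset and the fixed 128
+ * chroma bias, each value is scaled by 257 (255 * 257 == 0xffff) to use the
+ * full 16-bit range, and the fixed-point matrix product is rounded and
+ * clamped to [0x0, 0xffff].
+ */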
+VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
+ const struct conversion_matrix *matrix)
+{
+ u16 r, g, b;
+ s64 fp_y, fp_channel_1, fp_channel_2;
+ s64 fp_r, fp_g, fp_b;
+
+ fp_y = drm_int2fixp(((int)y - matrix->y_offset) * 257);
+ fp_channel_1 = drm_int2fixp(((int)channel_1 - 128) * 257);
+ fp_channel_2 = drm_int2fixp(((int)channel_2 - 128) * 257);
+
+ fp_r = drm_fixp_mul(matrix->matrix[0][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[0][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[0][2], fp_channel_2);
+ fp_g = drm_fixp_mul(matrix->matrix[1][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[1][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[1][2], fp_channel_2);
+ fp_b = drm_fixp_mul(matrix->matrix[2][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[2][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[2][2], fp_channel_2);
+
+ fp_r = drm_fixp2int_round(fp_r);
+ fp_g = drm_fixp2int_round(fp_g);
+ fp_b = drm_fixp2int_round(fp_b);
+
+ r = clamp(fp_r, 0, 0xffff);
+ g = clamp(fp_g, 0, 0xffff);
+ b = clamp(fp_b, 0, 0xffff);
+
+ return argb_u16_from_u16161616(0xffff, r, g, b);
+}
+EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv888);
+
/*
* The following functions are read_line function for each pixel format supported by VKMS.
*
* They read a line starting at the point @x_start,@y_start following the @direction. The result
- * is stored in @out_pixel and in the format ARGB16161616.
+ * is stored in @out_pixel and in a 64 bits format, see struct pixel_argb_u16.
*
* These functions are very repetitive, but the innermost pixel loops must be kept inside these
* functions for performance reasons. Some benchmarking was done in [1] where having the innermost
@@ -215,6 +305,96 @@ static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
* [1]: https://lore.kernel.org/dri-devel/d258c8dc-78e9-4509-9037-a98f7f33b3a3@riseup.net/
*/
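+
+/*
+ * Common helper for the single-channel formats with less than one byte per
+ * pixel (R1, R2 and R4): the mask isolates one sample inside a byte and
+ * lum_per_level rescales it so that the maximum sample value maps to 0xFFFF
+ * (for example, 0xFFFF / 3 for R2).
+ */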
+static void Rx_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ int bits_per_pixel = drm_format_info_bpp(plane->frame_info->fb->format, 0);
+ u8 *src_pixels;
+ int rem_x, rem_y;
+
+ WARN_ONCE(drm_format_info_block_height(plane->frame_info->fb->format, 0) != 1,
+		  "%s() only supports formats with block_h == 1", __func__);
+
+ packed_pixels_addr(plane->frame_info, x_start, y_start, 0, &src_pixels, &rem_x, &rem_y);
+ int bit_offset = (8 - bits_per_pixel) - rem_x * bits_per_pixel;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int mask = (0x1 << bits_per_pixel) - 1;
+ int lum_per_level = 0xFFFF / mask;
+
+ if (direction == READ_LEFT_TO_RIGHT || direction == READ_RIGHT_TO_LEFT) {
+ int restart_bit_offset;
+ int step_bit_offset;
+
+ if (direction == READ_LEFT_TO_RIGHT) {
+ restart_bit_offset = 8 - bits_per_pixel;
+ step_bit_offset = -bits_per_pixel;
+ } else {
+ restart_bit_offset = 0;
+ step_bit_offset = bits_per_pixel;
+ }
+
+ while (out_pixel < end) {
+ u8 val = ((*src_pixels) >> bit_offset) & mask;
+
+ *out_pixel = argb_u16_from_grayu16((int)val * lum_per_level);
+
+ bit_offset += step_bit_offset;
+ if (bit_offset < 0 || 8 <= bit_offset) {
+ bit_offset = restart_bit_offset;
+ src_pixels += step;
+ }
+ out_pixel += 1;
+ }
+ } else if (direction == READ_TOP_TO_BOTTOM || direction == READ_BOTTOM_TO_TOP) {
+ while (out_pixel < end) {
+ u8 val = (*src_pixels >> bit_offset) & mask;
+ *out_pixel = argb_u16_from_grayu16((int)val * lum_per_level);
+ src_pixels += step;
+ out_pixel += 1;
+ }
+ }
+}
+
+static void R1_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R2_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R4_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R8_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ while (out_pixel < end) {
+ *out_pixel = argb_u16_from_gray8(*src_pixels);
+ src_pixels += step;
+ out_pixel += 1;
+ }
+}
+
static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
enum pixel_read_direction direction, int count,
struct pixel_argb_u16 out_pixel[])
@@ -332,6 +512,92 @@ static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
}
/*
+ * This callback can be used for YUV formats where U and V values are
+ * stored in the same plane (often called semi-planar formats). It will
+ * correctly handle subsampling as described in the drm_format_info of the plane.
+ *
+ * The conversion matrix stored in the @plane is used to:
+ * - Apply the correct color range and encoding
+ * - Convert YUV and YVU with the same function (a column swap is needed when setting up
+ * plane->conversion_matrix)
+ */
+static void semi_planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ u8 *y_plane;
+ u8 *uv_plane;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
+ &y_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 1,
+ &uv_plane);
+ int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int step_uv = get_block_step_bytes(plane->frame_info->fb, direction, 1);
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
+
+ for (int i = 0; i < count; i++) {
+ *out_pixel = argb_u16_from_yuv888(y_plane[0], uv_plane[0], uv_plane[1],
+ conversion_matrix);
+ out_pixel += 1;
+ y_plane += step_y;
+ if ((i + subsampling_offset + 1) % subsampling == 0)
+ uv_plane += step_uv;
+ }
+}
+
+/*
+ * This callback can be used for YUV formats where each color component is
+ * stored in a different plane (often called planar formats). It will
+ * correctly handle subsampling as described in the drm_format_info of the plane.
+ *
+ * The conversion matrix stored in the @plane is used to:
+ * - Apply the correct color range and encoding
+ * - Convert YUV and YVU with the same function (a column swap is needed when setting up
+ * plane->conversion_matrix)
+ */
+static void planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ u8 *y_plane;
+ u8 *channel_1_plane;
+ u8 *channel_2_plane;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
+ &y_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 1,
+ &channel_1_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 2,
+ &channel_2_plane);
+ int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int step_channel_1 = get_block_step_bytes(plane->frame_info->fb, direction, 1);
+ int step_channel_2 = get_block_step_bytes(plane->frame_info->fb, direction, 2);
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
+
+ for (int i = 0; i < count; i++) {
+ *out_pixel = argb_u16_from_yuv888(*y_plane, *channel_1_plane, *channel_2_plane,
+ conversion_matrix);
+ out_pixel += 1;
+ y_plane += step_y;
+ if ((i + subsampling_offset + 1) % subsampling == 0) {
+ channel_1_plane += step_channel_1;
+ channel_2_plane += step_channel_2;
+ }
+ }
+}
+
+/*
* The following functions take one &struct pixel_argb_u16 and convert it to a specific format.
* The result is stored in @out_pixel.
*
@@ -456,6 +722,28 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
return &XRGB16161616_read_line;
case DRM_FORMAT_RGB565:
return &RGB565_read_line;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ return &semi_planar_yuv_read_line;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ return &planar_yuv_read_line;
+ case DRM_FORMAT_R1:
+ return &R1_read_line;
+ case DRM_FORMAT_R2:
+ return &R2_read_line;
+ case DRM_FORMAT_R4:
+ return &R4_read_line;
+ case DRM_FORMAT_R8:
+ return &R8_read_line;
default:
/*
* This is a bug in vkms_plane_atomic_check(). All the supported
@@ -469,6 +757,183 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
}
}
+/*
+ * These matrices were generated using the Colour Python framework.
+ *
+ * Below are the function calls used to generate each matrix, go to
+ * https://colour.readthedocs.io/en/develop/generated/colour.matrix_YCbCr.html
+ * for more info:
+ *
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
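+ *
+ * The output coefficients are s64 values in 32.32 fixed point; for example,
+ * the BT.601 full-range V-to-R coefficient 6021544149 is 1.402 * 2**32
+ * rounded to the nearest integer.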
+ */
+static const struct conversion_matrix no_operation = {
+ .matrix = {
+ { 4294967296, 0, 0, },
+ { 0, 4294967296, 0, },
+ { 0, 0, 4294967296, },
+ },
+ .y_offset = 0,
+};
+
+static const struct conversion_matrix yuv_bt601_full = {
+ .matrix = {
+ { 4294967296, 0, 6021544149 },
+ { 4294967296, -1478054095, -3067191994 },
+ { 4294967296, 7610682049, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt601_limited = {
+ .matrix = {
+ { 5020601039, 0, 6881764740 },
+ { 5020601039, -1689204679, -3505362278 },
+ { 5020601039, 8697922339, 0 },
+ },
+ .y_offset = 16,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt709_full = {
+ .matrix = {
+ { 4294967296, 0, 6763714498 },
+ { 4294967296, -804551626, -2010578443 },
+ { 4294967296, 7969741314, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt709_limited = {
+ .matrix = {
+ { 5020601039, 0, 7729959424 },
+ { 5020601039, -919487572, -2297803934 },
+ { 5020601039, 9108275786, 0 },
+ },
+ .y_offset = 16,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt2020_full = {
+ .matrix = {
+ { 4294967296, 0, 6333358775 },
+ { 4294967296, -706750298, -2453942994 },
+ { 4294967296, 8080551471, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt2020_limited = {
+ .matrix = {
+ { 5020601039, 0, 7238124312 },
+ { 5020601039, -807714626, -2804506279 },
+ { 5020601039, 9234915964, 0 },
+ },
+ .y_offset = 16,
+};
+
+/**
+ * swap_uv_columns() - Swap the U and V columns of a given matrix
+ *
+ * @matrix: Matrix whose columns are swapped
+ */
+static void swap_uv_columns(struct conversion_matrix *matrix)
+{
+ swap(matrix->matrix[0][2], matrix->matrix[0][1]);
+ swap(matrix->matrix[1][2], matrix->matrix[1][1]);
+ swap(matrix->matrix[2][2], matrix->matrix[2][1]);
+}
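
[Editorial note, not part of the patch: swapping the U and V columns is
sufficient for the Cr/Cb-swapped formats because the samples simply arrive
in (y, v, u) order instead of (y, u, v). With M' denoting M with columns 1
and 2 exchanged,

    M' * (y, v, u)^T = M * (y, u, v)^T,

so the swapped matrix applied to the swapped chroma order reproduces the
original product exactly.]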
+
+/**
+ * get_conversion_matrix_to_argb_u16() - Retrieve the correct YUV-to-RGB conversion matrix for a
+ * given format, encoding and range.
+ *
+ * @format: DRM_FORMAT_* value for which to obtain a conversion matrix (see [drm_fourcc.h])
+ * @encoding: DRM_COLOR_* value for which to obtain a conversion matrix
+ * @range: DRM_COLOR_*_RANGE value for which to obtain a conversion matrix
+ * @matrix: Pointer to store the result into
+ */
+void get_conversion_matrix_to_argb_u16(u32 format,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range,
+ struct conversion_matrix *matrix)
+{
+ const struct conversion_matrix *matrix_to_copy;
+ bool limited_range;
+
+ switch (range) {
+ case DRM_COLOR_YCBCR_LIMITED_RANGE:
+ limited_range = true;
+ break;
+ case DRM_COLOR_YCBCR_FULL_RANGE:
+ limited_range = false;
+ break;
+ case DRM_COLOR_RANGE_MAX:
+ limited_range = false;
+ WARN_ONCE(true, "The requested range is not supported.");
+ break;
+ }
+
+ switch (encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ matrix_to_copy = limited_range ? &yuv_bt601_limited :
+ &yuv_bt601_full;
+ break;
+ case DRM_COLOR_YCBCR_BT709:
+ matrix_to_copy = limited_range ? &yuv_bt709_limited :
+ &yuv_bt709_full;
+ break;
+ case DRM_COLOR_YCBCR_BT2020:
+ matrix_to_copy = limited_range ? &yuv_bt2020_limited :
+ &yuv_bt2020_full;
+ break;
+ case DRM_COLOR_ENCODING_MAX:
+ matrix_to_copy = &no_operation;
+ WARN_ONCE(true, "The requested encoding is not supported.");
+ break;
+ }
+
+ memcpy(matrix, matrix_to_copy, sizeof(*matrix_to_copy));
+
+ switch (format) {
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ swap_uv_columns(matrix);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(get_conversion_matrix_to_argb_u16);
+
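[Editorial sketch, not part of the patch: one plausible way to apply a
struct conversion_matrix in Q32.32 fixed point, assuming the matrix entries
are s64 and that 8-bit samples are first expanded to 16 bits. The helper
name and the 257 expansion factor are illustrative assumptions, not the
driver's actual implementation.

static struct pixel_argb_u16
yuv888_to_argb_u16_sketch(u8 y, u8 u, u8 v,
			  const struct conversion_matrix *m)
{
	struct pixel_argb_u16 out;
	/* Remove the luma offset, re-center chroma, expand to 16 bits. */
	s64 y_16 = ((s64)y - m->y_offset) * 257;
	s64 u_16 = ((s64)u - 128) * 257;
	s64 v_16 = ((s64)v - 128) * 257;

	out.a = 0xffff;
	/* Coefficients are round(float * 2**32), so shift back by 32. */
	out.r = clamp_val((m->matrix[0][0] * y_16 + m->matrix[0][1] * u_16 +
			   m->matrix[0][2] * v_16) >> 32, 0, 0xffff);
	out.g = clamp_val((m->matrix[1][0] * y_16 + m->matrix[1][1] * u_16 +
			   m->matrix[1][2] * v_16) >> 32, 0, 0xffff);
	out.b = clamp_val((m->matrix[2][0] * y_16 + m->matrix[2][1] * u_16 +
			   m->matrix[2][2] * v_16) >> 32, 0, 0xffff);
	return out;
}]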
/**
* get_pixel_write_function() - Retrieve the correct write_pixel function for a specific format.
* The returned pointer is NULL for unsupported pixel formats. The caller must ensure that the
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index 8d2bef95ff79..b4fe62ab9c65 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -9,4 +9,13 @@ pixel_read_line_t get_pixel_read_line_function(u32 format);
pixel_write_t get_pixel_write_function(u32 format);
+void get_conversion_matrix_to_argb_u16(u32 format, enum drm_color_encoding encoding,
+ enum drm_color_range range,
+ struct conversion_matrix *matrix);
+
+#if IS_ENABLED(CONFIG_KUNIT)
+struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
+ const struct conversion_matrix *matrix);
+#endif
+
#endif /* _VKMS_FORMATS_H_ */
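
[Editorial sketch, not part of the patch: with argb_u16_from_yuv888 exported
for KUnit builds, a minimal test could check that full-range black survives
the conversion. The test name and placement are illustrative assumptions.

#include <kunit/test.h>
#include <drm/drm_fourcc.h>
#include "vkms_formats.h"

static void vkms_yuv_black_test(struct kunit *test)
{
	struct conversion_matrix m;
	struct pixel_argb_u16 px;

	get_conversion_matrix_to_argb_u16(DRM_FORMAT_NV12,
					  DRM_COLOR_YCBCR_BT601,
					  DRM_COLOR_YCBCR_FULL_RANGE, &m);
	/* Full-range black: Y = 0 with centered chroma (Cb = Cr = 128). */
	px = argb_u16_from_yuv888(0, 128, 128, &m);
	KUNIT_EXPECT_EQ(test, px.r, 0);
	KUNIT_EXPECT_EQ(test, px.g, 0);
	KUNIT_EXPECT_EQ(test, px.b, 0);
}]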
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 22f0d678af3a..8d7ca0cdd79f 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,121 +1,111 @@
// SPDX-License-Identifier: GPL-2.0+
+#include "vkms_config.h"
+#include "vkms_connector.h"
#include "vkms_drv.h"
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
-#include <drm/drm_probe_helper.h>
-
-static const struct drm_connector_funcs vkms_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int vkms_conn_get_modes(struct drm_connector *connector)
-{
- int count;
-
- /* Use the default modes list from DRM */
- count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
- drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
-
- return count;
-}
-
-static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
- .get_modes = vkms_conn_get_modes,
-};
int vkms_output_init(struct vkms_device *vkmsdev)
{
struct drm_device *dev = &vkmsdev->drm;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct vkms_output *output;
- struct vkms_plane *primary, *overlay, *cursor = NULL;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
int ret;
int writeback;
- unsigned int n;
-
- /*
- * Initialize used plane. One primary plane is required to perform the composition.
- *
- * The overlay and cursor planes are not mandatory, but can be used to perform complex
- * composition.
- */
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(primary))
- return PTR_ERR(primary);
-
- if (vkmsdev->config->cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
- if (IS_ERR(cursor))
- return PTR_ERR(cursor);
- }
- output = vkms_crtc_init(dev, &primary->base,
- cursor ? &cursor->base : NULL);
- if (IS_ERR(output)) {
- DRM_ERROR("Failed to allocate CRTC\n");
- return PTR_ERR(output);
- }
+ if (!vkms_config_is_valid(vkmsdev->config))
+ return -EINVAL;
- if (vkmsdev->config->overlay) {
- for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
- overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(overlay)) {
- DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
- return PTR_ERR(overlay);
- }
- overlay->base.possible_crtcs = drm_crtc_mask(&output->crtc);
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ plane_cfg->plane = vkms_plane_init(vkmsdev, type);
+ if (IS_ERR(plane_cfg->plane)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
+ return PTR_ERR(plane_cfg->plane);
}
}
- connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
- if (!connector) {
- DRM_ERROR("Failed to allocate connector\n");
- return -ENOMEM;
- }
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ struct vkms_config_plane *primary, *cursor;
- ret = drmm_connector_init(dev, connector, &vkms_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init connector\n");
- return ret;
- }
+ primary = vkms_config_crtc_primary_plane(vkmsdev->config, crtc_cfg);
+ cursor = vkms_config_crtc_cursor_plane(vkmsdev->config, crtc_cfg);
- drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+ crtc_cfg->crtc = vkms_crtc_init(dev, &primary->plane->base,
+ cursor ? &cursor->plane->base : NULL);
+ if (IS_ERR(crtc_cfg->crtc)) {
+ DRM_ERROR("Failed to allocate CRTC\n");
+ return PTR_ERR(crtc_cfg->crtc);
+ }
- encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder) {
- DRM_ERROR("Failed to allocate encoder\n");
- return -ENOMEM;
+ /* Initialize the writeback component */
+ if (vkms_config_crtc_get_writeback(crtc_cfg)) {
+ writeback = vkms_enable_writeback_connector(vkmsdev, crtc_cfg->crtc);
+ if (writeback)
+ DRM_ERROR("Failed to init writeback connector\n");
+ }
}
- ret = drmm_encoder_init(dev, encoder, NULL,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init encoder\n");
- return ret;
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ plane_cfg->plane->base.possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- encoder->possible_crtcs = drm_crtc_mask(&output->crtc);
- /* Attach the encoder and the connector */
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_ERROR("Failed to attach connector to encoder\n");
- return ret;
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ encoder_cfg->encoder = drmm_kzalloc(dev, sizeof(*encoder_cfg->encoder), GFP_KERNEL);
+ if (!encoder_cfg->encoder) {
+ DRM_ERROR("Failed to allocate encoder\n");
+ return -ENOMEM;
+ }
+ ret = drmm_encoder_init(dev, encoder_cfg->encoder, NULL,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ return ret;
+ }
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ encoder_cfg->encoder->possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- /* Initialize the writeback component */
- if (vkmsdev->config->writeback) {
- writeback = vkms_enable_writeback_connector(vkmsdev, output);
- if (writeback)
- DRM_ERROR("Failed to init writeback connector\n");
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg) {
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ connector_cfg->connector = vkms_connector_init(vkmsdev);
+ if (IS_ERR(connector_cfg->connector)) {
+ DRM_ERROR("Failed to init connector\n");
+ return PTR_ERR(connector_cfg->connector);
+ }
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg,
+ idx,
+ possible_encoder) {
+ ret = drm_connector_attach_encoder(&connector_cfg->connector->base,
+ possible_encoder->encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ return ret;
+ }
+ }
}
drm_mode_config_reset(dev);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e34f8c7f83c3..e3fdd161d0f0 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -18,7 +18,23 @@ static const u32 vkms_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB16161616,
DRM_FORMAT_ARGB16161616,
- DRM_FORMAT_RGB565
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV24,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_NV42,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_R1,
+ DRM_FORMAT_R2,
+ DRM_FORMAT_R4,
+ DRM_FORMAT_R8,
};
static struct drm_plane_state *
@@ -119,6 +135,8 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
frame_info->rotation = new_state->rotation;
vkms_plane_state->pixel_read_line = get_pixel_read_line_function(fmt);
+ get_conversion_matrix_to_argb_u16(fmt, new_state->color_encoding, new_state->color_range,
+ &vkms_plane_state->conversion_matrix);
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -205,5 +223,14 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK);
+ drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_FULL_RANGE);
+
return plane;
}
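
[Editorial sketch, not part of the patch: a userspace client could exercise
the new COLOR_ENCODING/COLOR_RANGE plane properties through the libdrm
atomic API. Property-ID discovery is elided; encoding_prop and range_prop
are assumed to have been looked up by name via drmModeObjectGetProperties(),
and passing the kernel enum values directly is an assumption — a robust
client would match the enum names instead.

	drmModeAtomicReq *req = drmModeAtomicAlloc();

	drmModeAtomicAddProperty(req, plane_id, encoding_prop,
				 DRM_COLOR_YCBCR_BT709);
	drmModeAtomicAddProperty(req, plane_id, range_prop,
				 DRM_COLOR_YCBCR_LIMITED_RANGE);
	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
	drmModeAtomicFree(req);]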
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6c3c2922ae8b..aab646b91ca9 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on (X86 && HYPERVISOR_GUEST) || ARM64
select DRM_CLIENT_SELECTION
select DRM_TTM
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 46a4ab688a7f..b168fd7fe9b3 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
- vmwgfx_gem.o vmwgfx_vkms.o
+ vmwgfx_gem.o vmwgfx_vkms.o vmwgfx_cursor_plane.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 64bd7d74854e..fa5841fda659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -429,7 +429,7 @@ static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
void *ptr = NULL;
int ret;
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
if (ret) {
drm_dbg_driver(&vmw->drm,
@@ -447,7 +447,7 @@ out:
static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
{
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_vunmap(bo->tbo.base.dma_buf, map);
else
vmw_bo_unmap(bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 9b5b8c1f063b..f031a312c783 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -36,8 +36,7 @@ static void vmw_bo_release(struct vmw_bo *vbo)
{
struct vmw_resource *res;
- WARN_ON(vbo->tbo.base.funcs &&
- kref_read(&vbo->tbo.base.refcount) != 0);
+ WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
xa_destroy(&vbo->detached_resources);
@@ -51,11 +50,13 @@ static void vmw_bo_release(struct vmw_bo *vbo)
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void)vmw_resource_reserve(res, false, true);
vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
res->guest_memory_bo = NULL;
res->guest_memory_offset = 0;
- vmw_resource_unreserve(res, false, false, false, NULL,
+ vmw_resource_unreserve(res, true, false, false, NULL,
0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -73,9 +74,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
- WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
+ WARN_ON(vbo->dirty);
kfree(vbo);
}
@@ -467,6 +468,7 @@ int vmw_bo_create(struct vmw_private *vmw,
if (unlikely(ret != 0))
goto out_error;
+ (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs;
return ret;
out_error:
*p_bo = NULL;
@@ -848,9 +850,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
- xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+ return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
}
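
[Editorial note, not part of the patch: with the new int return type, the
xarray store failure can be propagated rather than silently dropped, e.g.

	ret = vmw_bo_add_detached_resource(vbo, res);
	if (ret)
		return ret;]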
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
@@ -887,3 +889,9 @@ out:
surf = vmw_res_to_srf(res);
return surf;
}
+
+s32 vmw_bo_mobid(struct vmw_bo *vbo)
+{
+ WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB);
+ return (s32)vbo->tbo.resource->start;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 11e330c7c7f5..cf84a163bfcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -141,7 +141,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
@@ -204,12 +204,12 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf)
*buf = NULL;
if (tmp_buf)
- ttm_bo_put(&tmp_buf->tbo);
+ drm_gem_object_put(&tmp_buf->tbo.base);
}
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
- ttm_bo_get(&buf->tbo);
+ drm_gem_object_get(&buf->tbo.base);
return buf;
}
@@ -233,4 +233,6 @@ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
return container_of((gobj), struct vmw_bo, tbo.base);
}
+s32 vmw_bo_mobid(struct vmw_bo *vbo);
+
#endif // VMWGFX_BO_H
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index dd4ca6a9c690..8fe02131a6c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -544,7 +544,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
- vmw_update_seqno(dev_priv);
+ vmw_fences_update(dev_priv->fman);
out_err:
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index a7c07692262b..98331c4c0335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
new file mode 100644
index 000000000000..718832b08d96
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+#include "vmwgfx_cursor_plane.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmw_surface_cache.h"
+
+#include "drm/drm_atomic.h"
+#include "drm/drm_atomic_helper.h"
+#include "drm/drm_plane.h"
+#include <asm/page.h>
+
+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
+
+struct vmw_svga_fifo_cmd_define_cursor {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+};
+
+/**
+ * vmw_send_define_cursor_cmd - queue a define cursor command
+ * @dev_priv: the private driver struct
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of the mouse hotspot
+ * @hotspotY: the vertical position of the mouse hotspot
+ */
+static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct vmw_svga_fifo_cmd_define_cursor *cmd;
+ const u32 image_size = width * height * sizeof(*image);
+ const u32 cmd_size = sizeof(*cmd) + image_size;
+
+	/*
+	 * Try to reserve fifocmd space and swallow any failures;
+	 * such reservations cannot be left unconsumed for long
+	 * without risking clogging other fifocmd users, so we
+	 * treat reservations separately from the way we treat
+	 * other fallible KMS-atomic resources at prepare_fb.
+	 */
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
+
+ if (unlikely(!cmd))
+ return;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
+
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
+}
+
+static void
+vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+ s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+
+ if (WARN_ON(!surface || !surface->snooper.image))
+ return;
+
+ if (vps->cursor.legacy.id != surface->snooper.id) {
+ vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
+ vps->base.crtc_w, vps->base.crtc_h,
+ hotspot_x, hotspot_y);
+ vps->cursor.legacy.id = surface->snooper.id;
+ }
+}
+
+static enum vmw_cursor_update_type
+vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+
+ if (surface && surface->snooper.image)
+ return VMW_CURSOR_UPDATE_LEGACY;
+
+ if (vmw->has_mob) {
+ if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
+ return VMW_CURSOR_UPDATE_MOB;
+ }
+
+ return VMW_CURSOR_UPDATE_NONE;
+}
+
+static void vmw_cursor_update_mob(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ SVGAGBCursorHeader *header;
+ SVGAGBAlphaCursorHeader *alpha_header;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
+ u32 *image = vmw_bo_map_and_cache(bo);
+ const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);
+
+ header = vmw_bo_map_and_cache(vps->cursor.mob);
+ alpha_header = &header->header.alphaHeader;
+
+ memset(header, 0, sizeof(*header));
+
+ header->type = SVGA_ALPHA_CURSOR;
+ header->sizeInBytes = image_size;
+
+ alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+ alpha_header->width = vps->base.crtc_w;
+ alpha_header->height = vps->base.crtc_h;
+
+ memcpy(header + 1, image, image_size);
+ vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob));
+
+ vmw_bo_unmap(bo);
+ vmw_bo_unmap(vps->cursor.mob);
+}
+
+static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
+ u32 w, u32 h)
+{
+ switch (update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_NONE:
+ return 0;
+ case VMW_CURSOR_UPDATE_MOB:
+ return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
+ }
+ return 0;
+}
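
[Worked example (editorial): for a MOB-backed 64x64 cursor the size is
64 * 64 * sizeof(u32) = 16384 bytes of ARGB image data, plus
sizeof(SVGAGBCursorHeader) for the header that precedes the image in the
MOB.]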
+
+static void vmw_cursor_mob_destroy(struct vmw_bo **vbo)
+{
+ if (!(*vbo))
+ return;
+
+ ttm_bo_unpin(&(*vbo)->tbo);
+ vmw_bo_unreference(vbo);
+}
+
+/**
+ * vmw_cursor_mob_unmap - Unmaps the cursor MOB.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_unmap(struct vmw_plane_state *vps)
+{
+ int ret = 0;
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo || !vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
+ if (likely(ret == 0)) {
+ vmw_bo_unmap(vbo);
+ ttm_bo_unreserve(&vbo->tbo);
+ }
+
+ return ret;
+}
+
+static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ u32 i;
+
+ if (!vps->cursor.mob)
+ return;
+
+ vmw_cursor_mob_unmap(vps);
+
+ /* Look for a free slot to return this mob to the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (!vcp->cursor_mobs[i]) {
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Cache is full: See if this mob is bigger than an existing mob. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i]->tbo.base.size <
+ vps->cursor.mob->tbo.base.size) {
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Destroy it if it's not worth caching. */
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+}
+
+static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ u32 i;
+ u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (!dev_priv->has_mob ||
+ (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
+ return -EINVAL;
+
+ mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+ cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
+
+ if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
+ vps->base.crtc_h > cursor_max_dim)
+ return -EINVAL;
+
+ if (vps->cursor.mob) {
+ if (vps->cursor.mob->tbo.base.size >= size)
+ return 0;
+ vmw_cursor_mob_put(vcp, vps);
+ }
+
+ /* Look for an unused mob in the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i] &&
+ vcp->cursor_mobs[i]->tbo.base.size >= size) {
+ vps->cursor.mob = vcp->cursor_mobs[i];
+ vcp->cursor_mobs[i] = NULL;
+ return 0;
+ }
+ }
+ /* Create a new mob if we can't find an existing one. */
+ ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
+ &vps->cursor.mob);
+
+ if (ret != 0)
+ return ret;
+
+	/* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
+ if (ret != 0)
+ goto teardown;
+
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+
+ return 0;
+
+teardown:
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+ return ret;
+}
+
+static void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
+ : SVGA_CURSOR_ON_HIDE;
+ u32 count;
+
+ spin_lock(&dev_priv->cursor_lock);
+ if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
+ } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
+ }
+ spin_unlock(&dev_priv->cursor_lock);
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ u32 box_count;
+ void *virtual;
+ bool is_iomem;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int i, ret;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed, nothing to copy */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->d != 1 || box_count != 1 ||
+ box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
+		/* TODO handle non-page-aligned offsets */
+		/* TODO handle non-zero dst & src offsets */
+		/* TODO handle more than one copy box */
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+
+ if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+ memcpy(srf->snooper.image, virtual,
+ VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
+ } else {
+		/* Width or pitch differs from the snoop image: copy row by row. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * image_pitch,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * desc->pitchBytesPerBlock);
+ }
+ srf->snooper.id++;
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
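
[Worked example (editorial): for the 64x64 A8R8G8B8 snoop surface,
image_pitch is 64 * 4 = 256 bytes, so kmap_num = (64 * 256) >> PAGE_SHIFT =
16384 / 4096 = 4 pages on a system with 4 KiB pages.]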
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ u32 i;
+
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+
+ drm_plane_cleanup(plane);
+}
+
+/**
+ * vmw_cursor_mob_map - Maps the cursor MOB.
+ *
+ * @vps: plane state
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_map(struct vmw_plane_state *vps)
+{
+ int ret;
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo)
+ return -EINVAL;
+
+ if (vbo->tbo.base.size < size)
+ return -EINVAL;
+
+ if (vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ vmw_bo_map_and_cache(vbo);
+
+ ttm_bo_unreserve(&vbo->tbo);
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
+ *
+ * @plane: cursor plane
+ * @old_state: contains the state to clean up
+ *
+ * Unmaps all cursor bo mappings and unpins the cursor surface.
+ */
+void
+vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
+
+ vmw_cursor_mob_unmap(vps);
+ vmw_cursor_mob_put(vcp, vps);
+
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
+}
+
+static bool
+vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
+ struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
+ struct vmw_surface *surf;
+ bool dirty = false;
+ int ret;
+
+ if (new_bo != old_bo)
+ return true;
+
+ if (new_bo) {
+ if (!old_bo) {
+ return true;
+ } else if (new_bo->dirty) {
+ vmw_bo_dirty_scan(new_bo);
+ dirty = vmw_bo_is_dirty(new_bo);
+ if (dirty) {
+ surf = vmw_user_object_surface(&new_vps->uo);
+ if (surf)
+ vmw_bo_dirty_transfer_to_res(&surf->res);
+ else
+ vmw_bo_dirty_clear(new_bo);
+ }
+ return dirty;
+ } else if (new_bo != old_bo) {
+			/*
+			 * Currently unused because the check at the top
+			 * exits right away. In most cases the buffers being
+			 * different means that their contents differ. For
+			 * the few percent of cases where that's not true,
+			 * the cost of doing the memcmp on all the others
+			 * seems to outweigh the benefits. Leave the
+			 * conditional in to make it trivial to validate by
+			 * removing the initial if (new_bo != old_bo) at the
+			 * start.
+			 */
+ void *old_image;
+ void *new_image;
+ bool changed = false;
+ struct ww_acquire_ctx ctx;
+ const u32 size = new_vps->base.crtc_w *
+ new_vps->base.crtc_h * sizeof(u32);
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ old_image = vmw_bo_map_and_cache(old_bo);
+ new_image = vmw_bo_map_and_cache(new_bo);
+
+ if (old_image && new_image && old_image != new_image)
+ changed = memcmp(old_image, new_image, size) !=
+ 0;
+
+ ttm_bo_unreserve(&new_bo->tbo);
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
+
+ return changed;
+ }
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+vmw_cursor_plane_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
+ old_vps->base.crtc_h != new_vps->base.crtc_h)
+ return true;
+
+ if (old_vps->base.hotspot_x != new_vps->base.hotspot_x ||
+ old_vps->base.hotspot_y != new_vps->base.hotspot_y)
+ return true;
+
+ if (old_vps->cursor.legacy.hotspot_x !=
+ new_vps->cursor.legacy.hotspot_x ||
+ old_vps->cursor.legacy.hotspot_y !=
+ new_vps->cursor.legacy.hotspot_y)
+ return true;
+
+ if (old_vps->base.fb != new_vps->base.fb)
+ return true;
+
+ return false;
+}
+
+/**
+ * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it
+ *
+ * @plane: display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ struct vmw_bo *bo = NULL;
+ struct vmw_surface *surface;
+ int ret = 0;
+
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_user_object_unmap(&vps->uo);
+ vmw_user_object_unref(&vps->uo);
+ }
+
+ if (fb) {
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vps->uo.surface = NULL;
+ } else {
+ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
+ }
+ vmw_user_object_ref(&vps->uo);
+ }
+
+ vps->cursor.update_type = vmw_cursor_update_type(vmw, vps);
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || vps->cursor.legacy.id == surface->snooper.id)
+ vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
+ break;
+ case VMW_CURSOR_UPDATE_MOB: {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
+ struct ttm_operation_ctx ctx = { false, false };
+
+ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+ if (ret != 0)
+ return -ENOMEM;
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret != 0)
+ return -ENOMEM;
+
+ /*
+ * vmw_bo_pin_reserved also validates, so to skip
+ * the extra validation use ttm_bo_pin directly
+ */
+ if (!bo->tbo.pin_count)
+ ttm_bo_pin(&bo->tbo);
+
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ const u32 size = new_state->crtc_w *
+ new_state->crtc_h *
+ sizeof(u32);
+
+ (void)vmw_bo_map_and_cache_size(bo, size);
+ } else {
+ vmw_bo_map_and_cache(bo);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+ }
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ if (!vmw_cursor_plane_changed(vps, old_vps) &&
+ !vmw_cursor_buffer_changed(vps, old_vps)) {
+ vps->cursor.update_type =
+ VMW_CURSOR_UPDATE_NONE;
+ } else {
+ vmw_cursor_mob_get(vcp, vps);
+ vmw_cursor_mob_map(vps);
+ }
+ }
+ }
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_atomic_check - check if the new state is okay
+ *
+ * @plane: cursor plane
+ * @state: info on the new plane state
+ *
+ * This is a chance to fail if the new cursor state does not fit
+ * our requirements.
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ int ret = 0;
+ struct drm_crtc_state *crtc_state = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ enum vmw_cursor_update_type update_type;
+ struct drm_framebuffer *fb = new_state->fb;
+
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, true,
+ true);
+ if (ret)
+ return ret;
+
+ /* Turning off */
+ if (!fb)
+ return 0;
+
+ update_type = vmw_cursor_update_type(vmw, vps);
+ if (update_type == VMW_CURSOR_UPDATE_LEGACY) {
+ if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH ||
+ new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) {
+ drm_warn(&vmw->drm,
+ "Invalid cursor dimensions (%d, %d)\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return -EINVAL;
+ }
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || !surface->snooper.image) {
+ drm_warn(&vmw->drm,
+ "surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void
+vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
+ struct vmw_private *dev_priv = vmw_priv(plane->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ s32 hotspot_x, hotspot_y, cursor_x, cursor_y;
+
+ /*
+ * Hide the cursor if the new bo is null
+ */
+ if (vmw_user_object_is_null(&vps->uo)) {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return;
+ }
+
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ vmw_cursor_plane_update_legacy(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_MOB:
+ vmw_cursor_update_mob(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+	/*
+	 * For all update types, update the cursor position.
+	 */
+ cursor_x = new_state->crtc_x + du->set_gui_x;
+ cursor_y = new_state->crtc_y + du->set_gui_y;
+
+ hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x;
+ hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y;
+
+ vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x,
+ cursor_y + hotspot_y);
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct vmw_plane_state *vps;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata)
+{
+ if (!file_priv->atomic && metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+ metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+ metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
+ VMW_CURSOR_SNOOP_HEIGHT *
+ desc->pitchBytesPerBlock;
+ void *image = kzalloc(cursor_size_bytes, GFP_KERNEL);
+
+ if (!image) {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return image;
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
new file mode 100644
index 000000000000..40694925a70e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_CURSOR_PLANE_H
+#define VMWGFX_CURSOR_PLANE_H
+
+#include "device_include/svga3d_cmd.h"
+#include "drm/drm_file.h"
+#include "drm/drm_fourcc.h"
+#include "drm/drm_plane.h"
+
+#include <linux/types.h>
+
+struct SVGA3dCmdHeader;
+struct ttm_buffer_object;
+struct vmw_bo;
+struct vmw_cursor;
+struct vmw_private;
+struct vmw_surface;
+struct vmw_user_object;
+
+#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
+
+static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+enum vmw_cursor_update_type {
+ VMW_CURSOR_UPDATE_NONE = 0,
+ VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_MOB,
+};
+
+struct vmw_cursor_plane_state {
+ enum vmw_cursor_update_type update_type;
+ bool changed;
+ bool surface_changed;
+ struct vmw_bo *mob;
+ struct {
+ s32 hotspot_x;
+ s32 hotspot_y;
+ u32 id;
+ } legacy;
+};
+
+/**
+ * struct vmw_cursor_plane - Derived class for the cursor plane object
+ *
+ * @base: DRM plane object
+ * @cursor_mobs: Cursor mobs available for re-use
+ */
+struct vmw_cursor_plane {
+ struct drm_plane base;
+
+ struct vmw_bo *cursor_mobs[3];
+};
+
+struct vmw_surface_metadata;
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata);
+void vmw_cursor_cmd_dma_snoop(SVGA3dCmdHeader *header,
+ struct vmw_surface *srf,
+ struct ttm_buffer_object *bo);
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane);
+
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+#endif /* VMWGFX_CURSOR_PLANE_H */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0f32471c8533..8ff958d119be 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,31 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-
#include "vmwgfx_drv.h"
#include "vmwgfx_bo.h"
@@ -460,8 +440,10 @@ static int vmw_device_init(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
}
- dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ u32 seqno = vmw_fence_read(dev_priv);
+
+ atomic_set(&dev_priv->last_read_seqno, seqno);
+ atomic_set(&dev_priv->marker_seq, seqno);
return 0;
}
@@ -474,7 +456,7 @@ static void vmw_device_fini(struct vmw_private *vmw)
while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
;
- vmw->last_read_seqno = vmw_fence_read(vmw);
+ atomic_set(&vmw->last_read_seqno, vmw_fence_read(vmw));
vmw_write(vmw, SVGA_REG_CONFIG_DONE,
vmw->config_done_state);
@@ -733,7 +715,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
pci_set_master(pdev);
- ret = pci_request_regions(pdev, "vmwgfx probe");
+ ret = pcim_request_all_regions(pdev, "vmwgfx probe");
if (ret)
return ret;
@@ -753,7 +735,6 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (!dev->rmmio) {
drm_err(&dev->drm,
"Failed mapping registers mmio memory.\n");
- pci_release_regions(pdev);
return -ENOMEM;
}
} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
@@ -769,16 +750,14 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
dev->fifo_mem = devm_memremap(dev->drm.dev,
fifo_start,
fifo_size,
- MEMREMAP_WB);
+ MEMREMAP_WB | MEMREMAP_DEC);
if (IS_ERR(dev->fifo_mem)) {
drm_err(&dev->drm,
"Failed mapping FIFO memory.\n");
- pci_release_regions(pdev);
return PTR_ERR(dev->fifo_mem);
}
} else {
- pci_release_regions(pdev);
return -EINVAL;
}
@@ -856,7 +835,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
int ret;
enum vmw_res_type i;
bool refuse_dma = false;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
vmw_sw_context_init(dev_priv);
@@ -872,7 +850,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
return ret;
ret = vmw_detect_version(dev_priv);
if (ret)
- goto out_no_pci_or_version;
+ return ret;
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -1172,15 +1150,13 @@ out_err0:
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-out_no_pci_or_version:
- pci_release_regions(pdev);
+
return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
@@ -1216,8 +1192,6 @@ static void vmw_driver_unload(struct drm_device *dev)
idr_destroy(&dev_priv->res_idr[i]);
vmw_mksstat_remove_all(dev_priv);
-
- pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
@@ -1324,9 +1298,6 @@ static void vmw_master_set(struct drm_device *dev,
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
-
- vmw_kms_legacy_hotspot_clear(dev_priv);
}
bool vmwgfx_supported(struct vmw_private *vmw)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5275ef632d4b..eda5b6f8f4c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,29 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef _VMWGFX_DRV_H_
@@ -58,7 +38,7 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 20
+#define VMWGFX_DRIVER_MINOR 21
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_NUM_DISPLAY_UNITS 8
@@ -100,10 +80,6 @@
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12
-#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
-#define VMW_CURSOR_SNOOP_WIDTH 64
-#define VMW_CURSOR_SNOOP_HEIGHT 64
-
#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
@@ -201,7 +177,7 @@ enum vmw_cmdbuf_res_type {
struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
- size_t age;
+ size_t id;
uint32_t *image;
};
@@ -546,7 +522,7 @@ struct vmw_private {
int cmdbuf_waiters; /* Protected by waiter_lock */
int error_waiters; /* Protected by waiter_lock */
int fifo_queue_waiters; /* Protected by waiter_lock */
- uint32_t last_read_seqno;
+ atomic_t last_read_seqno;
struct vmw_fence_manager *fman;
uint32_t irq_mask; /* Updates protected by waiter_lock */
@@ -846,9 +822,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
* GEM related functionality - vmwgfx_gem.c
*/
struct vmw_bo_params;
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo);
+extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -1032,15 +1006,14 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
- int *waiter_count);
-extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
- u32 flag, int *waiter_count);
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+ int *waiter_count);
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count);
/**
* Kernel modesetting - vmwgfx_kms.c
@@ -1050,7 +1023,6 @@ int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
@@ -1067,7 +1039,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);
@@ -1393,8 +1364,10 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
+bool vmw_bo_is_dirty(struct vmw_bo *vbo);
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
+void vmw_bo_dirty_clear(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e52d73eba48..819704ac675d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,29 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
+
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
@@ -3896,8 +3878,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->base.seqno;
- vmw_update_seqno(dev_priv);
- fence_rep.passed_seqno = dev_priv->last_read_seqno;
+ fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman);
}
/*
@@ -4512,8 +4493,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out;
- vmw_kms_cursor_post_execbuf(dev_priv);
-
out:
if (in_fence)
dma_fence_put(in_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 588d50ababf6..c2294abbe753 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,32 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-#include <linux/sched/signal.h>
-
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 31)
@@ -35,14 +14,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work;
bool fifo_down;
- struct list_head cleanup_list;
- uint32_t pending_actions[VMW_ACTION_MAX];
- struct mutex goal_irq_mutex;
- bool goal_irq_on; /* Protected by @goal_irq_mutex */
- bool seqno_valid; /* Protected by @lock, and may not be set to true
- without the @goal_irq_mutex held. */
u64 ctx;
};
@@ -52,12 +24,10 @@ struct vmw_user_fence {
};
/**
- * struct vmw_event_fence_action - fence action that delivers a drm event.
+ * struct vmw_event_fence_action - fence callback that delivers a DRM event.
*
- * @action: A struct vmw_fence_action to hook up to a fence.
+ * @base: Embedded dma_fence_cb, for use with dma_fence_add_callback().
* @event: A pointer to the pending event.
- * @fence: A referenced pointer to the fence to keep it alive while @action
- * hangs on it.
* @dev: Pointer to a struct drm_device so we can access the event stuff.
* @tv_sec: If non-null, the variable pointed to will be assigned
* current time tv_sec val when the fence signals.
@@ -65,10 +35,9 @@ struct vmw_user_fence {
* be assigned the current time tv_usec val when the fence signals.
*/
struct vmw_event_fence_action {
- struct vmw_fence_action action;
+ struct dma_fence_cb base;
struct drm_pending_event *event;
- struct vmw_fence_obj *fence;
struct drm_device *dev;
uint32_t *tv_sec;
@@ -81,44 +50,6 @@ fman_from_fence(struct vmw_fence_obj *fence)
return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
-static u32 vmw_fence_goal_read(struct vmw_private *vmw)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
- else
- return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
-}
-
-static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
- else
- vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
-}
-
-/*
- * Note on fencing subsystem usage of irqs:
- * Typically the vmw_fences_update function is called
- *
- * a) When a new fence seqno has been submitted by the fifo code.
- * b) On-demand when we have waiters. Sleeping waiters will switch on the
- * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
- * irq is received. When the last fence waiter is gone, that IRQ is masked
- * away.
- *
- * In situations where there are no waiters and we don't submit any new fences,
- * fence objects may not be signaled. This is perfectly OK, since there are
- * no consumers of the signaled data, but that is NOT ok when there are fence
- * actions attached to a fence. The fencing subsystem then makes use of the
- * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
- * which has an action attached, and each time vmw_fences_update is called,
- * the subsystem makes sure the fence goal seqno is updated.
- *
- * The fence goal seqno irq is on as long as there are unsignaled fence
- * objects with actions attached to them.
- */
-
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
@@ -126,8 +57,21 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
if (!list_empty(&fence->head)) {
+ /* The fence manager still holds an implicit reference to this
+ * fence via the fence list while head is set. Because the lock
+ * must be held whenever the fence manager updates the fence
+ * list, either the fence has already been removed by the time
+ * we take the lock below, or we can safely remove it here and
+ * the fence manager will never see it. This implies the fence
+ * is being deleted without having signaled, which is dubious
+ * but valid if there are no callbacks. The dma_fence code that
+ * calls this hook already warns when an unsignaled fence with
+ * callbacks is deleted, so there is no need to warn again here.
+ */
spin_lock(&fman->lock);
list_del_init(&fence->head);
+ if (fence->waiter_added)
+ vmw_seqno_waiter_remove(fman->dev_priv);
spin_unlock(&fman->lock);
}
fence->destroy(fence);
@@ -143,165 +87,46 @@ static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
return "svga";
}
+/* When we toggle signaling for the SVGA device there is a race period from
+ * the time we first read the fence seqno to the time we enable interrupts.
+ * If we miss the interrupt for a fence during this period it's likely the driver
+ * will stall. As a result we need to re-read the seqno after interrupts are
+ * enabled. If interrupts were already enabled we just increment the number of
+ * seqno waiters.
+ */
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
+ u32 seqno;
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
-
- u32 seqno = vmw_fence_read(dev_priv);
- if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
+check_for_race:
+ seqno = vmw_fence_read(dev_priv);
+ if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(dev_priv);
+ fence->waiter_added = false;
+ }
return false;
-
+ } else if (!fence->waiter_added) {
+ fence->waiter_added = true;
+ if (vmw_seqno_waiter_add(dev_priv))
+ goto check_for_race;
+ }
return true;
}
-struct vmwgfx_wait_cb {
- struct dma_fence_cb base;
- struct task_struct *task;
-};
-
-static void
-vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct vmwgfx_wait_cb *wait =
- container_of(cb, struct vmwgfx_wait_cb, base);
-
- wake_up_process(wait->task);
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman);
-
-static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
-{
- struct vmw_fence_obj *fence =
- container_of(f, struct vmw_fence_obj, base);
-
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- struct vmw_private *dev_priv = fman->dev_priv;
- struct vmwgfx_wait_cb cb;
- long ret = timeout;
-
- if (likely(vmw_fence_obj_signaled(fence)))
- return timeout;
-
- vmw_seqno_waiter_add(dev_priv);
-
- spin_lock(f->lock);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
- goto out;
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- goto out;
- }
-
- cb.base.func = vmwgfx_wait_cb;
- cb.task = current;
- list_add(&cb.base.node, &f->cb_list);
-
- for (;;) {
- __vmw_fences_update(fman);
-
- /*
- * We can use the barrier free __set_current_state() since
- * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
- * fence spinlock.
- */
- if (intr)
- __set_current_state(TASK_INTERRUPTIBLE);
- else
- __set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
- if (ret == 0 && timeout > 0)
- ret = 1;
- break;
- }
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- if (ret == 0)
- break;
-
- spin_unlock(f->lock);
-
- ret = schedule_timeout(ret);
-
- spin_lock(f->lock);
- }
- __set_current_state(TASK_RUNNING);
- if (!list_empty(&cb.base.node))
- list_del(&cb.base.node);
-
-out:
- spin_unlock(f->lock);
-
- vmw_seqno_waiter_remove(dev_priv);
-
- return ret;
-}
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman);
static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
- .wait = vmw_fence_wait,
.release = vmw_fence_obj_destroy,
};
-/*
- * Execute signal actions on fences recently signaled.
- * This is done from a workqueue so we don't have to execute
- * signal actions from atomic context.
- */
-
-static void vmw_fence_work_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, work);
- struct list_head list;
- struct vmw_fence_action *action, *next_action;
- bool seqno_valid;
-
- do {
- INIT_LIST_HEAD(&list);
- mutex_lock(&fman->goal_irq_mutex);
-
- spin_lock(&fman->lock);
- list_splice_init(&fman->cleanup_list, &list);
- seqno_valid = fman->seqno_valid;
- spin_unlock(&fman->lock);
-
- if (!seqno_valid && fman->goal_irq_on) {
- fman->goal_irq_on = false;
- vmw_goal_waiter_remove(fman->dev_priv);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
- if (list_empty(&list))
- return;
-
- /*
- * At this point, only we should be able to manipulate the
- * list heads of the actions we have on the private list.
- * hence fman::lock not held.
- */
-
- list_for_each_entry_safe(action, next_action, &list, head) {
- list_del_init(&action->head);
- if (action->cleanup)
- action->cleanup(action);
- }
- } while (1);
-}
-
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
@@ -312,10 +137,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->dev_priv = dev_priv;
spin_lock_init(&fman->lock);
INIT_LIST_HEAD(&fman->fence_list);
- INIT_LIST_HEAD(&fman->cleanup_list);
- INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- mutex_init(&fman->goal_irq_mutex);
fman->ctx = dma_fence_context_alloc(1);
return fman;
@@ -325,11 +147,8 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
bool lists_empty;
- (void) cancel_work_sync(&fman->work);
-
spin_lock(&fman->lock);
- lists_empty = list_empty(&fman->fence_list) &&
- list_empty(&fman->cleanup_list);
+ lists_empty = list_empty(&fman->fence_list);
spin_unlock(&fman->lock);
BUG_ON(!lists_empty);
@@ -344,7 +163,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
fman->ctx, seqno);
- INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
spin_lock(&fman->lock);
@@ -352,6 +170,11 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
ret = -EBUSY;
goto out_unlock;
}
+ /* This creates an implicit reference to the fence from the fence
+ * manager. It is dropped when the fence is signaled, which is
+ * expected to happen before deletion. The destructor has code to
+ * catch the rare deletion-before-signaling case.
+ */
list_add_tail(&fence->head, &fman->fence_list);
out_unlock:
@@ -360,148 +183,35 @@ out_unlock:
}
-static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
- struct list_head *list)
-{
- struct vmw_fence_action *action, *next_action;
-
- list_for_each_entry_safe(action, next_action, list, head) {
- list_del_init(&action->head);
- fman->pending_actions[action->type]--;
- if (action->seq_passed != NULL)
- action->seq_passed(action);
-
- /*
- * Add the cleanup action to the cleanup list so that
- * it will be performed by a worker task.
- */
-
- list_add_tail(&action->head, &fman->cleanup_list);
- }
-}
-
-/**
- * vmw_fence_goal_new_locked - Figure out a new device fence goal
- * seqno if needed.
- *
- * @fman: Pointer to a fence manager.
- * @passed_seqno: The seqno the device currently signals as passed.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when we have a new passed_seqno, and
- * we might need to update the fence goal. It checks to see whether
- * the current fence goal has already passed, and, in that case,
- * scans through all unsignaled fences to get the next fence object with an
- * action attached, and sets the seqno of that fence as a new fence goal.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
- u32 passed_seqno)
-{
- u32 goal_seqno;
- struct vmw_fence_obj *fence, *next_fence;
-
- if (likely(!fman->seqno_valid))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
- return false;
-
- fman->seqno_valid = false;
- list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
- if (!list_empty(&fence->seq_passed_actions)) {
- fman->seqno_valid = true;
- vmw_fence_goal_write(fman->dev_priv,
- fence->base.seqno);
- break;
- }
- }
-
- return true;
-}
-
-
-/**
- * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
- * needed.
- *
- * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
- * considered as a device fence goal.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when an action has been attached to a fence to
- * check whether the seqno of that fence should be used for a fence
- * goal interrupt. This is typically needed if the current fence goal is
- * invalid, or has a higher seqno than that of the current fence object.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- u32 goal_seqno;
-
- if (dma_fence_is_signaled_locked(&fence->base))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(fman->seqno_valid &&
- goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
- return false;
-
- vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
- fman->seqno_valid = true;
-
- return true;
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman)
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman)
{
struct vmw_fence_obj *fence, *next_fence;
- struct list_head action_list;
- bool needs_rerun;
- uint32_t seqno, new_seqno;
+ const bool cookie = dma_fence_begin_signalling();
+ const u32 seqno = vmw_fence_read(fman->dev_priv);
- seqno = vmw_fence_read(fman->dev_priv);
-rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(fman->dev_priv);
+ fence->waiter_added = false;
+ }
dma_fence_signal_locked(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
} else
break;
}
-
- /*
- * Rerun if the fence goal seqno was updated, and the
- * hardware might have raced with that update, so that
- * we missed a fence_goal irq.
- */
-
- needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
- if (unlikely(needs_rerun)) {
- new_seqno = vmw_fence_read(fman->dev_priv);
- if (new_seqno != seqno) {
- seqno = new_seqno;
- goto rerun;
- }
- }
-
- if (!list_empty(&fman->cleanup_list))
- (void) schedule_work(&fman->work);
+ dma_fence_end_signalling(cookie);
+ atomic_set_release(&fman->dev_priv->last_read_seqno, seqno);
+ return seqno;
}
-void vmw_fences_update(struct vmw_fence_manager *fman)
+u32 vmw_fences_update(struct vmw_fence_manager *fman)
{
+ u32 seqno;
spin_lock(&fman->lock);
- __vmw_fences_update(fman);
+ seqno = __vmw_fences_update(fman);
spin_unlock(&fman->lock);
+ return seqno;
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -539,14 +249,13 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(!fence))
return -ENOMEM;
- ret = vmw_fence_obj_init(fman, fence, seqno,
- vmw_fence_destroy);
+ ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy);
if (unlikely(ret != 0))
goto out_err_init;
@@ -638,7 +347,6 @@ out_no_object:
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
- struct list_head action_list;
int ret;
/*
@@ -661,10 +369,6 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
dma_fence_signal(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
}
BUG_ON(!list_empty(&fence->head));
@@ -778,7 +482,6 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_signaled_arg *) data;
struct ttm_base_object *base;
struct vmw_fence_obj *fence;
- struct vmw_fence_manager *fman;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -787,14 +490,11 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
- fman = fman_from_fence(fence);
arg->signaled = vmw_fence_obj_signaled(fence);
arg->signaled_flags = arg->flags;
- spin_lock(&fman->lock);
- arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock(&fman->lock);
+ arg->passed_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
ttm_base_object_unref(&base);
@@ -822,10 +522,11 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
* attached has passed. It queues the event on the submitter's event list.
* This function is always called from atomic context.
*/
-static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
+ container_of(cb, struct vmw_event_fence_action, base);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
@@ -837,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
if (likely(eaction->tv_sec != NULL)) {
struct timespec64 ts;
- ktime_get_ts64(&ts);
+ ts = ktime_to_timespec64(f->timestamp);
/* monotonic time, so no y2038 overflow */
*eaction->tv_sec = ts.tv_sec;
*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -846,75 +547,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
drm_send_event_locked(dev, eaction->event);
eaction->event = NULL;
spin_unlock_irq(&dev->event_lock);
-}
-
-/**
- * vmw_event_fence_action_cleanup
- *
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
- *
- * This function is the struct vmw_fence_action destructor. It's typically
- * called from a workqueue.
- */
-static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
-{
- struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
-
- vmw_fence_obj_unreference(&eaction->fence);
+ dma_fence_put(f);
kfree(eaction);
}
-
-/**
- * vmw_fence_obj_add_action - Add an action to a fence object.
- *
- * @fence: The fence object.
- * @action: The action to add.
- *
- * Note that the action callbacks may be executed before this function
- * returns.
- */
-static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
- struct vmw_fence_action *action)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- bool run_update = false;
-
- mutex_lock(&fman->goal_irq_mutex);
- spin_lock(&fman->lock);
-
- fman->pending_actions[action->type]++;
- if (dma_fence_is_signaled_locked(&fence->base)) {
- struct list_head action_list;
-
- INIT_LIST_HEAD(&action_list);
- list_add_tail(&action->head, &action_list);
- vmw_fences_perform_actions(fman, &action_list);
- } else {
- list_add_tail(&action->head, &fence->seq_passed_actions);
-
- /*
- * This function may set fman::seqno_valid, so it must
- * be run with the goal_irq_mutex held.
- */
- run_update = vmw_fence_goal_check_locked(fence);
- }
-
- spin_unlock(&fman->lock);
-
- if (run_update) {
- if (!fman->goal_irq_on) {
- fman->goal_irq_on = true;
- vmw_goal_waiter_add(fman->dev_priv);
- }
- vmw_fences_update(fman);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
-}
-
/**
* vmw_event_fence_action_queue - Post an event for sending when a fence
* object seqno has passed.
@@ -949,18 +585,14 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
return -ENOMEM;
eaction->event = event;
-
- eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
- eaction->action.cleanup = vmw_event_fence_action_cleanup;
- eaction->action.type = VMW_ACTION_EVENT;
-
- eaction->fence = vmw_fence_obj_reference(fence);
eaction->dev = &fman->dev_priv->drm;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
- vmw_fence_obj_add_action(fence, &eaction->action);
-
+ vmw_fence_obj_reference(fence); /* dropped in the callback */
+ if (dma_fence_add_callback(&fence->base, &eaction->base,
+ vmw_event_fence_action_seq_passed) < 0)
+ vmw_event_fence_action_seq_passed(&fence->base, &eaction->base);
return 0;
}
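
The event path above now rides the generic dma_fence callback machinery instead of the driver-private vmw_fence_action lists. A minimal sketch of that pattern, assuming only <linux/dma-fence.h> and <linux/slab.h> (the names here are hypothetical, not from the patch):

struct my_event_cb {
	struct dma_fence_cb base;
	/* event payload would live here */
};

static void my_event_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct my_event_cb *ecb = container_of(cb, struct my_event_cb, base);

	/* deliver the event here ... */
	dma_fence_put(f);	/* drop the reference taken at queue time */
	kfree(ecb);
}

static int queue_event_on_fence(struct dma_fence *f, struct my_event_cb *ecb)
{
	dma_fence_get(f);	/* dropped in my_event_cb_func() */
	/*
	 * dma_fence_add_callback() returns -ENOENT if @f has already
	 * signaled; run the callback inline in that case, exactly as
	 * vmw_event_fence_action_queue() does above.
	 */
	if (dma_fence_add_callback(f, &ecb->base, my_event_cb_func) < 0)
		my_event_cb_func(f, &ecb->base);
	return 0;
}
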
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index a7eee579c76a..e897cccae1ae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -39,27 +39,10 @@ struct drm_pending_event;
struct vmw_private;
struct vmw_fence_manager;
-/**
- *
- *
- */
-enum vmw_action_type {
- VMW_ACTION_EVENT = 0,
- VMW_ACTION_MAX
-};
-
-struct vmw_fence_action {
- struct list_head head;
- enum vmw_action_type type;
- void (*seq_passed) (struct vmw_fence_action *action);
- void (*cleanup) (struct vmw_fence_action *action);
-};
-
struct vmw_fence_obj {
struct dma_fence base;
-
+ bool waiter_added;
struct list_head head;
- struct list_head seq_passed_actions;
void (*destroy)(struct vmw_fence_obj *fence);
};
@@ -86,7 +69,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence)
return fence;
}
-extern void vmw_fences_update(struct vmw_fence_manager *fman);
+u32 vmw_fences_update(struct vmw_fence_manager *fman);
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
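
vmw_fences_update() now returns the raw 32-bit seqno, and every "has this fence passed" test in this series relies on wrap-safe unsigned arithmetic against VMW_FENCE_WRAP (1 << 31). A self-contained illustration of that comparison (plain C, outside the kernel):

#include <stdbool.h>
#include <stdint.h>

/* True when read_seqno has reached or passed fence_seqno, even across
 * the 2^32 rollover, provided the two values are < 2^31 apart. */
static bool seqno_passed(uint32_t read_seqno, uint32_t fence_seqno)
{
	return read_seqno - fence_seqno < (UINT32_C(1) << 31);
}

/* Example: seqno_passed(0x00000002, 0xfffffffe) is true, because the
 * unsigned difference 4 is well below 2^31. */
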
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ed5015ced392..eedf1fe60be7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -84,7 +84,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
@@ -101,7 +101,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
dma_buf_vunmap(obj->import_attach->dmabuf, map);
else
drm_gem_ttm_vunmap(obj, map);
@@ -111,7 +111,7 @@ static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/*
* Reset both vm_ops and vm_private_data, so we don't end up with
* vm_ops pointing to our implementation if the dma-buf backend
@@ -140,7 +140,7 @@ static const struct vm_operations_struct vmw_vm_ops = {
.close = ttm_bo_vm_close,
};
-static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
.open = vmw_gem_object_open,
.close = vmw_gem_object_close,
@@ -154,20 +154,6 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vm_ops = &vmw_vm_ops,
};
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo)
-{
- int ret = vmw_bo_create(vmw, params, p_vbo);
-
- if (ret != 0)
- goto out_no_bo;
-
- (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-out_no_bo:
- return ret;
-}
-
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -183,7 +169,7 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
+ ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
@@ -298,11 +284,10 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
id, bo->tbo.base.size, placement, type);
- seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
+ seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d",
bo->tbo.priority,
bo->tbo.pin_count,
- kref_read(&bo->tbo.base.refcount),
- kref_read(&bo->tbo.kref));
+ kref_read(&bo->tbo.base.refcount));
seq_puts(m, "\n");
}
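
The vmwgfx_gem.c hunks above replace direct obj->import_attach tests with the drm_gem_is_imported() helper. A minimal sketch of the resulting vmap path, assuming <drm/drm_gem.h>, <drm/drm_gem_ttm_helper.h> and <linux/dma-buf.h> (illustrative only):

static int vmap_sketch(struct drm_gem_object *obj, struct iosys_map *map)
{
	/* Imported dma-bufs must be mapped through the exporter ... */
	if (drm_gem_is_imported(obj))
		return dma_buf_vmap(obj->import_attach->dmabuf, map);
	/* ... while native objects go through the TTM GEM helper. */
	return drm_gem_ttm_vmap(obj, map);
}
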
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 086e69a130d4..05773eb394d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -123,26 +123,17 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv)
-{
- uint32_t seqno = vmw_fence_read(dev_priv);
-
- if (dev_priv->last_read_seqno != seqno) {
- dev_priv->last_read_seqno = seqno;
- vmw_fences_update(dev_priv->fman);
- }
-}
-
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
bool ret;
+ u32 last_read_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
- vmw_update_seqno(dev_priv);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ last_read_seqno = vmw_fences_update(dev_priv->fman);
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
@@ -239,51 +230,59 @@ out_err:
return ret;
}
-void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if ((*waiter_count)++ == 0) {
vmw_irq_status_write(dev_priv, flag);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if (--(*waiter_count) == 0) {
dev_priv->irq_mask &= ~flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
-void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
static void vmw_irq_preinstall(struct drm_device *dev)
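
The waiter helpers above now report whether the 0->1 (or 1->0) waiter-count transition actually reprogrammed SVGA_REG_IRQMASK; vmwgfx_fence.c uses that return value to detect the window in which a fence could signal before its interrupt was unmasked. A condensed sketch of the add side, mirroring the code above (illustrative only):

static bool waiter_add_sketch(struct vmw_private *dev_priv, u32 flag,
			      int *waiter_count)
{
	bool hw_programmed = false;

	spin_lock(&dev_priv->waiter_lock);
	/* Only the first waiter unmasks the interrupt in hardware. */
	if ((*waiter_count)++ == 0) {
		vmw_irq_status_write(dev_priv, flag);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		hw_programmed = true;
	}
	spin_unlock(&dev_priv->waiter_lock);
	/* A true return tells the caller to re-check the seqno. */
	return hw_programmed;
}
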
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1912ac1cde6d..54ea1b513950 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,33 +1,15 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
+
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
+#include "vmwgfx_resource_priv.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
@@ -59,474 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du)
drm_connector_cleanup(&du->connector);
}
-/*
- * Display Unit Cursor functions
- */
-
-static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY);
-
-struct vmw_svga_fifo_cmd_define_cursor {
- u32 cmd;
- SVGAFifoCmdDefineAlphaCursor cursor;
-};
-
-/**
- * vmw_send_define_cursor_cmd - queue a define cursor command
- * @dev_priv: the private driver struct
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- struct vmw_svga_fifo_cmd_define_cursor *cmd;
- const u32 image_size = width * height * sizeof(*image);
- const u32 cmd_size = sizeof(*cmd) + image_size;
-
- /* Try to reserve fifocmd space and swallow any failures;
- such reservations cannot be left unconsumed for long
- under the risk of clogging other fifocmd users, so
- we treat reservations separtely from the way we treat
- other fallible KMS-atomic resources at prepare_fb */
- cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
-
- if (unlikely(!cmd))
- return;
-
- memset(cmd, 0, sizeof(*cmd));
-
- memcpy(&cmd[1], image, image_size);
-
- cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
- cmd->cursor.id = 0;
- cmd->cursor.width = width;
- cmd->cursor.height = height;
- cmd->cursor.hotspotX = hotspotX;
- cmd->cursor.hotspotY = hotspotY;
-
- vmw_cmd_commit_flush(dev_priv, cmd_size);
-}
-
-/**
- * vmw_cursor_update_image - update the cursor image on the provided plane
- * @dev_priv: the private driver struct
- * @vps: the plane state of the cursor plane
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_cursor_update_image(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- if (vps->cursor.bo)
- vmw_cursor_update_mob(dev_priv, vps, image,
- vps->base.crtc_w, vps->base.crtc_h,
- hotspotX, hotspotY);
-
- else
- vmw_send_define_cursor_cmd(dev_priv, image, width, height,
- hotspotX, hotspotY);
-}
-
-
-/**
- * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
- *
- * Called from inside vmw_du_cursor_plane_atomic_update to actually
- * make the cursor-image live.
- *
- * @dev_priv: device to work with
- * @vps: the plane state of the cursor plane
- * @image: cursor source data to fill the MOB with
- * @width: source data width
- * @height: source data height
- * @hotspotX: cursor hotspot x
- * @hotspotY: cursor hotspot Y
- */
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- SVGAGBCursorHeader *header;
- SVGAGBAlphaCursorHeader *alpha_header;
- const u32 image_size = width * height * sizeof(*image);
-
- header = vmw_bo_map_and_cache(vps->cursor.bo);
- alpha_header = &header->header.alphaHeader;
-
- memset(header, 0, sizeof(*header));
-
- header->type = SVGA_ALPHA_CURSOR;
- header->sizeInBytes = image_size;
-
- alpha_header->hotspotX = hotspotX;
- alpha_header->hotspotY = hotspotY;
- alpha_header->width = width;
- alpha_header->height = height;
-
- memcpy(header + 1, image, image_size);
- vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
- vps->cursor.bo->tbo.resource->start);
-}
-
-
-static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
-{
- return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
-}
-
-/**
- * vmw_du_cursor_plane_acquire_image -- Acquire the image data
- * @vps: cursor plane state
- */
-static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
-{
- struct vmw_surface *surf;
-
- if (vmw_user_object_is_null(&vps->uo))
- return NULL;
-
- surf = vmw_user_object_surface(&vps->uo);
- if (surf && !vmw_user_object_is_mapped(&vps->uo))
- return surf->snooper.image;
-
- return vmw_user_object_map(&vps->uo);
-}
-
-static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
- struct vmw_plane_state *new_vps)
-{
- void *old_image;
- void *new_image;
- u32 size;
- bool changed;
-
- if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
- old_vps->base.crtc_h != new_vps->base.crtc_h)
- return true;
-
- if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
- old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
- return true;
-
- size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
-
- old_image = vmw_du_cursor_plane_acquire_image(old_vps);
- new_image = vmw_du_cursor_plane_acquire_image(new_vps);
-
- changed = false;
- if (old_image && new_image && old_image != new_image)
- changed = memcmp(old_image, new_image, size) != 0;
-
- return changed;
-}
-
-static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
-{
- if (!(*vbo))
- return;
-
- ttm_bo_unpin(&(*vbo)->tbo);
- vmw_bo_unreference(vbo);
-}
-
-static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- u32 i;
-
- if (!vps->cursor.bo)
- return;
-
- vmw_du_cursor_plane_unmap_cm(vps);
-
- /* Look for a free slot to return this mob to the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (!vcp->cursor_mobs[i]) {
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Cache is full: See if this mob is bigger than an existing mob. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i]->tbo.base.size <
- vps->cursor.bo->tbo.base.size) {
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Destroy it if it's not worth caching. */
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
-}
-
-static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- u32 i;
- u32 cursor_max_dim, mob_max_size;
- struct vmw_fence_obj *fence = NULL;
- int ret;
-
- if (!dev_priv->has_mob ||
- (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
- return -EINVAL;
-
- mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
-
- if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
- vps->base.crtc_h > cursor_max_dim)
- return -EINVAL;
-
- if (vps->cursor.bo) {
- if (vps->cursor.bo->tbo.base.size >= size)
- return 0;
- vmw_du_put_cursor_mob(vcp, vps);
- }
-
- /* Look for an unused mob in the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i] &&
- vcp->cursor_mobs[i]->tbo.base.size >= size) {
- vps->cursor.bo = vcp->cursor_mobs[i];
- vcp->cursor_mobs[i] = NULL;
- return 0;
- }
- }
- /* Create a new mob if we can't find an existing one. */
- ret = vmw_bo_create_and_populate(dev_priv, size,
- VMW_BO_DOMAIN_MOB,
- &vps->cursor.bo);
-
- if (ret != 0)
- return ret;
-
- /* Fence the mob creation so we are guarateed to have the mob */
- ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
- if (ret != 0)
- goto teardown;
-
- ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (ret != 0) {
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- goto teardown;
- }
-
- dma_fence_wait(&fence->base, false);
- dma_fence_put(&fence->base);
-
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- return 0;
-
-teardown:
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
- return ret;
-}
-
-
-static void vmw_cursor_update_position(struct vmw_private *dev_priv,
- bool show, int x, int y)
-{
- const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
- : SVGA_CURSOR_ON_HIDE;
- uint32_t count;
-
- spin_lock(&dev_priv->cursor_lock);
- if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
- vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
- } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
- count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
- } else {
- vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
- }
- spin_unlock(&dev_priv->cursor_lock);
-}
-
-void vmw_kms_cursor_snoop(struct vmw_surface *srf,
- struct ttm_object_file *tfile,
- struct ttm_buffer_object *bo,
- SVGA3dCmdHeader *header)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- SVGA3dCopyBox *box;
- unsigned box_count;
- void *virtual;
- bool is_iomem;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
- int i, ret;
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
-
- cmd = container_of(header, struct vmw_dma_cmd, header);
-
- /* No snooper installed, nothing to copy */
- if (!srf->snooper.image)
- return;
-
- if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
- DRM_ERROR("face and mipmap for cursors should never != 0\n");
- return;
- }
-
- if (cmd->header.size < 64) {
- DRM_ERROR("at least one full copy box must be given\n");
- return;
- }
-
- box = (SVGA3dCopyBox *)&cmd[1];
- box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
- sizeof(SVGA3dCopyBox);
-
- if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
- box->x != 0 || box->y != 0 || box->z != 0 ||
- box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1 ||
- box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
- /* TODO handle none page aligned offsets */
- /* TODO handle more dst & src != 0 */
- /* TODO handle more then one copy */
- DRM_ERROR("Can't snoop dma request for cursor!\n");
- DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
- box->srcx, box->srcy, box->srcz,
- box->x, box->y, box->z,
- box->w, box->h, box->d, box_count,
- cmd->dma.guest.ptr.offset);
- return;
- }
-
- kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
- kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(bo, true, false, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
-
- if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
- memcpy(srf->snooper.image, virtual,
- VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
- } else {
- /* Image is unsigned pointer. */
- for (i = 0; i < box->h; i++)
- memcpy(srf->snooper.image + i * image_pitch,
- virtual + i * cmd->dma.guest.pitch,
- box->w * desc->pitchBytesPerBlock);
- }
-
- srf->snooper.age++;
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(bo);
-}
-
-/**
- * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
- *
- * @dev_priv: Pointer to the device private struct.
- *
- * Clears all legacy hotspots.
- */
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- drm_modeset_lock_all(dev);
- drm_for_each_crtc(crtc, dev) {
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = 0;
- du->hotspot_y = 0;
- }
- drm_modeset_unlock_all(dev);
-}
-
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- mutex_lock(&dev->mode_config.mutex);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- if (!du->cursor_surface ||
- du->cursor_age == du->cursor_surface->snooper.age ||
- !du->cursor_surface->snooper.image)
- continue;
-
- du->cursor_age = du->cursor_surface->snooper.age;
- vmw_send_define_cursor_cmd(dev_priv,
- du->cursor_surface->snooper.image,
- VMW_CURSOR_SNOOP_WIDTH,
- VMW_CURSOR_SNOOP_HEIGHT,
- du->hotspot_x + du->core_hotspot_x,
- du->hotspot_y + du->core_hotspot_y);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- u32 i;
-
- vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
-
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
-
- drm_plane_cleanup(plane);
-}
-
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
@@ -575,262 +89,6 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
/**
- * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
- *
- * @vps: plane_state
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
-{
- int ret;
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- struct ttm_buffer_object *bo;
-
- if (!vps->cursor.bo)
- return -EINVAL;
-
- bo = &vps->cursor.bo->tbo;
-
- if (bo->base.size < size)
- return -EINVAL;
-
- if (vps->cursor.bo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- vmw_bo_map_and_cache(vps->cursor.bo);
-
- ttm_bo_unreserve(bo);
-
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- return 0;
-}
-
-
-/**
- * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
- *
- * @vps: state of the cursor plane
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
-{
- int ret = 0;
- struct vmw_bo *vbo = vps->cursor.bo;
-
- if (!vbo || !vbo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
- if (likely(ret == 0)) {
- vmw_bo_unmap(vbo);
- ttm_bo_unreserve(&vbo->tbo);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
- *
- * @plane: cursor plane
- * @old_state: contains the state to clean up
- *
- * Unmaps all cursor bo mappings and unpins the cursor surface
- *
- * Returns 0 on success
- */
-void
-vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
-
- if (!vmw_user_object_is_null(&vps->uo))
- vmw_user_object_unmap(&vps->uo);
-
- vmw_du_cursor_plane_unmap_cm(vps);
- vmw_du_put_cursor_mob(vcp, vps);
-
- vmw_du_plane_unpin_surf(vps);
- vmw_user_object_unref(&vps->uo);
-}
-
-
-/**
- * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
- *
- * @plane: display plane
- * @new_state: info on the new plane state, including the FB
- *
- * Returns 0 on success
- */
-int
-vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_framebuffer *fb = new_state->fb;
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_bo *bo = NULL;
- int ret = 0;
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_user_object_unmap(&vps->uo);
- vmw_user_object_unref(&vps->uo);
- }
-
- if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
- vps->uo.surface = NULL;
- } else {
- memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
- }
- vmw_user_object_ref(&vps->uo);
- }
-
- bo = vmw_user_object_buffer(&vps->uo);
- if (bo) {
- struct ttm_operation_ctx ctx = {false, false};
-
- ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
- if (ret != 0)
- return -ENOMEM;
-
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret != 0)
- return -ENOMEM;
-
- vmw_bo_pin_reserved(bo, true);
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
-
- (void)vmw_bo_map_and_cache_size(bo, size);
- } else {
- vmw_bo_map_and_cache(bo);
- }
- ttm_bo_unreserve(&bo->tbo);
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_du_get_cursor_mob(vcp, vps);
- vmw_du_cursor_plane_map_cm(vps);
- }
-
- return 0;
-}
-
-
-void
-vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
- struct vmw_bo *old_bo = NULL;
- struct vmw_bo *new_bo = NULL;
- struct ww_acquire_ctx ctx;
- s32 hotspot_x, hotspot_y;
- int ret;
-
- hotspot_x = du->hotspot_x + new_state->hotspot_x;
- hotspot_y = du->hotspot_y + new_state->hotspot_y;
-
- du->cursor_surface = vmw_user_object_surface(&vps->uo);
-
- if (vmw_user_object_is_null(&vps->uo)) {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- return;
- }
-
- vps->cursor.hotspot_x = hotspot_x;
- vps->cursor.hotspot_y = hotspot_y;
-
- if (du->cursor_surface)
- du->cursor_age = du->cursor_surface->snooper.age;
-
- ww_acquire_init(&ctx, &reservation_ww_class);
-
- if (!vmw_user_object_is_null(&old_vps->uo)) {
- old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
- if (ret != 0)
- return;
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- new_bo = vmw_user_object_buffer(&vps->uo);
- if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
- if (ret != 0) {
- if (old_bo) {
- ttm_bo_unreserve(&old_bo->tbo);
- ww_acquire_fini(&ctx);
- }
- return;
- }
- } else {
- new_bo = NULL;
- }
- }
- if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
- /*
- * If it hasn't changed, avoid making the device do extra
- * work by keeping the old cursor active.
- */
- struct vmw_cursor_plane_state tmp = old_vps->cursor;
- old_vps->cursor = vps->cursor;
- vps->cursor = tmp;
- } else {
- void *image = vmw_du_cursor_plane_acquire_image(vps);
- if (image)
- vmw_cursor_update_image(dev_priv, vps, image,
- new_state->crtc_w,
- new_state->crtc_h,
- hotspot_x, hotspot_y);
- }
-
- if (new_bo)
- ttm_bo_unreserve(&new_bo->tbo);
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
-
- ww_acquire_fini(&ctx);
-
- du->cursor_x = new_state->crtc_x + du->set_gui_x;
- du->cursor_y = new_state->crtc_y + du->set_gui_y;
-
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + hotspot_x,
- du->cursor_y + hotspot_y);
-
- du->core_hotspot_x = hotspot_x - du->hotspot_x;
- du->core_hotspot_y = hotspot_y - du->hotspot_y;
-}
-
-
-/**
* vmw_du_primary_plane_atomic_check - check if the new state is okay
*
* @plane: display plane
@@ -873,66 +131,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
return ret;
}
-
-/**
- * vmw_du_cursor_plane_atomic_check - check if the new state is okay
- *
- * @plane: cursor plane
- * @state: info on the new plane state
- *
- * This is a chance to fail if the new cursor state does not fit
- * our requirements.
- *
- * Returns 0 on success
- */
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- int ret = 0;
- struct drm_crtc_state *crtc_state = NULL;
- struct vmw_surface *surface = NULL;
- struct drm_framebuffer *fb = new_state->fb;
-
- if (new_state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
- new_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
- /* Turning off */
- if (!fb)
- return 0;
-
- /* A lot of the code assumes this */
- if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
- DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
- new_state->crtc_w, new_state->crtc_h);
- return -EINVAL;
- }
-
- if (!vmw_framebuffer_to_vfb(fb)->bo) {
- surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
-
- WARN_ON(!surface);
-
- if (!surface ||
- (!surface->snooper.image && !surface->res.guest_memory_bo)) {
- DRM_ERROR("surface not suitable for cursor\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1076,7 +274,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
vps->cpp = 0;
- memset(&vps->cursor, 0, sizeof(vps->cursor));
+ vps->cursor.mob = NULL;
/* Each ref counted resource needs to be acquired again */
vmw_user_object_ref(&vps->uo);
@@ -1221,7 +419,20 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
+ if (bo) {
+ vmw_bo_dirty_release(bo);
+ /*
+ * bo->dirty is reference counted, so a NULL value here
+ * means the surface was never coherent to begin with,
+ * and the dirty tracker held by the vmw_resource must
+ * be freed instead.
+ */
+ if (!bo->dirty && surf && surf->res.dirty)
+ surf->res.func->dirty_free(&surf->res);
+ }
drm_framebuffer_cleanup(framebuffer);
vmw_user_object_unref(&vfbs->uo);
@@ -1289,6 +500,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
struct vmw_framebuffer **out,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -1337,7 +549,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, info, mode_cmd);
memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
vmw_user_object_ref(&vfbs->uo);
@@ -1375,6 +587,7 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
+ vmw_bo_dirty_release(vfbd->buffer);
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
@@ -1390,6 +603,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_bo *bo,
struct vmw_framebuffer **out,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -1422,7 +636,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
}
vfbd->base.base.obj[0] = &bo->tbo.base;
- drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, info, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
*out = &vfbd->base;
@@ -1467,11 +681,13 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* @dev_priv: Pointer to device private struct.
* @uo: Pointer to user object to wrap the kms framebuffer around.
* Either the buffer or surface inside the user object must be NULL.
+ * @info: pixel format information.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
@@ -1480,10 +696,10 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
/* Create the new framebuffer depending one what we have */
if (vmw_user_object_surface(uo)) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
- mode_cmd);
+ info, mode_cmd);
} else if (uo->buffer) {
ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
- mode_cmd);
+ info, mode_cmd);
} else {
BUG();
}
@@ -1500,11 +716,14 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_user_object uo = {0};
+ struct vmw_bo *bo;
+ struct vmw_surface *surface;
int ret;
/* returns either a bo or surface */
@@ -1527,13 +746,15 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
- vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
+ vfb = vmw_kms_new_framebuffer(dev_priv, &uo, info, mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
}
err_out:
+ bo = vmw_user_object_buffer(&uo);
+ surface = vmw_user_object_surface(&uo);
/* vmw_user_object_lookup takes one ref so does new_fb */
vmw_user_object_unref(&uo);
@@ -1542,6 +763,14 @@ err_out:
return ERR_PTR(ret);
}
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+
return &vfb->base;
}
@@ -1974,44 +1203,6 @@ int vmw_kms_close(struct vmw_private *dev_priv)
return ret;
}
-int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_cursor_bypass_arg *arg = data;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
- }
-
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
- }
-
- crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
-
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 4eab581883e2..445471fe9be6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,40 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
+#include "vmwgfx_cursor_plane.h"
+#include "vmwgfx_drv.h"
+
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
-#include "vmwgfx_drv.h"
-
/**
* struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
* @plane: Plane which is being updated.
@@ -235,16 +216,11 @@ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
};
-static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
#define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base)
#define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base)
#define vmw_connector_state_to_vcs(x) \
container_of(x, struct vmw_connector_state, base)
-#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
/**
* Derived class for crtc state object
@@ -255,11 +231,6 @@ struct vmw_crtc_state {
struct drm_crtc_state base;
};
-struct vmw_cursor_plane_state {
- struct vmw_bo *bo;
- s32 hotspot_x;
- s32 hotspot_y;
-};
/**
* Derived class for plane state object
@@ -283,7 +254,6 @@ struct vmw_plane_state {
/* For CPU Blit */
unsigned int cpp;
- bool surf_mapped;
struct vmw_cursor_plane_state cursor;
};
@@ -317,17 +287,6 @@ struct vmw_connector_state {
int gui_y;
};
-/**
- * Derived class for cursor plane object
- *
- * @base DRM plane object
- * @cursor.cursor_mobs Cursor mobs available for re-use
- */
-struct vmw_cursor_plane {
- struct drm_plane base;
-
- struct vmw_bo *cursor_mobs[3];
-};
/**
* Base class display unit.
@@ -343,17 +302,6 @@ struct vmw_display_unit {
struct drm_plane primary;
struct vmw_cursor_plane cursor;
- struct vmw_surface *cursor_surface;
- size_t cursor_age;
-
- int cursor_x;
- int cursor_y;
-
- int hotspot_x;
- int hotspot_y;
- s32 core_hotspot_x;
- s32 core_hotspot_y;
-
unsigned unit;
/*
@@ -403,8 +351,6 @@ struct vmw_display_unit {
*/
void vmw_du_init(struct vmw_display_unit *du);
void vmw_du_cleanup(struct vmw_display_unit *du);
-void vmw_du_crtc_save(struct drm_crtc *crtc);
-void vmw_du_crtc_restore(struct drm_crtc *crtc);
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t size,
@@ -453,6 +399,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
@@ -460,19 +407,10 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state);
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state);
-void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state);
-int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void vmw_du_plane_reset(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index f0b429525467..c23c9195f0dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -372,7 +372,7 @@ static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -383,10 +383,10 @@ static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7055cbefc768..d8204d4265d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -282,8 +282,7 @@ out_no_setup:
}
vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
- ttm_bo_put(&batch->otable_bo->tbo);
- batch->otable_bo = NULL;
+ vmw_bo_unreference(&batch->otable_bo);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 74ff2812d66a..7de20e56082c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -1,27 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
#include "vmwgfx_bo.h"
@@ -71,6 +52,11 @@ struct vmw_bo_dirty {
unsigned long bitmap[];
};
+bool vmw_bo_is_dirty(struct vmw_bo *vbo)
+{
+ return vbo->dirty && (vbo->dirty->start < vbo->dirty->end);
+}
+
/**
* vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
* @vbo: The buffer object to scan
@@ -341,6 +327,41 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
dirty->end = res_start;
}
+void vmw_bo_dirty_clear(struct vmw_bo *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = 0;
+ unsigned long res_end = vbo->tbo.base.size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = max(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
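
vmw_bo_dirty_clear() above walks the per-page dirty bitmap with the usual find_next_bit()/find_next_zero_bit() pairing, clearing one contiguous run per iteration. The same pattern in isolation, on a plain bitmap (a sketch, not driver code):

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	/* Clear every set run in bitmap[0..size), one contiguous run at a time. */
	static void clear_all_runs(unsigned long *bitmap, unsigned long size)
	{
		unsigned long cur = 0;

		while (cur < size) {
			unsigned long start = find_next_bit(bitmap, size, cur);
			unsigned long end;

			if (start >= size)
				break;	/* no set bits left */
			end = find_next_zero_bit(bitmap, size, start + 1);
			bitmap_clear(bitmap, start, end - start);
			cur = end + 1;	/* bit 'end' is known zero, skip it */
		}
	}
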
/**
* vmw_bo_dirty_clear_res - Clear a resource's dirty region from
* its backing mob.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a73af8a355fb..388011696941 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
- kref_get(&res->kref);
+ vmw_resource_reference(res);
*p_res = res;
ret = 0;
@@ -347,7 +347,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
@@ -531,9 +531,9 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->guest_memory_bo->tbo);
val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
+ drm_gem_object_get(&val_buf->bo->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -557,7 +557,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
if (guest_memory_dirty)
vmw_user_bo_unref(&res->guest_memory_bo);
@@ -619,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 32029d80b72b..5f5f5a94301f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -445,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv);
if (ret)
return ret;
@@ -764,7 +764,7 @@ static const struct drm_plane_funcs vmw_sou_plane_funcs = {
static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -775,10 +775,10 @@ static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 7fb1c88bcc47..69dfe69ce0f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true,
+ .pin = false,
.keep_resv = true,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f5d2ed1b0a72..20aab725e53a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1482,7 +1482,7 @@ static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -1494,10 +1494,10 @@ static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
@@ -1584,6 +1584,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&cursor->base);
ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5721c74da3e0..7e281c3c6bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,32 +1,13 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#include "vmwgfx_bo.h"
+#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -658,7 +639,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- WARN_ON_ONCE(res->dirty);
+ WARN_ON(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -689,8 +670,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
* Dumb buffers own the resource and they'll unref the
* resource themselves
*/
- if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
- return;
+ WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);
vmw_resource_unreference(&res);
}
@@ -818,25 +798,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->guest_memory_size = cur_bo_offset;
- if (!file_priv->atomic &&
- metadata->scanout &&
- metadata->num_sizes == 1 &&
- metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
- metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
- metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
- VMW_CURSOR_SNOOP_HEIGHT *
- desc->pitchBytesPerBlock;
- srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
+
+ srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
+ if (IS_ERR(srf->snooper.image)) {
+ ret = PTR_ERR(srf->snooper.image);
+ goto out_no_copy;
}
if (drm_is_primary_client(file_priv))
@@ -864,14 +830,17 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv,
- &params,
- &res->guest_memory_bo);
+ ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1670,6 +1639,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
+ if (res->guest_memory_bo) {
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
VMW_RES_SURFACE,
@@ -1684,7 +1661,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->guest_memory_size;
if (res->guest_memory_bo) {
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
rep->buffer_size = res->guest_memory_bo->tbo.base.size;
@@ -2358,12 +2334,19 @@ int vmw_dumb_create(struct drm_file *file_priv,
vbo = res->guest_memory_bo;
vbo->is_dumb = true;
vbo->dumb_surface = vmw_res_to_srf(res);
-
+ drm_gem_object_put(&vbo->tbo.base);
+ /*
+ * Unset the user surface dtor since this is not actually exposed
+ * to userspace. The surface is owned via the dumb_buffer's GEM handle.
+ */
+ struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
+ struct vmw_user_surface, srf);
+ usurf->prime.base.refcount_release = NULL;
err:
if (res)
vmw_resource_unreference(&res);
- if (ret)
- ttm_ref_object_base_unref(tfile, arg.rep.handle);
+
+ ttm_ref_object_base_unref(tfile, arg.rep.handle);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e7625b3f71e0..7ee93e7191c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -262,9 +262,8 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key);
}
val_buf = &bo_node->base;
- val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
- if (!val_buf->bo)
- return -ESRCH;
+ vmw_bo_reference(vbo);
+ val_buf->bo = &vbo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
}
@@ -656,7 +655,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_res_node *val;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- ttm_bo_put(entry->base.bo);
+ drm_gem_object_put(&entry->base.bo->base);
entry->base.bo = NULL;
}
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 5c2f459a2925..714d5702dfd7 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
- tristate "Intel Xe Graphics"
- depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
+ tristate "Intel Xe2 Graphics"
+ depends on DRM && PCI
+ depends on KUNIT || !KUNIT
+ depends on INTEL_VSEC || !INTEL_VSEC
+ depends on X86_PLATFORM_DEVICES || !(X86 && ACPI)
+ depends on PAGE_SIZE_4KB || COMPILE_TEST || BROKEN
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -27,10 +31,8 @@ config DRM_XE
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if X86 && ACPI
- select X86_PLATFORM_DEVICES if X86 && ACPI
select ACPI_WMI if X86 && ACPI
select SYNC_FILE
- select IOSF_MBI
select CRC32
select SND_HDA_I915 if SND_HDA_CORE
select CEC_CORE if CEC_NOTIFIER
@@ -39,14 +41,15 @@ config DRM_XE
select DRM_TTM_HELPER
select DRM_EXEC
select DRM_GPUVM
- select DRM_GPUSVM if !UML && DEVICE_PRIVATE
select DRM_SCHED
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
select AUXILIARY_BUS
select HMM_MIRROR
+ select REGMAP if I2C
help
- Experimental driver for Intel Xe series GPUs
+ Driver for Intel Xe2 series GPUs and later. Experimental support
+ for Xe series is also available.
If "M" is selected, the module will be called xe.
@@ -74,14 +77,29 @@ config DRM_XE_DP_TUNNEL
If in doubt say "Y".
-config DRM_XE_DEVMEM_MIRROR
- bool "Enable device memory mirror"
+config DRM_XE_GPUSVM
+ bool "Enable CPU to GPU address mirroring"
depends on DRM_XE
+ depends on !UML
+ depends on DEVICE_PRIVATE
+ default y
+ select DRM_GPUSVM
+ help
+ Enable this option if you want support for CPU to GPU address
+ mirroring.
+
+ If in doubt say "Y".
+
+config DRM_XE_PAGEMAP
+ bool "Enable device memory pool for SVM"
+ depends on DRM_XE_GPUSVM
select GET_FREE_REGION
default y
help
- Disable this option only if you want to compile out without device
- memory mirror. Will reduce KMD memory footprint when disabled.
+ Disable this option only if you don't want to expose local device
+ memory for SVM. Will reduce KMD memory footprint when disabled.
+
+ If in doubt say "Y".
config DRM_XE_FORCE_PROBE
string "Force probe xe for selected Intel hardware IDs"
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 0d749ed44878..01735c6ece8b 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -86,12 +86,17 @@ config DRM_XE_KUNIT_TEST
If in doubt, say "N".
-config DRM_XE_LARGE_GUC_BUFFER
- bool "Enable larger guc log buffer"
+config DRM_XE_DEBUG_GUC
+ bool "Enable extra GuC related debug options"
+ depends on DRM_XE_DEBUG
default n
+ select STACKDEPOT
help
Choose this option when debugging GuC issues.
- Buffer should be large enough for complex issues.
+ The GuC log buffer is increased to the maximum allowed, which should
+ be large enough for complex issues. The tracking of FAST_REQ messages
+ is extended to include a record of the calling stack, which is then
+ dumped on a FAST_REQ error notification.
Recommended for driver developers only.
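
For reference, the calling-stack record this option enables rests on the STACKDEPOT API it selects; a rough sketch of that pattern follows (the helper names here are hypothetical, only the stacktrace/stackdepot calls are real kernel API):

	#include <linux/stackdepot.h>
	#include <linux/stacktrace.h>

	/* Capture the caller's stack when a FAST_REQ H2G is sent. */
	static depot_stack_handle_t record_fast_req_stack(void)
	{
		unsigned long entries[32];
		unsigned int n;

		n = stack_trace_save(entries, ARRAY_SIZE(entries), 1 /* skip self */);
		return stack_depot_save(entries, n, GFP_NOWAIT);
	}

	/* Replay it on a FAST_REQ error notification. */
	static void dump_fast_req_stack(depot_stack_handle_t handle)
	{
		if (handle)
			stack_depot_print(handle);
	}
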
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 9699b08585f7..07c71a29963d 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -21,6 +21,13 @@ $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
$(src)/xe_wa_oob.rules
$(call cmd,wa_oob)
+generated_device_oob := $(obj)/generated/xe_device_wa_oob.c $(obj)/generated/xe_device_wa_oob.h
+quiet_cmd_device_wa_oob = GEN $(notdir $(generated_device_oob))
+ cmd_device_wa_oob = mkdir -p $(@D); $^ $(generated_device_oob)
+$(obj)/generated/%_device_wa_oob.c $(obj)/generated/%_device_wa_oob.h: $(obj)/xe_gen_wa_oob \
+ $(src)/xe_device_wa_oob.rules
+ $(call cmd,device_wa_oob)
+
# Please keep these build lists sorted!
# core driver code
@@ -80,6 +87,7 @@ xe-y += xe_bb.o \
xe_mmio.o \
xe_mocs.o \
xe_module.o \
+ xe_nvm.o \
xe_oa.o \
xe_observation.o \
xe_pat.o \
@@ -124,13 +132,15 @@ xe-y += xe_bb.o \
xe_wait_user_fence.o \
xe_wopcm.o
+xe-$(CONFIG_I2C) += xe_i2c.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
-xe-$(CONFIG_DRM_GPUSVM) += xe_svm.o
+xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
xe-$(CONFIG_PERF_EVENTS) += xe_pmu.o
+xe-$(CONFIG_CONFIGFS_FS) += xe_configfs.o
# graphics virtualization (SR-IOV) support
xe-y += \
@@ -138,7 +148,8 @@ xe-y += \
xe_guc_relay.o \
xe_memirq.o \
xe_sriov.o \
- xe_sriov_vf.o
+ xe_sriov_vf.o \
+ xe_tile_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
@@ -152,7 +163,8 @@ xe-$(CONFIG_PCI_IOV) += \
xe_lmtt_2l.o \
xe_lmtt_ml.o \
xe_pci_sriov.o \
- xe_sriov_pf.o
+ xe_sriov_pf.o \
+ xe_sriov_pf_service.o
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
@@ -185,7 +197,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/intel_fbdev_fb.o \
display/xe_display.o \
display/xe_display_misc.o \
- display/xe_display_rps.o \
+ display/xe_display_rpm.o \
display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
@@ -196,7 +208,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
# SOC code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-soc/intel_dram.o \
- i915-soc/intel_pch.o \
i915-soc/intel_rom.o
# Display code shared with i915
@@ -204,7 +215,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/icl_dsi.o \
i915-display/intel_alpm.o \
i915-display/intel_atomic.o \
- i915-display/intel_atomic_plane.o \
i915-display/intel_audio.o \
i915-display/intel_backlight.o \
i915-display/intel_bios.o \
@@ -254,6 +264,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_fbc.o \
i915-display/intel_fdi.o \
i915-display/intel_fifo_underrun.o \
+ i915-display/intel_flipq.o \
i915-display/intel_frontbuffer.o \
i915-display/intel_global_state.o \
i915-display/intel_gmbus.o \
@@ -270,7 +281,9 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_modeset_verify.o \
i915-display/intel_panel.o \
i915-display/intel_pfit.o \
+ i915-display/intel_plane.o \
i915-display/intel_pmdemand.o \
+ i915-display/intel_pch.o \
i915-display/intel_pps.o \
i915-display/intel_psr.o \
i915-display/intel_qp_tables.o \
@@ -336,4 +349,4 @@ $(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
uses_generated_oob := $(addprefix $(obj)/, $(xe-y))
-$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h
+$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h $(obj)/generated/xe_device_wa_oob.h
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index ec516e838ee8..81eb046aeebf 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -141,6 +141,8 @@ enum xe_guc_action {
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
+ XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D,
+ XE_GUC_ACTION_OPT_IN_FEATURE_KLV = 0x550E,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
@@ -160,6 +162,37 @@ enum xe_guc_preempt_options {
XE_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
};
+enum xe_guc_register_context_param_offsets {
+ XE_GUC_REGISTER_CONTEXT_DATA_0_MBZ = 0,
+ XE_GUC_REGISTER_CONTEXT_DATA_1_FLAGS,
+ XE_GUC_REGISTER_CONTEXT_DATA_2_CONTEXT_INDEX,
+ XE_GUC_REGISTER_CONTEXT_DATA_3_ENGINE_CLASS,
+ XE_GUC_REGISTER_CONTEXT_DATA_4_ENGINE_SUBMIT_MASK,
+ XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
+ XE_GUC_REGISTER_CONTEXT_DATA_6_WQ_DESC_ADDR_UPPER,
+ XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
+ XE_GUC_REGISTER_CONTEXT_DATA_8_WQ_BUF_BASE_UPPER,
+ XE_GUC_REGISTER_CONTEXT_DATA_9_WQ_BUF_SIZE,
+ XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR,
+ XE_GUC_REGISTER_CONTEXT_MSG_LEN,
+};
+
+enum xe_guc_register_context_multi_lrc_param_offsets {
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_0_MBZ = 0,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_1_FLAGS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_2_PARENT_CONTEXT,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_3_ENGINE_CLASS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_4_ENGINE_SUBMIT_MASK,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_6_WQ_DESC_ADDR_UPPER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_8_WQ_BUF_BASE_UPPER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_9_WQ_BUF_SIZE,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11,
+};
+
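
The two enums above are dword indices into the REGISTER_CONTEXT H2G payload. A sketch of assembling a single-LRC message with them — the parameters are placeholders and the WQ descriptor dwords are left zero for brevity, so this is illustrative rather than the driver's actual submission code:

	static void build_register_context(u32 *action, u32 flags, u32 guc_id,
					   u32 engine_class, u32 submit_mask,
					   u32 hw_lrc_addr)
	{
		memset(action, 0, XE_GUC_REGISTER_CONTEXT_MSG_LEN * sizeof(u32));
		action[XE_GUC_REGISTER_CONTEXT_DATA_0_MBZ] = 0;
		action[XE_GUC_REGISTER_CONTEXT_DATA_1_FLAGS] = flags;
		action[XE_GUC_REGISTER_CONTEXT_DATA_2_CONTEXT_INDEX] = guc_id;
		action[XE_GUC_REGISTER_CONTEXT_DATA_3_ENGINE_CLASS] = engine_class;
		action[XE_GUC_REGISTER_CONTEXT_DATA_4_ENGINE_SUBMIT_MASK] = submit_mask;
		action[XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR] = hw_lrc_addr;
	}
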
enum xe_guc_report_status {
XE_GUC_REPORT_STATUS_UNKNOWN = 0x0,
XE_GUC_REPORT_STATUS_ACKED = 0x1,
@@ -239,4 +272,7 @@ enum xe_guc_g2g_type {
#define XE_G2G_DEREGISTER_TILE REG_GENMASK(15, 12)
#define XE_G2G_DEREGISTER_TYPE REG_GENMASK(11, 8)
+/* invalid type for XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR */
+#define XE_GUC_CAT_ERR_TYPE_INVALID 0xdeadbeef
+
#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_errors_abi.h b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
index 2c627a21648f..ecf748fd87df 100644
--- a/drivers/gpu/drm/xe/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
@@ -6,8 +6,7 @@
#ifndef _ABI_GUC_ERRORS_ABI_H
#define _ABI_GUC_ERRORS_ABI_H
-enum xe_guc_response_status {
- XE_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+enum xe_guc_response {
XE_GUC_RESPONSE_ERROR_PROTOCOL = 0x04,
XE_GUC_RESPONSE_INVALID_STATE = 0x0A,
XE_GUC_RESPONSE_UNSUPPORTED_VERSION = 0x0B,
@@ -21,12 +20,20 @@ enum xe_guc_response_status {
XE_GUC_RESPONSE_CANNOT_COMPLETE_ACTION = 0x41,
XE_GUC_RESPONSE_INVALID_KLV_DATA = 0x50,
XE_GUC_RESPONSE_INVALID_PARAMS = 0x60,
+ XE_GUC_RESPONSE_INVALID_CONTEXT_INDEX = 0x61,
+ XE_GUC_RESPONSE_INVALID_CONTEXT_REGISTRATION = 0x62,
+ XE_GUC_RESPONSE_INVALID_DOORBELL_ID = 0x63,
+ XE_GUC_RESPONSE_INVALID_ENGINE_ID = 0x64,
XE_GUC_RESPONSE_INVALID_BUFFER_RANGE = 0x70,
XE_GUC_RESPONSE_INVALID_BUFFER = 0x71,
+ XE_GUC_RESPONSE_BUFFER_ALREADY_REGISTERED = 0x72,
XE_GUC_RESPONSE_INVALID_GGTT_ADDRESS = 0x80,
XE_GUC_RESPONSE_PENDING_ACTION = 0x90,
+ XE_GUC_RESPONSE_CONTEXT_NOT_REGISTERED = 0x100,
+ XE_GUC_RESPONSE_CONTEXT_ALREADY_REGISTERED = 0x101,
XE_GUC_RESPONSE_INVALID_SIZE = 0x102,
XE_GUC_RESPONSE_MALFORMED_KLV = 0x103,
+ XE_GUC_RESPONSE_INVALID_CONTEXT = 0x104,
XE_GUC_RESPONSE_INVALID_KLV_KEY = 0x105,
XE_GUC_RESPONSE_DATA_TOO_LARGE = 0x106,
XE_GUC_RESPONSE_VF_MIGRATED = 0x107,
@@ -40,10 +47,11 @@ enum xe_guc_response_status {
XE_GUC_RESPONSE_CTB_NOT_REGISTERED = 0x304,
XE_GUC_RESPONSE_CTB_IN_USE = 0x305,
XE_GUC_RESPONSE_CTB_INVALID_DESC = 0x306,
+ XE_GUC_RESPONSE_HW_TIMEOUT = 0x30C,
XE_GUC_RESPONSE_CTB_SOURCE_INVALID_DESCRIPTOR = 0x30D,
XE_GUC_RESPONSE_CTB_DESTINATION_INVALID_DESCRIPTOR = 0x30E,
XE_GUC_RESPONSE_INVALID_CONFIG_STATE = 0x30F,
- XE_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
+ XE_GUC_RESPONSE_GENERIC_FAIL = 0xF000,
};
enum xe_guc_load_status {
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index d633f1c739e4..0366a9da5977 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -16,6 +16,7 @@
* +===+=======+==============================================================+
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
+ * | | | - `GuC Opt In Feature KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@@ -125,6 +126,33 @@ enum {
};
/**
+ * DOC: GuC Opt In Feature KLVs
+ *
+ * `GuC KLV`_ keys available for use with OPT_IN_FEATURE_KLV
+ *
+ * _`GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE` : 0x4001
+ * Adds an extra dword to the XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR G2H
+ * containing the type of the CAT error. On HW that does not support
+ * reporting the CAT error type, the extra dword is set to 0xdeadbeef.
+ *
+ * _`GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH` : 0x4003
+ * This KLV enables the Dynamic Inhibit Context Switch optimization, which
+ * consists of the GuC setting the CTX_CTRL_INHIBIT_SYN_CTX_SWITCH bit to
+ * zero in the CTX_CONTEXT_CONTROL register of LRCs that are submitted
+ * to an oversubscribed engine. This will cause those contexts to be
+ * switched out immediately if they hit an unsatisfied semaphore wait
+ * (instead of waiting the full timeslice duration). The bit is instead set
+ * to one if a single context is queued on the engine, to avoid it being
+ * switched out if there isn't another context that can run in its place.
+ */
+
+#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY 0x4001
+#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN 0u
+
+#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_KEY 0x4003
+#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u
+
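
Putting the opt-in keys to use: per the KEY/LEN layout documented at the top of this file (bits 31:16 and 15:0 of dword 0), a zero-length KLV is just its one-dword header. The mask names below are illustrative, not the driver's own definitions:

	#include <linux/bitfield.h>

	#define KLV_KEY GENMASK(31, 16)	/* bits 31:16 of dword 0 */
	#define KLV_LEN GENMASK(15, 0)	/* bits 15:0 of dword 0 */

	u32 hdr = FIELD_PREP(KLV_KEY, GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY) |
		  FIELD_PREP(KLV_LEN, GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN);
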
+/**
* DOC: GuC VGT Policy KLVs
*
* `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
@@ -367,6 +395,7 @@ enum xe_guc_klv_ids {
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH = 0x900b,
};
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h b/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
deleted file mode 100644
index 21fec9cc837c..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RPS_H__
-#define __INTEL_RPS_H__
-
-#define gen5_rps_irq_handler(x) ({})
-
-#endif /* __INTEL_RPS_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index dfec5108d2c3..9b7572e06f34 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -13,7 +13,6 @@
#include <drm/drm_drv.h>
#include "i915_utils.h"
-#include "intel_runtime_pm.h"
#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
@@ -22,28 +21,12 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return container_of(dev, struct drm_i915_private, drm);
}
+/* compat platform checks only for soc/ usage */
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
-#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
-#define IS_I830(dev_priv) (dev_priv && 0)
-#define IS_I845G(dev_priv) (dev_priv && 0)
-#define IS_I85X(dev_priv) (dev_priv && 0)
-#define IS_I865G(dev_priv) (dev_priv && 0)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
-#define IS_I945G(dev_priv) (dev_priv && 0)
-#define IS_I945GM(dev_priv) (dev_priv && 0)
-#define IS_I965G(dev_priv) (dev_priv && 0)
-#define IS_I965GM(dev_priv) (dev_priv && 0)
-#define IS_G45(dev_priv) (dev_priv && 0)
-#define IS_GM45(dev_priv) (dev_priv && 0)
-#define IS_G4X(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
-#define IS_G33(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE_M(dev_priv) (dev_priv && 0)
-#define IS_SANDYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
-#define IS_IVB_GT1(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
@@ -71,39 +54,10 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
-#define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0)
#define IS_MOBILE(xe) (xe && 0)
-#define IS_TIGERLAKE_UY(xe) (xe && 0)
-#define IS_COMETLAKE_ULX(xe) (xe && 0)
-#define IS_COFFEELAKE_ULX(xe) (xe && 0)
-#define IS_KABYLAKE_ULX(xe) (xe && 0)
-#define IS_SKYLAKE_ULX(xe) (xe && 0)
-#define IS_HASWELL_ULX(xe) (xe && 0)
-#define IS_COMETLAKE_ULT(xe) (xe && 0)
-#define IS_COFFEELAKE_ULT(xe) (xe && 0)
-#define IS_KABYLAKE_ULT(xe) (xe && 0)
-#define IS_SKYLAKE_ULT(xe) (xe && 0)
-
-#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
-#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
-#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
-#define IS_RAPTORLAKE_U(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_P_RPLU)
-#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
-
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-#ifdef CONFIG_ARM64
-/*
- * arm64 indirectly includes linux/rtc.h,
- * which defines a irq_lock, so include it
- * here before #define-ing it
- */
-#include <linux/rtc.h>
-#endif
-
-#define irq_lock irq.lock
-
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
index a473aa6697d0..4fcd3bf6b76f 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
@@ -6,37 +6,6 @@
#ifndef __INTEL_PCODE_H__
#define __INTEL_PCODE_H__
-#include "intel_uncore.h"
#include "xe_pcode.h"
-static inline int
-snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms)
-{
- return xe_pcode_write_timeout(__compat_uncore_to_tile(uncore), mbox, val,
- slow_timeout_ms ?: 1);
-}
-
-static inline int
-snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
-{
-
- return xe_pcode_write(__compat_uncore_to_tile(uncore), mbox, val);
-}
-
-static inline int
-snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
-{
- return xe_pcode_read(__compat_uncore_to_tile(uncore), mbox, val, val1);
-}
-
-static inline int
-skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- int timeout_base_ms)
-{
- return xe_pcode_request(__compat_uncore_to_tile(uncore), mbox, request, reply_mask, reply,
- timeout_base_ms);
-}
-
#endif /* __INTEL_PCODE_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
deleted file mode 100644
index 274042bff1be..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RUNTIME_PM_H__
-#define __INTEL_RUNTIME_PM_H__
-
-#include "intel_wakeref.h"
-#include "xe_device_types.h"
-#include "xe_pm.h"
-
-#define intel_runtime_pm xe_runtime_pm
-
-static inline void disable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline void enable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline bool
-intel_runtime_pm_suspended(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return pm_runtime_suspended(xe->drm.dev);
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_get_noresume(xe);
-
- return INTEL_WAKEREF_DEF;
-}
-
-static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_put(xe);
-}
-
-static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref)
-{
- if (wakeref)
- intel_runtime_pm_put_unchecked(pm);
-}
-
-#define intel_runtime_pm_get_raw intel_runtime_pm_get
-#define intel_runtime_pm_put_raw intel_runtime_pm_put
-#define assert_rpm_wakelock_held(x) do { } while (0)
-#define assert_rpm_raw_wakeref_held(x) do { } while (0)
-
-#define with_intel_runtime_pm(rpm, wf) \
- for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index 0c1e88e36a1e..d012f02bc84f 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -24,13 +24,6 @@ static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncor
return xe_root_tile_mmio(xe);
}
-static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
-{
- struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
-
- return xe_device_get_root_tile(xe);
-}
-
static inline u32 intel_uncore_read(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
@@ -110,12 +103,13 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore,
static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t i915_reg, u32 mask,
- u32 value, unsigned int timeout)
+ u32 value, unsigned int timeout,
+ u32 *out_value)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
- timeout * USEC_PER_MSEC, NULL, false);
+ timeout * USEC_PER_MSEC, out_value, false);
}
static inline int
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
deleted file mode 100644
index 9c46556d33a4..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "../../../i915/soc/intel_pch.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h
new file mode 100644
index 000000000000..69e1935e9cdf
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _VLV_IOSF_SB_H_
+#define _VLV_IOSF_SB_H_
+
+#include <linux/types.h>
+
+#include "vlv_iosf_sb_reg.h"
+
+struct drm_device;
+
+enum vlv_iosf_sb_unit {
+ VLV_IOSF_SB_BUNIT,
+ VLV_IOSF_SB_CCK,
+ VLV_IOSF_SB_CCU,
+ VLV_IOSF_SB_DPIO,
+ VLV_IOSF_SB_DPIO_2,
+ VLV_IOSF_SB_FLISDSI,
+ VLV_IOSF_SB_GPIO,
+ VLV_IOSF_SB_NC,
+ VLV_IOSF_SB_PUNIT,
+};
+
+static inline void vlv_iosf_sb_get(struct drm_device *drm, unsigned long ports)
+{
+}
+static inline u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr)
+{
+ return 0;
+}
+static inline int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val)
+{
+ return 0;
+}
+static inline void vlv_iosf_sb_put(struct drm_device *drm, unsigned long ports)
+{
+}
+
+#endif /* _VLV_IOSF_SB_H_ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h
index 949f134ce3cf..cb7fa8e794a6 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h
@@ -3,4 +3,4 @@
* Copyright © 2023 Intel Corporation
*/
-#include "../../i915/vlv_sideband_reg.h"
+#include "../../i915/vlv_iosf_sb_reg.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h
deleted file mode 100644
index ec6f12de5727..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2013-2021 Intel Corporation
- */
-
-#ifndef _VLV_SIDEBAND_H_
-#define _VLV_SIDEBAND_H_
-
-#include <linux/types.h>
-
-#include "vlv_sideband_reg.h"
-
-enum pipe;
-struct drm_i915_private;
-
-enum {
- VLV_IOSF_SB_BUNIT,
- VLV_IOSF_SB_CCK,
- VLV_IOSF_SB_CCU,
- VLV_IOSF_SB_DPIO,
- VLV_IOSF_SB_FLISDSI,
- VLV_IOSF_SB_GPIO,
- VLV_IOSF_SB_NC,
- VLV_IOSF_SB_PUNIT,
-};
-
-static inline void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
-{
-}
-static inline u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
-{
- return 0;
-}
-static inline void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val)
-{
-}
-static inline void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
-{
-}
-static inline void vlv_bunit_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_bunit_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_cck_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_cck_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_ccu_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_ccu_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_dpio_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_dpio_read(struct drm_i915_private *i915, int pipe, int reg)
-{
- return 0;
-}
-static inline void vlv_dpio_write(struct drm_i915_private *i915,
- int pipe, int reg, u32 val)
-{
-}
-static inline void vlv_dpio_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_nc_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
-{
- return 0;
-}
-static inline void vlv_nc_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_punit_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
-{
- return 0;
-}
-static inline int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
-{
- return 0;
-}
-static inline void vlv_punit_put(struct drm_i915_private *i915)
-{
-}
-
-#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 27437c22bd70..910632f57c3d 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -1,7 +1,12 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
+#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
+#include <drm/drm_panic.h>
+
+#include "intel_fb.h"
+#include "intel_display_types.h"
#include "xe_bo.h"
#include "intel_bo.h"
@@ -59,3 +64,89 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
/* FIXME */
}
+
+struct xe_panic_data {
+ struct page **pages;
+ int page;
+ void *vaddr;
+};
+
+struct xe_framebuffer {
+ struct intel_framebuffer base;
+ struct xe_panic_data panic;
+};
+
+static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
+{
+ return &container_of_const(fb, struct xe_framebuffer, base)->panic;
+}
+
+static void xe_panic_kunmap(struct xe_panic_data *panic)
+{
+ if (panic->vaddr) {
+ drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+ kunmap_local(panic->vaddr);
+ panic->vaddr = NULL;
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct xe_panic_data *panic = to_xe_panic_data(fb);
+ struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
+ unsigned int new_page;
+ unsigned int offset;
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ xe_panic_kunmap(panic);
+ panic->page = new_page;
+ panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
+ panic->page);
+ }
+ if (panic->vaddr) {
+ u32 *pix = panic->vaddr + offset;
+ *pix = color;
+ }
+}
+
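
By contrast, a scanout buffer that is permanently mapped (sb->map[0] valid) needs none of the per-page kmap juggling above; its set_pixel can write straight through the map. An illustrative sketch, not part of the patch:

	#include <drm/drm_panic.h>
	#include <linux/iosys-map.h>

	static void linear_set_pixel(struct drm_scanout_buffer *sb,
				     unsigned int x, unsigned int y, u32 color)
	{
		unsigned int off = y * sb->pitch[0] + x * sb->format->cpp[0];

		iosys_map_wr(&sb->map[0], off, u32, color);
	}
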
+struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
+{
+ struct xe_framebuffer *xe_fb;
+
+ xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL);
+ if (xe_fb)
+ return &xe_fb->base;
+ return NULL;
+}
+
+int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct xe_panic_data *panic = to_xe_panic_data(fb);
+
+ panic->page = -1;
+ sb->set_pixel = xe_panic_page_set_pixel;
+ return 0;
+}
+
+void intel_bo_panic_finish(struct intel_framebuffer *fb)
+{
+ struct xe_panic_data *panic = to_xe_panic_data(fb);
+
+ xe_panic_kunmap(panic);
+ panic->page = -1;
+}
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 3a1e505ff182..fba9617a75a5 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -5,6 +5,7 @@
#include <drm/drm_fb_helper.h>
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbdev_fb.h"
@@ -45,7 +46,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -56,7 +57,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
}
if (IS_ERR(obj)) {
@@ -65,7 +66,11 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
goto err;
}
- fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd);
+ fb = intel_framebuffer_create(&obj->ttm.base,
+ drm_get_format_info(dev,
+ mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd);
if (IS_ERR(fb)) {
xe_bo_unpin_map_no_vm(obj);
goto err;
@@ -79,11 +84,11 @@ err:
return ERR_CAST(fb);
}
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
- struct drm_gem_object *_obj, struct i915_vma *vma)
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+ struct drm_gem_object *_obj, struct i915_vma *vma)
{
struct xe_bo *obj = gem_to_xe_bo(_obj);
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
if (obj->flags & XE_BO_FLAG_STOLEN)
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 0b0aca7a25af..e2e0771cf274 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -20,6 +20,7 @@
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -38,7 +39,9 @@
static bool has_display(struct xe_device *xe)
{
- return HAS_DISPLAY(&xe->display);
+ struct intel_display *display = xe->display;
+
+ return HAS_DISPLAY(display);
}
/**
@@ -46,6 +49,8 @@ static bool has_display(struct xe_device *xe)
* early on
* @pdev: PCI device
*
+ * Note: This is called before xe or display device creation.
+ *
* Returns: true if probe needs to be deferred, false otherwise
*/
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
@@ -63,6 +68,8 @@ bool xe_display_driver_probe_defer(struct pci_dev *pdev)
* Set features and function hooks in @driver that are needed for driving the
* display IP. This sets the driver's capability of driving display, regardless
* if the device has it enabled
+ *
+ * Note: This is called before xe or display device creation.
*/
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
@@ -81,37 +88,10 @@ static void unset_display_features(struct xe_device *xe)
xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
-static void display_destroy(struct drm_device *dev, void *dummy)
-{
- struct xe_device *xe = to_xe_device(dev);
-
- destroy_workqueue(xe->display.hotplug.dp_wq);
-}
-
-/**
- * xe_display_create - create display struct
- * @xe: XE device instance
- *
- * Initialize all fields used by the display part.
- *
- * TODO: once everything can be inside a single struct, make the struct opaque
- * to the rest of xe and return it to be xe->display.
- *
- * Returns: 0 on success
- */
-int xe_display_create(struct xe_device *xe)
-{
- spin_lock_init(&xe->display.fb_tracking.lock);
-
- xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
-
- return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
-}
-
static void xe_display_fini_early(void *arg)
{
struct xe_device *xe = arg;
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -124,7 +104,7 @@ static void xe_display_fini_early(void *arg)
int xe_display_init_early(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
int err;
if (!xe->info.probe_display)
@@ -133,9 +113,6 @@ int xe_display_init_early(struct xe_device *xe)
/* Fake uncore lock */
spin_lock_init(&xe->uncore.lock);
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(xe);
-
intel_display_driver_early_probe(display);
/* Early display init. */
@@ -145,9 +122,11 @@ int xe_display_init_early(struct xe_device *xe)
* Fill the dram structure to get the system dram info. This will be
* used for memory latency calculation.
*/
- intel_dram_detect(xe);
+ err = intel_dram_detect(xe);
+ if (err)
+ goto err_opregion;
- intel_bw_init_hw(xe);
+ intel_bw_init_hw(display);
intel_display_device_info_runtime_init(display);
@@ -171,9 +150,9 @@ err_opregion:
static void xe_display_fini(void *arg)
{
struct xe_device *xe = arg;
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
- intel_hpd_poll_fini(xe);
+ intel_hpd_poll_fini(display);
intel_hdcp_component_fini(display);
intel_audio_deinit(display);
intel_display_driver_remove(display);
@@ -181,7 +160,7 @@ static void xe_display_fini(void *arg)
int xe_display_init(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
int err;
if (!xe->info.probe_display)
@@ -196,7 +175,7 @@ int xe_display_init(struct xe_device *xe)
void xe_display_register(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -207,7 +186,7 @@ void xe_display_register(struct xe_device *xe)
void xe_display_unregister(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -220,16 +199,18 @@ void xe_display_unregister(struct xe_device *xe)
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
+ struct intel_display *display = xe->display;
+
if (!xe->info.probe_display)
return;
if (master_ctl & DISPLAY_IRQ)
- gen11_display_irq_handler(xe);
+ gen11_display_irq_handler(display);
}
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -240,19 +221,23 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
void xe_display_irq_reset(struct xe_device *xe)
{
+ struct intel_display *display = xe->display;
+
if (!xe->info.probe_display)
return;
- gen11_display_irq_reset(xe);
+ gen11_display_irq_reset(display);
}
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
+ struct intel_display *display = xe->display;
+
if (!xe->info.probe_display)
return;
if (gt->info.id == XE_GT0)
- gen11_de_irq_postinstall(xe);
+ gen11_de_irq_postinstall(display);
}
static bool suspend_to_idle(void)
@@ -287,7 +272,7 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
static void xe_display_enable_d3cold(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -305,12 +290,12 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
intel_dmc_suspend(display);
if (has_display(xe))
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
static void xe_display_disable_d3cold(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -322,10 +307,10 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
intel_display_driver_init_hw(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -334,7 +319,7 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
void xe_display_pm_suspend(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
@@ -355,11 +340,11 @@ void xe_display_pm_suspend(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe)) {
intel_display_driver_suspend_access(display);
- intel_encoder_suspend_all(&xe->display);
+ intel_encoder_suspend_all(display);
}
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
@@ -369,7 +354,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
void xe_display_pm_shutdown(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -385,7 +370,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
intel_dp_mst_suspend(display);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe))
intel_display_driver_suspend_access(display);
@@ -400,6 +385,8 @@ void xe_display_pm_shutdown(struct xe_device *xe)
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
+ struct intel_display *display = xe->display;
+
if (!xe->info.probe_display)
return;
@@ -408,12 +395,12 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
return;
}
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
void xe_display_pm_suspend_late(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
@@ -424,7 +411,7 @@ void xe_display_pm_suspend_late(struct xe_device *xe)
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -442,7 +429,7 @@ void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -457,7 +444,7 @@ void xe_display_pm_shutdown_late(struct xe_device *xe)
void xe_display_pm_resume_early(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -467,7 +454,7 @@ void xe_display_pm_resume_early(struct xe_device *xe)
void xe_display_pm_resume(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -482,7 +469,7 @@ void xe_display_pm_resume(struct xe_device *xe)
if (has_display(xe))
intel_display_driver_resume_access(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe)) {
intel_display_driver_resume(display);
@@ -491,7 +478,7 @@ void xe_display_pm_resume(struct xe_device *xe)
}
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -502,6 +489,8 @@ void xe_display_pm_resume(struct xe_device *xe)
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
+ struct intel_display *display = xe->display;
+
if (!xe->info.probe_display)
return;
@@ -510,9 +499,9 @@ void xe_display_pm_runtime_resume(struct xe_device *xe)
return;
}
- intel_hpd_init(xe);
- intel_hpd_poll_disable(xe);
- skl_watermark_ipc_update(xe);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
+ skl_watermark_ipc_update(display);
}
@@ -523,6 +512,17 @@ static void display_device_remove(struct drm_device *dev, void *arg)
intel_display_device_remove(display);
}
+/**
+ * xe_display_probe - probe display and create display struct
+ * @xe: XE device instance
+ *
+ * Initialize all fields used by the display part.
+ *
+ * TODO: once everything can be inside a single struct, make the struct opaque
+ * to the rest of xe and return it to be xe->display.
+ *
+ * Returns: 0 on success
+ */
int xe_display_probe(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -533,11 +533,15 @@ int xe_display_probe(struct xe_device *xe)
goto no_display;
display = intel_display_device_probe(pdev);
+ if (IS_ERR(display))
+ return PTR_ERR(display);
err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
if (err)
return err;
+ xe->display = display;
+
if (has_display(xe))
return 0;
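
For review context, a condensed sketch of xe_display_probe() as it looks after this patch: the old xe_display_create() is gone, and probe now both allocates the heap-based display struct and publishes it as xe->display. This is illustrative only; the no_display fallback path and error labels are omitted.

/* Condensed from the hunks above; the no_display path is omitted. */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	display = intel_display_device_probe(pdev);
	if (IS_ERR(display))
		return PTR_ERR(display);	/* new: probe can now fail */

	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	/* xe->display is now a pointer, not an embedded struct */
	xe->display = display;

	return 0;
}

Every other hunk in this file follows from that change: each `&xe->display` becomes a plain `xe->display` load.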
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 46e14f8dee28..e533aa4750bc 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -15,8 +15,6 @@ struct drm_driver;
bool xe_display_driver_probe_defer(struct pci_dev *pdev);
void xe_display_driver_set_hooks(struct drm_driver *driver);
-int xe_display_create(struct xe_device *xe);
-
int xe_display_probe(struct xe_device *xe);
int xe_display_init_early(struct xe_device *xe);
@@ -46,8 +44,6 @@ static inline int xe_display_driver_probe_defer(struct pci_dev *pdev) { return 0
static inline void xe_display_driver_set_hooks(struct drm_driver *driver) { }
static inline void xe_display_driver_remove(struct xe_device *xe) {}
-static inline int xe_display_create(struct xe_device *xe) { return 0; }
-
static inline int xe_display_probe(struct xe_device *xe) { return 0; }
static inline int xe_display_init_early(struct xe_device *xe) { return 0; }
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
new file mode 100644
index 000000000000..3825376e98cc
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_pm.h"
+
+static struct xe_device *display_to_xe(struct intel_display *display)
+{
+ return to_xe_device(display->drm);
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_display_rpm_get(display);
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_display_rpm_put(display, wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ xe_pm_runtime_get_noresume(display_to_xe(display));
+
+ return INTEL_WAKEREF_DEF;
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ if (wakeref)
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ struct xe_device *xe = display_to_xe(display);
+
+ return pm_runtime_suspended(xe->drm.dev);
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ /* FIXME */
+}
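
The new RPM shim lets display code take runtime-PM references without knowing about xe_pm. A hedged usage sketch of a caller (the function name is hypothetical; the get/put semantics follow the file above, where a NULL wakeref means resume failed):

/* Hypothetical caller, for illustration only. */
static void example_mmio_access(struct intel_display *display)
{
	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);	/* wakes the device */
	if (!wakeref)
		return;		/* resume failed; device stays suspended */

	/* ... touch display hardware here ... */

	intel_display_rpm_put(display, wakeref);	/* balances the get */
}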
diff --git a/drivers/gpu/drm/xe/display/xe_display_rps.c b/drivers/gpu/drm/xe/display/xe_display_rps.c
deleted file mode 100644
index fa616f9688a5..000000000000
--- a/drivers/gpu/drm/xe/display/xe_display_rps.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "intel_display_rps.h"
-
-void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
-}
-
-void intel_display_rps_mark_interactive(struct intel_display *display,
- struct intel_atomic_state *state,
- bool interactive)
-{
-}
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68e3d1959ad6..68d1387d81a0 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -3,14 +3,16 @@
* Copyright © 2024 Intel Corporation
*/
+#include "intel_display_core.h"
#include "intel_display_wa.h"
-
#include "xe_device.h"
#include "xe_wa.h"
#include <generated/xe_wa_oob.h>
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
- return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+ struct xe_device *xe = to_xe_device(display->drm);
+
+ return XE_WA(xe_root_mmio_gt(xe), 16023588340);
}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index f95375451e2f..9f941fc2e36b 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -17,10 +17,7 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
- struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
- xe_device_l2_flush(xe);
}
u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
@@ -30,12 +27,9 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
- struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
- xe_device_l2_flush(xe);
}
bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
@@ -74,9 +68,12 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
+ struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+
/*
* The memory barrier here is to ensure coherency of DSB vs MMIO,
* both for weak ordering archs and discrete cards.
*/
- xe_device_wmb(dsb_buf->vma->bo->tile->xe);
+ xe_device_wmb(xe);
+ xe_device_l2_flush(xe);
}
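
The net effect of the xe_dsb_buffer.c hunks is to hoist the L2 flush out of the per-dword paths: intel_dsb_buffer_write() and _memset() now only touch the vmap, and a single xe_device_l2_flush() in intel_dsb_buffer_flush_map() makes the whole buffer visible. A sketch of the intended call pattern (dsb_finish() is a hypothetical caller name):

/* Hypothetical caller; the write/flush split mirrors the hunks above. */
static void dsb_finish(struct intel_dsb_buffer *buf, const u32 *cmds, u32 n)
{
	u32 i;

	for (i = 0; i < n; i++)
		intel_dsb_buffer_write(buf, i, cmds[i]);	/* no flush per write */

	intel_dsb_buffer_flush_map(buf);	/* one wmb + one L2 flush at the end */
}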
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index d918ae1c8061..c38fba18effe 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -6,6 +6,7 @@
#include <drm/ttm/ttm_bo.h>
#include "i915_vma.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -23,6 +24,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
struct xe_device *xe = xe_bo_device(bo);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
u32 column, row;
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
/* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
* by writing dpt/ggtt in a different order?
@@ -32,10 +34,9 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
- iosys_map_wr(map, *dpt_ofs, u64, pte);
+ iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
*dpt_ofs += 8;
src_idx -= src_stride;
}
@@ -55,17 +56,15 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
{
struct xe_device *xe = xe_bo_device(bo);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
- u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index)
- = ggtt->pt_ops->pte_encode_bo;
u32 column, row;
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
for (row = 0; row < height; row++) {
u32 src_idx = src_stride * row + bo_ofs;
for (column = 0; column < width; column++) {
- iosys_map_wr(map, *dpt_ofs, u64,
- pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]));
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
+ iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
*dpt_ofs += 8;
src_idx++;
@@ -129,13 +128,13 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
return PTR_ERR(dpt);
if (view->type == I915_GTT_VIEW_NORMAL) {
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
u32 x;
for (x = 0; x < size / XE_PAGE_SIZE; x++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, x * XE_PAGE_SIZE, XE_PAGE_SIZE);
- iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
+ iosys_map_wr(&dpt->vmap, x * 8, u64, pte | addr);
}
} else if (view->type == I915_GTT_VIEW_REMAPPED) {
const struct intel_remapped_info *remap_info = &view->remapped;
@@ -164,6 +163,9 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
vma->dpt = dpt;
vma->node = dpt->ggtt_node[tile0->id];
+
+ /* Ensure DPT writes are flushed */
+ xe_device_l2_flush(xe);
return 0;
}
@@ -173,15 +175,15 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
{
struct xe_device *xe = xe_bo_device(bo);
u32 column, row;
+ u64 pte = ggtt->pt_ops->pte_encode_flags(bo, xe->pat.idx[XE_CACHE_NONE]);
for (column = 0; column < width; column++) {
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
- ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte);
+ ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte | addr);
*ggtt_ofs += XE_PAGE_SIZE;
src_idx -= src_stride;
}
@@ -199,14 +201,15 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(fb->base.dev);
- struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
+ struct xe_tile *tile0 = xe_device_get_root_tile(xe);
+ struct xe_ggtt *ggtt = tile0->mem.ggtt;
u32 align;
int ret;
/* TODO: Consider sharing framebuffer mapping?
* embed i915_vma inside intel_framebuffer
*/
- xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(xe);
ret = mutex_lock_interruptible(&ggtt->lock);
if (ret)
goto out;
@@ -215,29 +218,22 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
- vma->node = bo->ggtt_node[ggtt->tile->id];
+ if (bo->ggtt_node[tile0->id] && view->type == I915_GTT_VIEW_NORMAL) {
+ vma->node = bo->ggtt_node[tile0->id];
} else if (view->type == I915_GTT_VIEW_NORMAL) {
- u32 x, size = bo->ttm.base.size;
-
vma->node = xe_ggtt_node_init(ggtt);
if (IS_ERR(vma->node)) {
ret = PTR_ERR(vma->node);
goto out_unlock;
}
- ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+ ret = xe_ggtt_node_insert_locked(vma->node, xe_bo_size(bo), align, 0);
if (ret) {
xe_ggtt_node_fini(vma->node);
goto out_unlock;
}
- for (x = 0; x < size; x += XE_PAGE_SIZE) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
- xe->pat.idx[XE_CACHE_NONE]);
-
- ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte);
- }
+ xe_ggtt_map_bo(ggtt, vma->node, bo, xe->pat.idx[XE_CACHE_NONE]);
} else {
u32 i, ggtt_ofs;
const struct intel_rotation_info *rot_info = &view->rotated;
@@ -271,7 +267,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
out_unlock:
mutex_unlock(&ggtt->lock);
out:
- xe_pm_runtime_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(xe);
return ret;
}
@@ -333,8 +329,6 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
if (ret)
goto err_unpin;
- /* Ensure DPT writes are flushed */
- xe_device_l2_flush(xe);
return vma;
err_unpin:
@@ -348,7 +342,7 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
- u8 tile_id = vma->node->ggtt->tile->id;
+ u8 tile_id = xe_device_get_root_tile(xe_bo_device(vma->bo))->id;
if (!refcount_dec_and_test(&vma->ref))
return;
@@ -389,6 +383,7 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
{
struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
struct xe_device *xe = to_xe_device(fb->base.dev);
+ struct intel_display *display = xe->display;
struct i915_vma *vma;
if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
@@ -399,8 +394,8 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
goto found;
}
- if (fb == intel_fbdev_framebuffer(xe->display.fbdev.fbdev)) {
- vma = intel_fbdev_vma_pointer(xe->display.fbdev.fbdev);
+ if (fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
+ vma = intel_fbdev_vma_pointer(display->fbdev.fbdev);
if (vma)
goto found;
}
@@ -463,3 +458,8 @@ u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
return 0;
}
+
+void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
+{
+ *map = vma->bo->vmap;
+}
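
Across xe_fb_pin.c the per-page pte_encode_bo() call is split into one xe_ggtt_encode_pte_flags() outside the loop plus a cheap xe_bo_addr() lookup OR'd in per iteration; the flag bits are invariant for a given bo and PAT index, so only the address changes. A condensed, illustrative rewrite of one inner loop from write_dpt_remapped() above:

/* Illustrative only; parameter list is trimmed relative to the real code. */
static void write_dpt_row(struct xe_ggtt *ggtt, struct xe_bo *bo,
			  struct iosys_map *map, u32 *dpt_ofs,
			  u32 src_idx, u32 width, u16 pat_index)
{
	/* Flag bits depend only on the bo and PAT index: encode them once. */
	u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, pat_index);
	u32 column;

	for (column = 0; column < width; column++) {
		u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);

		iosys_map_wr(map, *dpt_ofs, u64, pte | addr);	/* flags | address */
		*dpt_ofs += 8;
		src_idx++;
	}
}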
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 7c02323e9531..30f1073141fc 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -9,7 +9,6 @@
#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
@@ -22,7 +21,8 @@
#define HECI_MEADDRESS_HDCP 18
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+ struct xe_device *xe;
struct xe_bo *hdcp_bo;
u64 hdcp_cmd_in;
u64 hdcp_cmd_out;
@@ -30,14 +30,9 @@ struct intel_hdcp_gsc_message {
#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
-bool intel_hdcp_gsc_cs_required(struct intel_display *display)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
- return DISPLAY_VER(display) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct intel_display *display)
-{
- struct xe_device *xe = to_xe_device(display->drm);
+ struct xe_device *xe = to_xe_device(drm);
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
@@ -69,10 +64,9 @@ out:
}
/* This function allocates memory for the command that we will send to the GSC CS */
-static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
- struct intel_hdcp_gsc_message *hdcp_message)
+static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
+ struct intel_hdcp_gsc_context *gsc_context)
{
- struct xe_device *xe = to_xe_device(display->drm);
struct xe_bo *bo = NULL;
u64 cmd_in, cmd_out;
int ret = 0;
@@ -84,113 +78,69 @@ static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
- drm_err(display->drm, "Failed to allocate bo for HDCP streaming command!\n");
+ drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
ret = PTR_ERR(bo);
goto out;
}
cmd_in = xe_bo_ggtt_addr(bo);
cmd_out = cmd_in + PAGE_SIZE;
- xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
+ xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo));
+
+ gsc_context->hdcp_bo = bo;
+ gsc_context->hdcp_cmd_in = cmd_in;
+ gsc_context->hdcp_cmd_out = cmd_out;
+ gsc_context->xe = xe;
- hdcp_message->hdcp_bo = bo;
- hdcp_message->hdcp_cmd_in = cmd_in;
- hdcp_message->hdcp_cmd_out = cmd_out;
out:
return ret;
}
-static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct xe_device *xe = to_xe_device(drm);
+ struct intel_hdcp_gsc_context *gsc_context;
int ret;
- hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
- if (!hdcp_message)
- return -ENOMEM;
+ gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+ if (!gsc_context)
+ return ERR_PTR(-ENOMEM);
/*
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function is called
*/
- ret = intel_hdcp_gsc_initialize_message(display, hdcp_message);
+ ret = intel_hdcp_gsc_initialize_message(xe, gsc_context);
if (ret) {
- drm_err(display->drm, "Could not initialize hdcp_message\n");
- kfree(hdcp_message);
- return ret;
+ drm_err(&xe->drm, "Could not initialize gsc_context\n");
+ kfree(gsc_context);
+ gsc_context = ERR_PTR(ret);
}
- display->hdcp.hdcp_message = hdcp_message;
- return ret;
-}
-
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
- .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
- .verify_receiver_cert_prepare_km =
- intel_hdcp_gsc_verify_receiver_cert_prepare_km,
- .verify_hprime = intel_hdcp_gsc_verify_hprime,
- .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
- .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
- .verify_lprime = intel_hdcp_gsc_verify_lprime,
- .get_session_key = intel_hdcp_gsc_get_session_key,
- .repeater_check_flow_prepare_ack =
- intel_hdcp_gsc_repeater_check_flow_prepare_ack,
- .verify_mprime = intel_hdcp_gsc_verify_mprime,
- .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
- .close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-int intel_hdcp_gsc_init(struct intel_display *display)
-{
- struct i915_hdcp_arbiter *data;
- int ret;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- mutex_lock(&display->hdcp.hdcp_mutex);
- display->hdcp.arbiter = data;
- display->hdcp.arbiter->hdcp_dev = display->drm->dev;
- display->hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(display);
- if (ret)
- kfree(data);
-
- mutex_unlock(&display->hdcp.hdcp_mutex);
-
- return ret;
+ return gsc_context;
}
-void intel_hdcp_gsc_fini(struct intel_display *display)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
- struct intel_hdcp_gsc_message *hdcp_message =
- display->hdcp.hdcp_message;
- struct i915_hdcp_arbiter *arb = display->hdcp.arbiter;
-
- if (hdcp_message) {
- xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
- kfree(hdcp_message);
- display->hdcp.hdcp_message = NULL;
- }
+ if (!gsc_context)
+ return;
- kfree(arb);
- display->hdcp.arbiter = NULL;
+ xe_bo_unpin_map_no_vm(gsc_context->hdcp_bo);
+ kfree(gsc_context);
}
static int xe_gsc_send_sync(struct xe_device *xe,
- struct intel_hdcp_gsc_message *hdcp_message,
+ struct intel_hdcp_gsc_context *gsc_context,
u32 msg_size_in, u32 msg_size_out,
u32 addr_out_off)
{
- struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
- struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
+ struct xe_gt *gt = gsc_context->hdcp_bo->tile->media_gt;
+ struct iosys_map *map = &gsc_context->hdcp_bo->vmap;
struct xe_gsc *gsc = &gt->uc.gsc;
int ret;
- ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
- hdcp_message->hdcp_cmd_out, msg_size_out);
+ ret = xe_gsc_pkt_submit_kernel(gsc, gsc_context->hdcp_cmd_in, msg_size_in,
+ gsc_context->hdcp_cmd_out, msg_size_out);
if (ret) {
drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
return ret;
@@ -205,12 +155,12 @@ static int xe_gsc_send_sync(struct xe_device *xe,
return ret;
}
-ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
{
+ struct xe_device *xe = gsc_context->xe;
const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
- struct intel_hdcp_gsc_message *hdcp_message;
u64 host_session_id;
u32 msg_size_in, msg_size_out;
u32 addr_out_off, addr_in_wr_off = 0;
@@ -223,15 +173,14 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
- hdcp_message = xe->display.hdcp.hdcp_message;
addr_out_off = PAGE_SIZE;
host_session_id = xe_gsc_create_host_session_id();
xe_pm_runtime_get_noresume(xe);
- addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
+ addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
addr_in_wr_off, HECI_MEADDRESS_HDCP,
host_session_id, msg_in_len);
- xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
+ xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,
msg_in, msg_in_len);
/*
* Keep resending the request while the GSC reports the pending bit:
* the message is retried up to 20 times, with each attempt sent
* 50 ms apart.
*/
do {
- ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
+ ret = xe_gsc_send_sync(xe, gsc_context, msg_size_in, msg_size_out,
addr_out_off);
/* Only try again if gsc says so */
@@ -254,7 +203,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
if (ret)
goto out;
- xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
+ xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
addr_out_off + HDCP_GSC_HEADER_SIZE,
msg_out_len);
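
With the arbiter and ops table gone, HDCP GSC usage on xe reduces to an allocate/send/free lifecycle around the now-opaque intel_hdcp_gsc_context. A hedged sketch of a caller, matching the signatures introduced above (buffer contents and the wrapper name are illustrative):

/* Illustrative caller; msg_in/msg_out contents are up to the HDCP core. */
static int example_hdcp_roundtrip(struct drm_device *drm,
				  void *msg_in, size_t in_len,
				  void *msg_out, size_t out_len)
{
	struct intel_hdcp_gsc_context *ctx;
	ssize_t ret;

	ctx = intel_hdcp_gsc_context_alloc(drm);	/* pins a bo, stashes xe */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = intel_hdcp_gsc_msg_send(ctx, msg_in, in_len, msg_out, out_len);

	intel_hdcp_gsc_context_free(ctx);		/* NULL-safe */

	return ret < 0 ? ret : 0;
}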
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 4ca0cb571194..dcbc4b2d3fd9 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -10,14 +10,15 @@
#include "xe_ggtt.h"
#include "xe_mmio.h"
-#include "i915_reg.h"
-#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
+#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "xe_bo.h"
#include "xe_wa.h"
@@ -83,16 +84,12 @@ initial_plane_bo(struct xe_device *xe,
if (plane_config->size == 0)
return NULL;
- flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
+ flags = XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
- u64 __iomem *gte = tile0->mem.ggtt->gsm;
- u64 pte;
+ u64 pte = xe_ggtt_read_pte(tile0->mem.ggtt, base);
- gte += base / XE_PAGE_SIZE;
-
- pte = ioread64(gte);
if (!(pte & XE_GGTT_PTE_DM)) {
drm_err(&xe->drm,
"Initial plane programming missing DM bit\n");
@@ -187,7 +184,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
- &bo->ttm.base, &mode_cmd)) {
+ &bo->ttm.base, fb->format, &mode_cmd)) {
drm_dbg_kms(&xe->drm, "intel fb init failed\n");
goto err_bo;
}
diff --git a/drivers/gpu/drm/xe/display/xe_tdf.c b/drivers/gpu/drm/xe/display/xe_tdf.c
index 2a7fccbeb1d5..78bda4c47874 100644
--- a/drivers/gpu/drm/xe/display/xe_tdf.c
+++ b/drivers/gpu/drm/xe/display/xe_tdf.c
@@ -3,9 +3,9 @@
* Copyright © 2024 Intel Corporation
*/
-#include "xe_device.h"
-#include "intel_display_types.h"
+#include "intel_display_core.h"
#include "intel_tdf.h"
+#include "xe_device.h"
void intel_td_flush(struct intel_display *display)
{
diff --git a/drivers/gpu/drm/xe/instructions/xe_alu_commands.h b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
new file mode 100644
index 000000000000..2987b10d3e16
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_ALU_COMMANDS_H_
+#define _XE_ALU_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+/* Instruction Opcodes */
+#define CS_ALU_OPCODE_NOOP 0x000
+#define CS_ALU_OPCODE_FENCE_RD 0x001
+#define CS_ALU_OPCODE_FENCE_WR 0x002
+#define CS_ALU_OPCODE_LOAD 0x080
+#define CS_ALU_OPCODE_LOADINV 0x480
+#define CS_ALU_OPCODE_LOAD0 0x081
+#define CS_ALU_OPCODE_LOAD1 0x481
+#define CS_ALU_OPCODE_LOADIND 0x082
+#define CS_ALU_OPCODE_ADD 0x100
+#define CS_ALU_OPCODE_SUB 0x101
+#define CS_ALU_OPCODE_AND 0x102
+#define CS_ALU_OPCODE_OR 0x103
+#define CS_ALU_OPCODE_XOR 0x104
+#define CS_ALU_OPCODE_SHL 0x105
+#define CS_ALU_OPCODE_SHR 0x106
+#define CS_ALU_OPCODE_SAR 0x107
+#define CS_ALU_OPCODE_STORE 0x180
+#define CS_ALU_OPCODE_STOREINV 0x580
+#define CS_ALU_OPCODE_STOREIND 0x181
+
+/* Instruction Operands */
+#define CS_ALU_OPERAND_REG(n) REG_FIELD_PREP(GENMASK(3, 0), (n))
+#define CS_ALU_OPERAND_REG0 0x0
+#define CS_ALU_OPERAND_REG1 0x1
+#define CS_ALU_OPERAND_REG2 0x2
+#define CS_ALU_OPERAND_REG3 0x3
+#define CS_ALU_OPERAND_REG4 0x4
+#define CS_ALU_OPERAND_REG5 0x5
+#define CS_ALU_OPERAND_REG6 0x6
+#define CS_ALU_OPERAND_REG7 0x7
+#define CS_ALU_OPERAND_REG8 0x8
+#define CS_ALU_OPERAND_REG9 0x9
+#define CS_ALU_OPERAND_REG10 0xa
+#define CS_ALU_OPERAND_REG11 0xb
+#define CS_ALU_OPERAND_REG12 0xc
+#define CS_ALU_OPERAND_REG13 0xd
+#define CS_ALU_OPERAND_REG14 0xe
+#define CS_ALU_OPERAND_REG15 0xf
+#define CS_ALU_OPERAND_SRCA 0x20
+#define CS_ALU_OPERAND_SRCB 0x21
+#define CS_ALU_OPERAND_ACCU 0x31
+#define CS_ALU_OPERAND_ZF 0x32
+#define CS_ALU_OPERAND_CF 0x33
+#define CS_ALU_OPERAND_NA 0 /* N/A operand */
+
+/* Command Streamer ALU Instructions */
+#define CS_ALU_INSTR(opcode, op1, op2) (REG_FIELD_PREP(GENMASK(31, 20), (opcode)) | \
+ REG_FIELD_PREP(GENMASK(19, 10), (op1)) | \
+ REG_FIELD_PREP(GENMASK(9, 0), (op2)))
+
+#define __CS_ALU_INSTR(opcode, op1, op2) CS_ALU_INSTR(CS_ALU_OPCODE_##opcode, \
+ CS_ALU_OPERAND_##op1, \
+ CS_ALU_OPERAND_##op2)
+
+#define CS_ALU_INSTR_NOOP __CS_ALU_INSTR(NOOP, NA, NA)
+#define CS_ALU_INSTR_LOAD(op1, op2) __CS_ALU_INSTR(LOAD, op1, op2)
+#define CS_ALU_INSTR_LOADINV(op1, op2) __CS_ALU_INSTR(LOADINV, op1, op2)
+#define CS_ALU_INSTR_LOAD0(op1) __CS_ALU_INSTR(LOAD0, op1, NA)
+#define CS_ALU_INSTR_LOAD1(op1) __CS_ALU_INSTR(LOAD1, op1, NA)
+#define CS_ALU_INSTR_ADD __CS_ALU_INSTR(ADD, NA, NA)
+#define CS_ALU_INSTR_SUB __CS_ALU_INSTR(SUB, NA, NA)
+#define CS_ALU_INSTR_AND __CS_ALU_INSTR(AND, NA, NA)
+#define CS_ALU_INSTR_OR __CS_ALU_INSTR(OR, NA, NA)
+#define CS_ALU_INSTR_XOR __CS_ALU_INSTR(XOR, NA, NA)
+#define CS_ALU_INSTR_STORE(op1, op2) __CS_ALU_INSTR(STORE, op1, op2)
+#define CS_ALU_INSTR_STOREINV(op1, op2) __CS_ALU_INSTR(STOREINV, op1, op2)
+
+#endif
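
Taken together with MI_MATH(n) from the xe_mi_commands.h hunk below, these macros let the driver build small command-streamer ALU programs. A minimal sketch that adds the low dwords of GPR0 and GPR1 and stores the result in GPR2 (the emission buffer convention is illustrative):

/* Illustrative: a 4-instruction ALU program, GPR2 = GPR0 + GPR1 (low dwords). */
static u32 *emit_gpr_add(u32 *cs)
{
	*cs++ = MI_MATH(4);
	*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);	/* SRCA <- GPR0 */
	*cs++ = CS_ALU_INSTR_LOAD(SRCB, REG1);	/* SRCB <- GPR1 */
	*cs++ = CS_ALU_INSTR_ADD;		/* ACCU <- SRCA + SRCB */
	*cs++ = CS_ALU_INSTR_STORE(REG2, ACCU);	/* GPR2 <- ACCU */

	return cs;
}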
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
index 31d28a67ef6a..457881af8af9 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
@@ -137,6 +137,7 @@
#define CMD_3DSTATE_CLIP_MESH GFXPIPE_3D_CMD(0x0, 0x81)
#define CMD_3DSTATE_SBE_MESH GFXPIPE_3D_CMD(0x0, 0x82)
#define CMD_3DSTATE_CPSIZE_CONTROL_BUFFER GFXPIPE_3D_CMD(0x0, 0x83)
+#define CMD_3DSTATE_COARSE_PIXEL GFXPIPE_3D_CMD(0x0, 0x89)
#define CMD_3DSTATE_DRAWING_RECTANGLE GFXPIPE_3D_CMD(0x1, 0x0)
#define CMD_3DSTATE_CHROMA_KEY GFXPIPE_3D_CMD(0x1, 0x4)
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index 167fb0f742de..e3f5e8bb3ebc 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -32,6 +32,7 @@
#define MI_BATCH_BUFFER_END __MI_INSTR(0xA)
#define MI_TOPOLOGY_FILTER __MI_INSTR(0xD)
#define MI_FORCE_WAKEUP __MI_INSTR(0x1D)
+#define MI_MATH(n) (__MI_INSTR(0x1A) | XE_INSTR_NUM_DW((n) + 1))
#define MI_STORE_DATA_IMM __MI_INSTR(0x20)
#define MI_SDI_GGTT REG_BIT(22)
@@ -47,6 +48,10 @@
#define MI_LRI_FORCE_POSTED REG_BIT(12)
#define MI_LRI_LEN(x) (((x) & 0xff) + 1)
+#define MI_STORE_REGISTER_MEM (__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4))
+#define MI_SRM_USE_GGTT REG_BIT(22)
+#define MI_SRM_ADD_CS_OFFSET REG_BIT(19)
+
#define MI_FLUSH_DW __MI_INSTR(0x26)
#define MI_FLUSH_DW_PROTECTED_MEM_EN REG_BIT(22)
#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)
@@ -61,6 +66,10 @@
#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
#define MI_LRM_USE_GGTT REG_BIT(22)
+#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3))
+#define MI_LRR_DST_CS_MMIO REG_BIT(19)
+#define MI_LRR_SRC_CS_MMIO REG_BIT(18)
+
#define MI_COPY_MEM_MEM (__MI_INSTR(0x2e) | XE_INSTR_NUM_DW(5))
#define MI_COPY_MEM_MEM_SRC_GGTT REG_BIT(22)
#define MI_COPY_MEM_MEM_DST_GGTT REG_BIT(21)
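
MI_STORE_REGISTER_MEM (with MI_SRM_USE_GGTT / MI_SRM_ADD_CS_OFFSET) is the mirror of the existing MI_LOAD_REGISTER_MEM: it snapshots an MMIO register into memory. A hedged emission sketch, 4 dwords total to match XE_INSTR_NUM_DW(4):

/* Illustrative: capture a ring register into a GGTT-addressed buffer. */
static u32 *emit_srm(u32 *cs, u32 reg_addr, u64 ggtt_addr)
{
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
	*cs++ = reg_addr;			/* register offset, CS-relative */
	*cs++ = lower_32_bits(ggtt_addr);	/* destination address, low */
	*cs++ = upper_32_bits(ggtt_addr);	/* destination address, high */

	return cs;
}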
diff --git a/drivers/gpu/drm/xe/regs/xe_bars.h b/drivers/gpu/drm/xe/regs/xe_bars.h
index ce05b6ae832f..880140d6ccdc 100644
--- a/drivers/gpu/drm/xe/regs/xe_bars.h
+++ b/drivers/gpu/drm/xe/regs/xe_bars.h
@@ -7,5 +7,6 @@
#define GTTMMADR_BAR 0 /* MMIO + GTT */
#define LMEM_BAR 2 /* VRAM */
+#define VF_LMEM_BAR 9 /* VF VRAM */
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index fb8ec317b6ee..7ade41e2b7b3 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -43,6 +43,10 @@
#define XEHPC_BCS8_RING_BASE 0x3ee000
#define GSCCS_RING_BASE 0x11a000
+#define ENGINE_ID(base) XE_REG((base) + 0x8c)
+#define ENGINE_INSTANCE_ID REG_GENMASK(9, 4)
+#define ENGINE_CLASS_ID REG_GENMASK(2, 0)
+
#define RING_TAIL(base) XE_REG((base) + 0x30)
#define TAIL_ADDR REG_GENMASK(20, 3)
@@ -154,6 +158,7 @@
#define STOP_RING REG_BIT(8)
#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8)
+#define RING_CTX_TIMESTAMP_UDW(base) XE_REG((base) + 0x3ac)
#define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc)
#define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4)
@@ -188,6 +193,10 @@
#define PREEMPT_GPGPU_LEVEL_MASK PREEMPT_GPGPU_LEVEL(1, 1)
#define PREEMPT_3D_OBJECT_LEVEL REG_BIT(0)
+#define CS_GPR_DATA(base, n) XE_REG((base) + 0x600 + (n) * 4)
+#define CS_GPR_REG(base, n) CS_GPR_DATA((base), (n) * 2)
+#define CS_GPR_REG_UDW(base, n) CS_GPR_DATA((base), (n) * 2 + 1)
+
#define VDBOX_CGCTL3F08(base) XE_REG((base) + 0x3f08)
#define CG3DDISHRS_CLKGATE_DIS REG_BIT(5)
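
ENGINE_ID is a per-engine register whose fields identify the engine to itself; decoding uses the standard REG_FIELD_GET helpers. A small sketch (the xe_mmio_read32() accessor usage is an assumption here, shown only to make the decode concrete):

/* Illustrative decode; the MMIO read helper is an assumption. */
static void decode_engine_id(struct xe_mmio *mmio, u32 ring_base)
{
	u32 val = xe_mmio_read32(mmio, ENGINE_ID(ring_base));
	u32 instance = REG_FIELD_GET(ENGINE_INSTANCE_ID, val);
	u32 class = REG_FIELD_GET(ENGINE_CLASS_ID, val);

	pr_debug("engine class %u, instance %u\n", class, instance);
}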
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index 7702364b65f1..9b66cc972a63 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -16,6 +16,10 @@
#define MTL_GSC_HECI1_BASE 0x00116000
#define MTL_GSC_HECI2_BASE 0x00117000
+#define DG1_GSC_HECI2_BASE 0x00259000
+#define PVC_GSC_HECI2_BASE 0x00285000
+#define DG2_GSC_HECI2_BASE 0x00374000
+
#define HECI_H_CSR(base) XE_REG((base) + 0x4)
#define HECI_H_CSR_IE REG_BIT(0)
#define HECI_H_CSR_IS REG_BIT(1)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index da1f198ac107..5cd5ab8529c5 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -62,7 +62,6 @@
#define LE_SSE_MASK REG_GENMASK(18, 17)
#define LE_SSE(value) REG_FIELD_PREP(LE_SSE_MASK, value)
#define LE_COS_MASK REG_GENMASK(16, 15)
-#define LE_COS(value) REG_FIELD_PREP(LE_COS_MASK)
#define LE_SCF_MASK REG_BIT(14)
#define LE_SCF(value) REG_FIELD_PREP(LE_SCF_MASK, value)
#define LE_PFM_MASK REG_GENMASK(13, 11)
@@ -157,6 +156,7 @@
#define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108)
#define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED)
+#define SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE REG_BIT(12)
#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
#define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED)
@@ -392,6 +392,18 @@
#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
#define XEHP_LNESPARE REG_BIT(19)
+#define LSN_VC_REG2 XE_REG_MCR(0xb0c8)
+#define LSN_LNI_WGT_MASK REG_GENMASK(31, 28)
+#define LSN_LNI_WGT(value) REG_FIELD_PREP(LSN_LNI_WGT_MASK, value)
+#define LSN_LNE_WGT_MASK REG_GENMASK(27, 24)
+#define LSN_LNE_WGT(value) REG_FIELD_PREP(LSN_LNE_WGT_MASK, value)
+#define LSN_DIM_X_WGT_MASK REG_GENMASK(23, 20)
+#define LSN_DIM_X_WGT(value) REG_FIELD_PREP(LSN_DIM_X_WGT_MASK, value)
+#define LSN_DIM_Y_WGT_MASK REG_GENMASK(19, 16)
+#define LSN_DIM_Y_WGT(value) REG_FIELD_PREP(LSN_DIM_Y_WGT_MASK, value)
+#define LSN_DIM_Z_WGT_MASK REG_GENMASK(15, 12)
+#define LSN_DIM_Z_WGT(value) REG_FIELD_PREP(LSN_DIM_Z_WGT_MASK, value)
+
#define L3SQCREG2 XE_REG_MCR(0xb104)
#define COMPMEMRD256BOVRFETCHEN REG_BIT(20)
diff --git a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
new file mode 100644
index 000000000000..af781c8e4a80
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _XE_I2C_REGS_H_
+#define _XE_I2C_REGS_H_
+
+#include <linux/pci_regs.h>
+
+#include "xe_reg_defs.h"
+#include "xe_regs.h"
+
+#define I2C_BRIDGE_OFFSET (SOC_BASE + 0xd9000)
+#define I2C_CONFIG_SPACE_OFFSET (SOC_BASE + 0xf6000)
+#define I2C_MEM_SPACE_OFFSET (SOC_BASE + 0xf7400)
+
+#define REG_SG_REMAP_ADDR_PREFIX XE_REG(SOC_BASE + 0x0164)
+#define REG_SG_REMAP_ADDR_POSTFIX XE_REG(SOC_BASE + 0x0168)
+
+#define I2C_CONFIG_CMD XE_REG(I2C_CONFIG_SPACE_OFFSET + PCI_COMMAND)
+#define I2C_CONFIG_PMCSR XE_REG(I2C_CONFIG_SPACE_OFFSET + 0x84)
+
+#endif /* _XE_I2C_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
index f0ecfcac4003..13635e4331d4 100644
--- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -19,6 +19,7 @@
#define MASTER_IRQ REG_BIT(31)
#define GU_MISC_IRQ REG_BIT(29)
#define DISPLAY_IRQ REG_BIT(16)
+#define I2C_IRQ REG_BIT(12)
#define GT_DW_IRQ(x) REG_BIT(x)
/*
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 57944f90bbf6..1b101edb838b 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -11,8 +11,14 @@
#define CTX_RING_TAIL (0x06 + 1)
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
+#define CTX_BB_PER_CTX_PTR (0x12 + 1)
+#define CTX_CS_INDIRECT_CTX (0x14 + 1)
+#define CTX_CS_INDIRECT_CTX_OFFSET (0x16 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
+#define CTX_TIMESTAMP_UDW (0x24 + 1)
#define CTX_INDIRECT_RING_STATE (0x26 + 1)
+#define CTX_ACC_CTR_THOLD (0x2a + 1)
+#define CTX_ASID (0x2e + 1)
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)
@@ -34,4 +40,7 @@
#define INDIRECT_CTX_RING_START_UDW (0x08 + 1)
#define INDIRECT_CTX_RING_CTL (0x0a + 1)
+#define CTX_INDIRECT_CTX_OFFSET_MASK REG_GENMASK(15, 6)
+#define CTX_INDIRECT_CTX_OFFSET_DEFAULT REG_FIELD_PREP(CTX_INDIRECT_CTX_OFFSET_MASK, 0xd)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
index f5e5234857c1..ef2bf984723f 100644
--- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -38,10 +38,11 @@
#define TEMP_MASK REG_GENMASK(7, 0)
#define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
-#define PKG_PWR_LIM_1 REG_GENMASK(14, 0)
-#define PKG_PWR_LIM_1_EN REG_BIT(15)
-#define PKG_PWR_LIM_1_TIME REG_GENMASK(23, 17)
-#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23, 22)
-#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21, 17)
+#define PWR_LIM_VAL REG_GENMASK(14, 0)
+#define PWR_LIM_EN REG_BIT(15)
+#define PWR_LIM REG_GENMASK(15, 0)
+#define PWR_LIM_TIME REG_GENMASK(23, 17)
+#define PWR_LIM_TIME_X REG_GENMASK(23, 22)
+#define PWR_LIM_TIME_Y REG_GENMASK(21, 17)
#endif /* _XE_MCHBAR_REGS_H_ */
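
The renamed PWR_LIM_* fields drop the PKG_PWR_LIM_1 prefix so the same encoding can serve both package and platform limits. An encode/decode sketch using the generic helpers (the value is in hardware-specific power units, derived elsewhere from the SKU unit register):

/* Illustrative only; unit conversion is the caller's problem. */
static u32 encode_power_limit(u32 limit_in_units)
{
	return REG_FIELD_PREP(PWR_LIM_VAL, limit_in_units) | PWR_LIM_EN;
}

static u32 decode_power_limit(u32 reg)
{
	return (reg & PWR_LIM_EN) ? REG_FIELD_GET(PWR_LIM_VAL, reg) : 0;
}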
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a79ad2da070c..e693a50706f8 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -97,4 +97,7 @@
#define OAM_STATUS(base) XE_REG((base) + OAM_STATUS_OFFSET)
#define OAM_MMIO_TRG(base) XE_REG((base) + OAM_MMIO_TRG_OFFSET)
+#define OAM_COMPRESSION_T3_CONTROL XE_REG(0x1c2e00)
+#define OAM_LAT_MEASURE_ENABLE REG_BIT(4)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
index 8846eb9ce2a4..fb097607b86c 100644
--- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -18,13 +18,10 @@
#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
-#define BMG_PACKAGE_POWER_SKU XE_REG(0x138098)
-#define BMG_PACKAGE_POWER_SKU_UNIT XE_REG(0x1380dc)
-#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120)
+#define BMG_FAN_1_SPEED XE_REG(0x138140)
+#define BMG_FAN_2_SPEED XE_REG(0x138170)
+#define BMG_FAN_3_SPEED XE_REG(0x1381a0)
#define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0)
#define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434)
-#define BMG_PACKAGE_RAPL_LIMIT XE_REG(0x138440)
-#define BMG_PLATFORM_ENERGY_STATUS XE_REG(0x138458)
-#define BMG_PLATFORM_POWER_LIMIT XE_REG(0x138460)
#endif /* _XE_PCODE_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
index f45abcd96ba8..2995d72c3f78 100644
--- a/drivers/gpu/drm/xe/regs/xe_pmt.h
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -5,11 +5,16 @@
#ifndef _XE_PMT_H_
#define _XE_PMT_H_
-#define SOC_BASE 0x280000
+#include "xe_regs.h"
#define BMG_PMT_BASE_OFFSET 0xDB000
#define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET)
+#define PUNIT_TELEMETRY_GUID XE_REG(BMG_DISCOVERY_OFFSET + 0x4)
+#define BMG_ENERGY_STATUS_PMT_OFFSET (0x30)
+#define ENERGY_PKG REG_GENMASK64(31, 0)
+#define ENERGY_CARD REG_GENMASK64(63, 32)
+
#define BMG_TELEMETRY_BASE_OFFSET 0xE0000
#define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE_OFFSET)
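
ENERGY_PKG and ENERGY_CARD are two 32-bit counters packed into the single 64-bit telemetry qword at BMG_ENERGY_STATUS_PMT_OFFSET; REG_GENMASK64 plus FIELD_GET splits them. Sketch (how the caller obtains the raw qword from the PMT region is out of scope here):

#include <linux/bitfield.h>

/* Illustrative; the qword comes from a PMT telemetry read. */
static void decode_energy_status(u64 qword)
{
	u32 pkg_energy = FIELD_GET(ENERGY_PKG, qword);		/* bits 31:0 */
	u32 card_energy = FIELD_GET(ENERGY_CARD, qword);	/* bits 63:32 */

	pr_debug("pkg %u, card %u (raw energy units)\n", pkg_energy, card_energy);
}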
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 3abb17d2ca33..1926b4044314 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -7,6 +7,8 @@
#include "regs/xe_reg_defs.h"
+#define SOC_BASE 0x280000
+
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 9fde67ca989f..bb469096d072 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Evict to system. CCS data should be copied. */
- ret = xe_bo_evict(bo, true);
+ ret = xe_bo_evict(bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -106,7 +106,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Check last CCS value, or at least last value in page. */
- offset = xe_device_ccs_bytes(tile_to_xe(tile), bo->size);
+ offset = xe_device_ccs_bytes(tile_to_xe(tile), xe_bo_size(bo));
offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
if (cpu_map[offset] != get_val) {
KUNIT_FAIL(test,
@@ -252,7 +252,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for_each_gt(__gt, xe, id)
xe_gt_sanitize(__gt);
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
/*
* Snapshotting the CTB and copying back a potentially old
* version seems risky, depending on what might have been
@@ -273,7 +273,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
goto cleanup_all;
}
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err) {
KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
goto cleanup_all;
@@ -514,9 +514,9 @@ static int shrink_test_run_device(struct xe_device *xe)
* other way around, they may not be subject to swapping...
*/
if (alloced < purgeable) {
- xe_ttm_tt_account_subtract(&xe_tt->ttm);
+ xe_ttm_tt_account_subtract(xe, &xe_tt->ttm);
xe_tt->purgeable = true;
- xe_ttm_tt_account_add(&xe_tt->ttm);
+ xe_ttm_tt_account_add(xe, &xe_tt->ttm);
bo->ttm.priority = 0;
spin_lock(&bo->ttm.bdev->lru_lock);
ttm_bo_move_to_lru_tail(&bo->ttm);
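
Several test hunks in this series replace direct bo->size reads with xe_bo_size(); per the kunit hunk below that sets bo->ttm.base.size, the size now lives in the embedded TTM object rather than in a duplicate xe_bo field. A plausible shape for the accessor (an assumption, the real helper lives in xe_bo.h):

/* Assumed shape of the accessor; the size moved into the TTM base object. */
static inline size_t xe_bo_size(struct xe_bo *bo)
{
	return bo->ttm.base.size;
}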
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index cedd3e88a6fb..c53f67ce4b0a 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -65,7 +65,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* the exporter and the importer should be the same bo.
*/
swap(exported->ttm.base.dma_buf, dmabuf);
- ret = xe_bo_evict(exported, true);
+ ret = xe_bo_evict(exported);
swap(exported->ttm.base.dma_buf, dmabuf);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
deleted file mode 100644
index b683585db852..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 AND MIT
-/*
- * Copyright © 2024 Intel Corporation
- */
-
-#include <kunit/test.h>
-
-#include "xe_device.h"
-#include "xe_kunit_helpers.h"
-#include "xe_pci_test.h"
-
-static int pf_service_test_init(struct kunit *test)
-{
- struct xe_pci_fake_data fake = {
- .sriov_mode = XE_SRIOV_MODE_PF,
- .platform = XE_TIGERLAKE, /* some random platform */
- .subplatform = XE_SUBPLATFORM_NONE,
- };
- struct xe_device *xe;
- struct xe_gt *gt;
-
- test->priv = &fake;
- xe_kunit_helper_xe_device_test_init(test);
-
- xe = test->priv;
- KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
-
- gt = xe_device_get_gt(xe, 0);
- pf_init_versions(gt);
-
- /*
- * sanity check:
- * - all supported platforms VF/PF ABI versions must be defined
- * - base version can't be newer than latest
- */
- KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.latest.major);
- if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
- KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.minor,
- gt->sriov.pf.service.version.latest.minor);
-
- test->priv = gt;
- return 0;
-}
-
-static void pf_negotiate_any(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt, VF2PF_HANDSHAKE_MAJOR_ANY,
- VF2PF_HANDSHAKE_MINOR_ANY,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_base_match(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.base.minor,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.base.minor);
-}
-
-static void pf_negotiate_base_newer(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.base.minor + 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_GE(test, minor, gt->sriov.pf.service.version.base.minor);
- if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
- KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
- else
- KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
-}
-
-static void pf_negotiate_base_next(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major + 1, 0,
- &major, &minor));
- KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_LE(test, major, gt->sriov.pf.service.version.latest.major);
- if (major == gt->sriov.pf.service.version.latest.major)
- KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
- else
- KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
-}
-
-static void pf_negotiate_base_older(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- if (!gt->sriov.pf.service.version.base.minor)
- kunit_skip(test, "no older minor\n");
-
- KUNIT_ASSERT_NE(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.base.minor - 1,
- &major, &minor));
-}
-
-static void pf_negotiate_base_prev(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_NE(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major - 1, 1,
- &major, &minor));
-}
-
-static void pf_negotiate_latest_match(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major,
- gt->sriov.pf.service.version.latest.minor,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_latest_newer(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major,
- gt->sriov.pf.service.version.latest.minor + 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_latest_next(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major + 1, 0,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_latest_older(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- if (!gt->sriov.pf.service.version.latest.minor)
- kunit_skip(test, "no older minor\n");
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major,
- gt->sriov.pf.service.version.latest.minor - 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor - 1);
-}
-
-static void pf_negotiate_latest_prev(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
- kunit_skip(test, "no prev major");
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major - 1,
- gt->sriov.pf.service.version.base.minor + 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major - 1);
- KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
-}
-
-static struct kunit_case pf_service_test_cases[] = {
- KUNIT_CASE(pf_negotiate_any),
- KUNIT_CASE(pf_negotiate_base_match),
- KUNIT_CASE(pf_negotiate_base_newer),
- KUNIT_CASE(pf_negotiate_base_next),
- KUNIT_CASE(pf_negotiate_base_older),
- KUNIT_CASE(pf_negotiate_base_prev),
- KUNIT_CASE(pf_negotiate_latest_match),
- KUNIT_CASE(pf_negotiate_latest_newer),
- KUNIT_CASE(pf_negotiate_latest_next),
- KUNIT_CASE(pf_negotiate_latest_older),
- KUNIT_CASE(pf_negotiate_latest_prev),
- {}
-};
-
-static struct kunit_suite pf_service_suite = {
- .name = "pf_service",
- .test_cases = pf_service_test_cases,
- .init = pf_service_test_init,
-};
-
-kunit_test_suite(pf_service_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
index 6faffcd74869..d266882adc0e 100644
--- a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
+++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
@@ -32,7 +32,7 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *
bo->tile = tile;
bo->ttm.bdev = &xe->ttm;
- bo->size = size;
+ bo->ttm.base.size = size;
iosys_map_set_vaddr(&bo->vmap, buf);
if (flags & XE_BO_FLAG_GGTT) {
@@ -42,10 +42,8 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);
KUNIT_ASSERT_EQ(test, 0,
- drm_mm_insert_node_in_range(&ggtt->mm,
- &bo->ggtt_node[tile->id]->base,
- bo->size, SZ_4K,
- 0, 0, U64_MAX, 0));
+ xe_ggtt_node_insert(bo->ggtt_node[tile->id],
+ xe_bo_size(bo), SZ_4K));
}
return bo;
@@ -67,8 +65,9 @@ static int guc_buf_test_init(struct kunit *test)
ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
guc = &xe_device_get_gt(test->priv, 0)->uc.guc;
- drm_mm_init(&ggtt->mm, DUT_GGTT_START, DUT_GGTT_SIZE);
- mutex_init(&ggtt->lock);
+ KUNIT_ASSERT_EQ(test, 0,
+ xe_ggtt_init_kunit(ggtt, DUT_GGTT_START,
+ DUT_GGTT_START + DUT_GGTT_SIZE));
kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
replacement_xe_managed_bo_create_pin_map);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index d5fe0ea889ad..edd1e701aa1c 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -74,13 +74,13 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
{
struct xe_device *xe = tile_to_xe(m->tile);
u64 retval, expected = 0;
- bool big = bo->size >= SZ_2M;
+ bool big = xe_bo_size(bo) >= SZ_2M;
struct dma_fence *fence;
const char *str = big ? "Copying big bo" : "Copying small bo";
int err;
struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL,
- bo->size,
+ xe_bo_size(bo),
ttm_bo_type_kernel,
region |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
@@ -105,7 +105,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
goto out_unlock;
}
- xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
+ xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));
fence = xe_migrate_clear(m, remote, remote->ttm.resource,
XE_MIGRATE_CLEAR_FLAG_FULL);
if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
@@ -113,15 +113,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
retval = xe_map_rd(xe, &remote->vmap, 0, u64);
check(retval, expected, "remote first offset should be cleared",
test);
- retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
+ retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(remote) - 8, u64);
check(retval, expected, "remote last offset should be cleared",
test);
}
dma_fence_put(fence);
/* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */
- xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size);
- xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);
+ xe_map_memset(xe, &remote->vmap, 0, 0xc0, xe_bo_size(remote));
+ xe_map_memset(xe, &bo->vmap, 0, 0xd0, xe_bo_size(bo));
expected = 0xc0c0c0c0c0c0c0c0;
fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource,
@@ -131,15 +131,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
retval = xe_map_rd(xe, &bo->vmap, 0, u64);
check(retval, expected,
"remote -> vram bo first offset should be copied", test);
- retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
+ retval = xe_map_rd(xe, &bo->vmap, xe_bo_size(bo) - 8, u64);
check(retval, expected,
"remote -> vram bo offset should be copied", test);
}
dma_fence_put(fence);
/* And other way around.. slightly hacky.. */
- xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
- xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);
+ xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));
+ xe_map_memset(xe, &bo->vmap, 0, 0xc0, xe_bo_size(bo));
fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource,
remote->ttm.resource, false);
@@ -148,7 +148,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
retval = xe_map_rd(xe, &remote->vmap, 0, u64);
check(retval, expected,
"vram -> remote bo first offset should be copied", test);
- retval = xe_map_rd(xe, &remote->vmap, bo->size - 8, u64);
+ retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(bo) - 8, u64);
check(retval, expected,
"vram -> remote bo last offset should be copied", test);
}
@@ -202,8 +202,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -211,8 +210,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -222,8 +220,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
PTR_ERR(tiny));
@@ -248,9 +245,9 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
if (m->q->vm->flags & XE_VM_FLAG_64K)
expected |= XE_PTE_PS64;
if (xe_bo_is_vram(pt))
- xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
+ xe_res_first(pt->ttm.resource, 0, xe_bo_size(pt), &src_it);
else
- xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
+ xe_res_first_sg(xe_bo_sg(pt), 0, xe_bo_size(pt), &src_it);
emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
&src_it, XE_PAGE_SIZE, pt->ttm.resource);
@@ -279,7 +276,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
/* Clear a small bo */
kunit_info(test, "Clearing small buffer object\n");
- xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
+ xe_map_memset(xe, &tiny->vmap, 0, 0x22, xe_bo_size(tiny));
expected = 0;
fence = xe_migrate_clear(m, tiny, tiny->ttm.resource,
XE_MIGRATE_CLEAR_FLAG_FULL);
@@ -289,7 +286,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
dma_fence_put(fence);
retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
check(retval, expected, "Command clear small first value", test);
- retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
+ retval = xe_map_rd(xe, &tiny->vmap, xe_bo_size(tiny) - 4, u32);
check(retval, expected, "Command clear small last value", test);
kunit_info(test, "Copying small buffer object to system\n");
@@ -301,7 +298,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
/* Clear a big bo */
kunit_info(test, "Clearing big buffer object\n");
- xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
+ xe_map_memset(xe, &big->vmap, 0, 0x11, xe_bo_size(big));
expected = 0;
fence = xe_migrate_clear(m, big, big->ttm.resource,
XE_MIGRATE_CLEAR_FLAG_FULL);
@@ -311,7 +308,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
dma_fence_put(fence);
retval = xe_map_rd(xe, &big->vmap, 0, u32);
check(retval, expected, "Command clear big first value", test);
- retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
+ retval = xe_map_rd(xe, &big->vmap, xe_bo_size(big) - 4, u32);
check(retval, expected, "Command clear big last value", test);
kunit_info(test, "Copying big buffer object to system\n");
@@ -373,7 +370,7 @@ static struct dma_fence *blt_copy(struct xe_tile *tile,
struct xe_migrate *m = tile->migrate;
struct xe_device *xe = gt_to_xe(gt);
struct dma_fence *fence = NULL;
- u64 size = src_bo->size;
+ u64 size = xe_bo_size(src_bo);
struct xe_res_cursor src_it, dst_it;
struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
u64 src_L0_ofs, dst_L0_ofs;
@@ -501,7 +498,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
long ret;
expected = 0xd0d0d0d0d0d0d0d0;
- xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo));
fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
@@ -512,7 +509,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Evict vram buffer object\n");
- ret = xe_bo_evict(vram_bo, true);
+ ret = xe_bo_evict(vram_bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return;
@@ -526,7 +523,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
check(retval, expected, "Clear evicted vram data first value", test);
- retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64);
check(retval, expected, "Clear evicted vram data last value", test);
fence = blt_copy(tile, vram_bo, ccs_bo,
@@ -535,7 +532,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
check(retval, 0, "Clear ccs data first value", test);
- retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64);
check(retval, 0, "Clear ccs data last value", test);
}
dma_fence_put(fence);
@@ -565,7 +562,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
check(retval, expected, "Restored value must be equal to initial value", test);
- retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64);
check(retval, expected, "Restored value must be equal to initial value", test);
fence = blt_copy(tile, vram_bo, ccs_bo,
@@ -573,7 +570,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
check(retval, 0, "Clear ccs data first value", test);
- retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64);
check(retval, 0, "Clear ccs data last value", test);
}
dma_fence_put(fence);
@@ -586,7 +583,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
u64 expected, retval;
expected = 0xd0d0d0d0d0d0d0d0;
- xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo));
fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
@@ -600,7 +597,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
check(retval, expected, "Decompressed value must be equal to initial value", test);
- retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
check(retval, expected, "Decompressed value must be equal to initial value", test);
}
dma_fence_put(fence);
@@ -618,7 +615,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
check(retval, expected, "Clear main buffer first value", test);
- retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
check(retval, expected, "Clear main buffer last value", test);
}
dma_fence_put(fence);
@@ -628,7 +625,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
check(retval, expected, "Clear ccs data first value", test);
- retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
check(retval, expected, "Clear ccs data last value", test);
}
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index ef1e5256c56a..0e502feaca81 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -46,8 +46,11 @@ static void read_l3cc_table(struct xe_gt *gt,
unsigned int fw_ref, i;
u32 reg_val;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ KUNIT_ASSERT_TRUE_MSG(test, false, "Forcewake Failed.\n");
+ }
for (i = 0; i < info->num_mocs_regs; i++) {
if (!(i & 1)) {
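
Note: a minimal sketch of the forcewake idiom the hunk above switches to, using only the helpers visible in the diff (xe_force_wake_get/put, xe_force_wake_ref_has_domain); in non-test code the failure path would return an error instead of failing a KUnit assertion:

	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		/* Drop whatever partial reference we did get. */
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}
	/* ... access registers that need all domains awake ... */
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
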
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 1d3e2e50c355..9c715e59f030 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,49 +12,79 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
+static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s",
+ param->verx100 / 100, param->verx100 % 100, param->name);
+}
+
+KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc);
+KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc);
+
+static void xe_pci_id_kunit_desc(const struct pci_device_id *param, char *desc)
+{
+ const struct xe_device_desc *dev_desc =
+ (const struct xe_device_desc *)param->driver_data;
+
+ if (dev_desc)
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "0x%X (%s)",
+ param->device, dev_desc->platform_name);
+}
+
+KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);
+
/**
- * xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs
- * @xe_fn: Function to call for each device.
+ * xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
- * This function iterates over the descriptors for all graphics IPs recognized
- * by the driver and calls @xe_fn: for each one of them.
+ * This function prepares a struct xe_ip parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
*/
-void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn)
+const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc)
{
- const struct xe_graphics_desc *desc, *last = NULL;
-
- for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
- desc = graphics_ips[i].desc;
- if (desc == last)
- continue;
-
- xe_fn(desc);
- last = desc;
- }
+ return graphics_ip_gen_params(prev, desc);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
/**
- * xe_call_for_each_media_ip - Iterate over all recognized media IPs
- * @xe_fn: Function to call for each device.
+ * xe_pci_media_ip_gen_param - Generate media struct xe_ip parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a struct xe_ip parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
*
- * This function iterates over the descriptors for all media IPs recognized
- * by the driver and calls @xe_fn: for each one of them.
+ * Return: pointer to the next parameter or NULL if no more parameters
*/
-void xe_call_for_each_media_ip(xe_media_fn xe_fn)
+const void *xe_pci_media_ip_gen_param(const void *prev, char *desc)
{
- const struct xe_media_desc *desc, *last = NULL;
+ return media_ip_gen_params(prev, desc);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
- for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
- desc = media_ips[i].desc;
- if (desc == last)
- continue;
+/**
+ * xe_pci_id_gen_param - Generate struct pci_device_id parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a struct pci_device_id parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
+ */
+const void *xe_pci_id_gen_param(const void *prev, char *desc)
+{
+ const struct pci_device_id *pci = pci_id_gen_params(prev, desc);
- xe_fn(desc);
- last = desc;
- }
+ return pci->driver_data ? pci : NULL;
}
-EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip);
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_id_gen_param);
static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
u32 *ver, u32 *revid)
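
Note, for readers unfamiliar with the KUnit machinery adopted above: KUNIT_ARRAY_PARAM(name, array, desc_fn) generates a name_gen_params(prev, desc) iterator over the array, which KUNIT_CASE_PARAM attaches to a test case; the current element is then available as test->param_value. A self-contained sketch with toy names (not from this patch), assuming <kunit/test.h>:

	static const int my_values[] = { 1, 2, 3 };

	static void my_value_desc(const int *value, char *desc)
	{
		snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value %d", *value);
	}
	KUNIT_ARRAY_PARAM(my_value, my_values, my_value_desc);

	static void my_test(struct kunit *test)
	{
		const int *value = test->param_value;	/* current element */

		KUNIT_EXPECT_GT(test, *value, 0);
	}

	static struct kunit_case my_cases[] = {
		KUNIT_CASE_PARAM(my_test, my_value_gen_params),
		{}
	};

	static struct kunit_suite my_suite = {
		.name = "my_suite",
		.test_cases = my_cases,
	};
	kunit_test_suite(my_suite);
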
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index 744a37583d2d..37b344df2dc3 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -14,9 +14,10 @@
#include "xe_pci_test.h"
#include "xe_pci_types.h"
-static void check_graphics_ip(const struct xe_graphics_desc *graphics)
+static void check_graphics_ip(struct kunit *test)
{
- struct kunit *test = kunit_get_current_test();
+ const struct xe_ip *param = test->param_value;
+ const struct xe_graphics_desc *graphics = param->desc;
u64 mask = graphics->hw_engine_mask;
/* RCS, CCS, and BCS engines are allowed on the graphics IP */
@@ -28,9 +29,10 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void check_media_ip(const struct xe_media_desc *media)
+static void check_media_ip(struct kunit *test)
{
- struct kunit *test = kunit_get_current_test();
+ const struct xe_ip *param = test->param_value;
+ const struct xe_media_desc *media = param->desc;
u64 mask = media->hw_engine_mask;
/* VCS, VECS and GSCCS engines are allowed on the media IP */
@@ -42,19 +44,21 @@ static void check_media_ip(const struct xe_media_desc *media)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void xe_gmdid_graphics_ip(struct kunit *test)
+static void check_platform_gt_count(struct kunit *test)
{
- xe_call_for_each_graphics_ip(check_graphics_ip);
-}
+ const struct pci_device_id *pci = test->param_value;
+ const struct xe_device_desc *desc =
+ (const struct xe_device_desc *)pci->driver_data;
+ int max_gt = desc->max_gt_per_tile;
-static void xe_gmdid_media_ip(struct kunit *test)
-{
- xe_call_for_each_media_ip(check_media_ip);
+ KUNIT_ASSERT_GT(test, max_gt, 0);
+ KUNIT_ASSERT_LE(test, max_gt, XE_MAX_GT_PER_TILE);
}
static struct kunit_case xe_pci_tests[] = {
- KUNIT_CASE(xe_gmdid_graphics_ip),
- KUNIT_CASE(xe_gmdid_media_ip),
+ KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param),
+ KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param),
+ KUNIT_CASE_PARAM(check_platform_gt_count, xe_pci_id_gen_param),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index ede46800aff1..ce4d2b86b778 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -12,15 +12,6 @@
#include "xe_sriov_types.h"
struct xe_device;
-struct xe_graphics_desc;
-struct xe_media_desc;
-
-typedef int (*xe_device_fn)(struct xe_device *);
-typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *);
-typedef void (*xe_media_fn)(const struct xe_media_desc *);
-
-void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
-void xe_call_for_each_media_ip(xe_media_fn xe_fn);
struct xe_pci_fake_data {
enum xe_sriov_mode sriov_mode;
@@ -34,6 +25,9 @@ struct xe_pci_fake_data {
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc);
+const void *xe_pci_media_ip_gen_param(const void *prev, char *desc);
+const void *xe_pci_id_gen_param(const void *prev, char *desc);
const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c b/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c
new file mode 100644
index 000000000000..ba95e29b597d
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024-2025 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+static int pf_service_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_device *xe;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ xe = test->priv;
+ KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
+
+ xe_sriov_pf_service_init(xe);
+ /*
+ * sanity check:
+ * - VF/PF ABI versions must be defined for all supported platforms
+ * - base version can't be newer than latest
+ */
+ KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.latest.major);
+ if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.minor,
+ xe->sriov.pf.service.version.latest.minor);
+ return 0;
+}
+
+static void pf_negotiate_any(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe, VF2PF_HANDSHAKE_MAJOR_ANY,
+ VF2PF_HANDSHAKE_MINOR_ANY,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_base_match(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.base.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.base.minor);
+}
+
+static void pf_negotiate_base_newer(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_GE(test, minor, xe->sriov.pf.service.version.base.minor);
+ if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, xe->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_next(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_GE(test, major, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_LE(test, major, xe->sriov.pf.service.version.latest.major);
+ if (major == xe->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, xe->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_older(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ if (!xe->sriov.pf.service.version.base.minor)
+ kunit_skip(test, "no older minor");
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.base.minor - 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_base_prev(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major - 1, 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_latest_match(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major,
+ xe->sriov.pf.service.version.latest.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_newer(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major,
+ xe->sriov.pf.service.version.latest.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_next(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_older(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ if (!xe->sriov.pf.service.version.latest.minor)
+ kunit_skip(test, "no older minor");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major,
+ xe->sriov.pf.service.version.latest.minor - 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor - 1);
+}
+
+static void pf_negotiate_latest_prev(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)
+ kunit_skip(test, "no prev major");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major - 1,
+ xe->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major - 1);
+ KUNIT_ASSERT_GE(test, major, xe->sriov.pf.service.version.base.major);
+}
+
+static struct kunit_case pf_service_test_cases[] = {
+ KUNIT_CASE(pf_negotiate_any),
+ KUNIT_CASE(pf_negotiate_base_match),
+ KUNIT_CASE(pf_negotiate_base_newer),
+ KUNIT_CASE(pf_negotiate_base_next),
+ KUNIT_CASE(pf_negotiate_base_older),
+ KUNIT_CASE(pf_negotiate_base_prev),
+ KUNIT_CASE(pf_negotiate_latest_match),
+ KUNIT_CASE(pf_negotiate_latest_newer),
+ KUNIT_CASE(pf_negotiate_latest_next),
+ KUNIT_CASE(pf_negotiate_latest_older),
+ KUNIT_CASE(pf_negotiate_latest_prev),
+ {}
+};
+
+static struct kunit_suite pf_service_suite = {
+ .name = "pf_service",
+ .test_cases = pf_service_test_cases,
+ .init = pf_service_test_init,
+};
+
+kunit_test_suite(pf_service_suite);
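
Note: the cases above pin down the negotiation policy without showing the implementation. Reading the assertions together, pf_negotiate_version() behaves roughly like the following sketch (an illustration inferred from the tests, not the driver's code; the exact error code is assumed, and how minors are chosen for intermediate majors is explicitly left open by the FIXME cases):

	static int negotiate_sketch(struct xe_device *xe, u32 wanted_major,
				    u32 wanted_minor, u32 *major, u32 *minor)
	{
		u32 base_mj = xe->sriov.pf.service.version.base.major;
		u32 base_mn = xe->sriov.pf.service.version.base.minor;
		u32 latest_mj = xe->sriov.pf.service.version.latest.major;
		u32 latest_mn = xe->sriov.pf.service.version.latest.minor;

		/* "Any", or anything newer than we support: offer the latest. */
		if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY ||
		    wanted_major > latest_mj ||
		    (wanted_major == latest_mj && wanted_minor > latest_mn)) {
			*major = latest_mj;
			*minor = latest_mn;
			return 0;
		}
		/* Older than the oldest ABI we still support: refuse. */
		if (wanted_major < base_mj ||
		    (wanted_major == base_mj && wanted_minor < base_mn))
			return -EPERM;	/* tests only require non-zero */
		/* Otherwise the wanted version is within the supported range. */
		*major = wanted_major;
		*minor = wanted_minor;
		return 0;
	}
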
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 9570672fce33..5ce0e26822f2 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -19,7 +19,7 @@ static int bb_prefetch(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- if (GRAPHICS_VERx100(xe) >= 1250 && !xe_gt_is_media_type(gt))
+ if (GRAPHICS_VERx100(xe) >= 1250 && xe_gt_is_main_type(gt))
/*
* RCS and CCS require 1K, although other engines would be
* okay with 512.
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
index fafacd73dcc3..b5cc65506696 100644
--- a/drivers/gpu/drm/xe/xe_bb.h
+++ b/drivers/gpu/drm/xe/xe_bb.h
@@ -14,7 +14,7 @@ struct xe_gt;
struct xe_exec_queue;
struct xe_sched_job;
-struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);
+struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm);
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
struct xe_bb *bb);
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
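
Note: the rename above only documents the unit, the second argument is a count of command dwords, not bytes. Hypothetical usage (MI_NOOP and the bb->cs/bb->len fields are assumed here for illustration):

	struct xe_bb *bb = xe_bb_new(gt, 16, false);	/* room for 16 dwords */

	if (IS_ERR(bb))
		return PTR_ERR(bb);
	bb->cs[bb->len++] = MI_NOOP;	/* emit commands dword by dword */
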
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 64f9c936eea0..18f27da47a36 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -19,6 +19,8 @@
#include <kunit/static_stub.h>
+#include <trace/events/gpu_mem.h>
+
#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
@@ -55,6 +57,8 @@ static struct ttm_placement sys_placement = {
.placement = &sys_placement_flags,
};
+static struct ttm_placement purge_placement;
+
static const struct ttm_place tt_placement_flags[] = {
{
.fpfn = 0,
@@ -189,11 +193,18 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
static bool force_contiguous(u32 bo_flags)
{
+ if (bo_flags & XE_BO_FLAG_STOLEN)
+ return true; /* users expect this */
+ else if (bo_flags & XE_BO_FLAG_PINNED &&
+ !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
+ return true; /* needs vmap */
+
/*
* For eviction / restore on suspend / resume objects pinned in VRAM
* must be contiguous, also only contiguous BOs support xe_bo_vmap.
*/
- return bo_flags & (XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+ return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS &&
+ bo_flags & XE_BO_FLAG_PINNED;
}
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
@@ -281,6 +292,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
static void xe_evict_flags(struct ttm_buffer_object *tbo,
struct ttm_placement *placement)
{
+ struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
struct xe_bo *bo;
if (!xe_bo_is_xe_bo(tbo)) {
@@ -290,7 +303,7 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
- *placement = sys_placement;
+ *placement = device_unplugged ? purge_placement : sys_placement;
return;
}
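
Note on purge_placement introduced above: a zero-initialized struct ttm_placement has num_placement == 0, and ttm_bo_validate() treats an empty placement list as a request to drop the backing store entirely ("pipeline gutting") rather than migrate it. That makes it the right eviction target once the device is unplugged: there is no point copying data for a device that is gone.
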
@@ -300,6 +313,11 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
+ if (device_unplugged && !tbo->base.dma_buf) {
+ *placement = purge_placement;
+ return;
+ }
+
/*
* For xe, sg bos that are evicted to system just triggers a
* rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -320,15 +338,13 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
/* struct xe_ttm_tt - Subclassed ttm_tt for xe */
struct xe_ttm_tt {
struct ttm_tt ttm;
- /** @xe - The xe device */
- struct xe_device *xe;
struct sg_table sgt;
struct sg_table *sg;
/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
bool purgeable;
};
-static int xe_tt_map_sg(struct ttm_tt *tt)
+static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
unsigned long num_pages = tt->num_pages;
@@ -343,13 +359,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
num_pages, 0,
(u64)num_pages << PAGE_SHIFT,
- xe_sg_segment_size(xe_tt->xe->drm.dev),
+ xe_sg_segment_size(xe->drm.dev),
GFP_KERNEL);
if (ret)
return ret;
xe_tt->sg = &xe_tt->sgt;
- ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
+ ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (ret) {
sg_free_table(xe_tt->sg);
@@ -360,12 +376,12 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
return 0;
}
-static void xe_tt_unmap_sg(struct ttm_tt *tt)
+static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ dma_unmap_sgtable(xe->drm.dev, xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -384,24 +400,37 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo)
* Account ttm pages against the device shrinker's shrinkable and
* purgeable counts.
*/
-static void xe_ttm_tt_account_add(struct ttm_tt *tt)
+static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->purgeable)
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages);
+ xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages);
else
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0);
+ xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0);
}
-static void xe_ttm_tt_account_subtract(struct ttm_tt *tt)
+static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->purgeable)
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages);
+ xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages);
else
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0);
+ xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0);
+}
+
+static void update_global_total_pages(struct ttm_device *ttm_dev,
+ long num_pages)
+{
+#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
+ struct xe_device *xe = ttm_to_xe_device(ttm_dev);
+ u64 global_total_pages =
+ atomic64_add_return(num_pages, &xe->global_total_pages);
+
+ trace_gpu_mem_total(xe->drm.primary->index, 0,
+ global_total_pages << PAGE_SHIFT);
+#endif
}
static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
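
Note: trace_gpu_mem_total() above is the standard gpu_mem tracepoint from <trace/events/gpu_mem.h>, taking (gpu_id, pid, size_in_bytes); passing pid 0, as here, conventionally reports the device-wide total rather than a per-process figure, which is what userspace GPU memory accounting tools (e.g. on Android) expect.
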
@@ -420,11 +449,10 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
return NULL;
tt = &xe_tt->ttm;
- xe_tt->xe = xe;
extra_pages = 0;
if (xe_bo_needs_ccs_pages(bo))
- extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
+ extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)),
PAGE_SIZE);
/*
@@ -511,21 +539,25 @@ static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
return err;
xe_tt->purgeable = false;
- xe_ttm_tt_account_add(tt);
+ xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt);
+ update_global_total_pages(ttm_dev, tt->num_pages);
return 0;
}
static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
+ struct xe_device *xe = ttm_to_xe_device(ttm_dev);
+
if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return;
- xe_tt_unmap_sg(tt);
+ xe_tt_unmap_sg(xe, tt);
ttm_pool_free(&ttm_dev->pool, tt);
- xe_ttm_tt_account_subtract(tt);
+ xe_ttm_tt_account_subtract(xe, tt);
+ update_global_total_pages(ttm_dev, -(long)tt->num_pages);
}
static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
@@ -657,11 +689,20 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
ttm);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
struct sg_table *sg;
xe_assert(xe, attach);
xe_assert(xe, ttm_bo->ttm);
+ if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM &&
+ ttm_bo->sg) {
+ dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ }
+
if (new_res->mem_type == XE_PL_SYSTEM)
goto out;
@@ -764,7 +805,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
/* Bo creation path, moving to system or TT. */
if ((!old_mem && ttm) && !handle_system_ccs) {
if (new_mem->mem_type == XE_PL_TT)
- ret = xe_tt_map_sg(ttm);
+ ret = xe_tt_map_sg(xe, ttm);
if (!ret)
ttm_bo_move_null(ttm_bo, new_mem);
goto out;
@@ -787,7 +828,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
(!ttm && ttm_bo->type == ttm_bo_type_device);
if (new_mem->mem_type == XE_PL_TT) {
- ret = xe_tt_map_sg(ttm);
+ ret = xe_tt_map_sg(xe, ttm);
if (ret)
goto out;
}
@@ -816,21 +857,6 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
goto out;
}
- /* Reject BO eviction if BO is bound to current VM. */
- if (evict && ctx->resv) {
- struct drm_gpuvm_bo *vm_bo;
-
- drm_gem_for_each_gpuvm_bo(vm_bo, &bo->ttm.base) {
- struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
-
- if (xe_vm_resv(vm) == ctx->resv &&
- xe_vm_in_preempt_fence_mode(vm)) {
- ret = -EBUSY;
- goto out;
- }
- }
- }
-
/*
* Failed multi-hop where the old_mem is still marked as
* TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
@@ -898,79 +924,44 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_pm_runtime_get_noresume(xe);
}
- if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
- /*
- * Kernel memory that is pinned should only be moved on suspend
- * / resume, some of the pinned memory is required for the
- * device to resume / use the GPU to move other evicted memory
- * (user memory) around. This likely could be optimized a bit
- * further where we find the minimum set of pinned memory
- * required for resume but for simplity doing a memcpy for all
- * pinned memory.
- */
- ret = xe_bo_vmap(bo);
- if (!ret) {
- ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
-
- /* Create a new VMAP once kernel BO back in VRAM */
- if (!ret && resource_is_vram(new_mem)) {
- struct xe_vram_region *vram = res_to_mem_region(new_mem);
- void __iomem *new_addr = vram->mapping +
- (new_mem->start << PAGE_SHIFT);
+ if (move_lacks_source) {
+ u32 flags = 0;
- if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
- ret = -EINVAL;
- xe_pm_runtime_put(xe);
- goto out;
- }
+ if (mem_type_is_vram(new_mem->mem_type))
+ flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
+ else if (handle_system_ccs)
+ flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
- xe_assert(xe, new_mem->start ==
- bo->placements->fpfn);
-
- iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
- }
- }
+ fence = xe_migrate_clear(migrate, bo, new_mem, flags);
} else {
- if (move_lacks_source) {
- u32 flags = 0;
-
- if (mem_type_is_vram(new_mem->mem_type))
- flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
- else if (handle_system_ccs)
- flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
-
- fence = xe_migrate_clear(migrate, bo, new_mem, flags);
- }
- else
- fence = xe_migrate_copy(migrate, bo, bo, old_mem,
- new_mem, handle_system_ccs);
- if (IS_ERR(fence)) {
- ret = PTR_ERR(fence);
- xe_pm_runtime_put(xe);
- goto out;
- }
- if (!move_lacks_source) {
- ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
- true, new_mem);
- if (ret) {
- dma_fence_wait(fence, false);
- ttm_bo_move_null(ttm_bo, new_mem);
- ret = 0;
- }
- } else {
- /*
- * ttm_bo_move_accel_cleanup() may blow up if
- * bo->resource == NULL, so just attach the
- * fence and set the new resource.
- */
- dma_resv_add_fence(ttm_bo->base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
+ fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem,
+ handle_system_ccs);
+ }
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ xe_pm_runtime_put(xe);
+ goto out;
+ }
+ if (!move_lacks_source) {
+ ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
+ new_mem);
+ if (ret) {
+ dma_fence_wait(fence, false);
ttm_bo_move_null(ttm_bo, new_mem);
+ ret = 0;
}
-
- dma_fence_put(fence);
+ } else {
+ /*
+ * ttm_bo_move_accel_cleanup() may blow up if
+ * bo->resource == NULL, so just attach the
+ * fence and set the new resource.
+ */
+ dma_resv_add_fence(ttm_bo->base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ ttm_bo_move_null(ttm_bo, new_mem);
}
+ dma_fence_put(fence);
xe_pm_runtime_put(xe);
out:
@@ -983,7 +974,7 @@ out:
if (timeout < 0)
ret = timeout;
- xe_tt_unmap_sg(ttm_bo->ttm);
+ xe_tt_unmap_sg(xe, ttm_bo->ttm);
}
return ret;
@@ -993,6 +984,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
struct ttm_buffer_object *bo,
unsigned long *scanned)
{
+ struct xe_device *xe = ttm_to_xe_device(bo->bdev);
long lret;
/* Fake move to system, without copying data. */
@@ -1007,7 +999,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
if (lret)
return lret;
- xe_tt_unmap_sg(bo->ttm);
+ xe_tt_unmap_sg(xe, bo->ttm);
ttm_bo_move_null(bo, new_resource);
}
@@ -1018,11 +1010,30 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
.allow_move = false});
if (lret > 0)
- xe_ttm_tt_account_subtract(bo->ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm);
return lret;
}
+static bool
+xe_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ if (!ttm_bo_eviction_valuable(bo, place))
+ return false;
+
+ if (!xe_bo_is_xe_bo(bo))
+ return true;
+
+ drm_gem_for_each_gpuvm_bo(vm_bo, &bo->base) {
+ if (xe_vm_is_validating(gpuvm_to_vm(vm_bo->vm)))
+ return false;
+ }
+
+ return true;
+}
+
/**
* xe_bo_shrink() - Try to shrink an xe bo.
* @ctx: The struct ttm_operation_ctx used for shrinking.
@@ -1049,7 +1060,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
struct ttm_place place = {.mem_type = bo->resource->mem_type};
struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
- struct xe_device *xe = xe_tt->xe;
+ struct xe_device *xe = ttm_to_xe_device(bo->bdev);
bool needs_rpm;
long lret = 0L;
@@ -1057,7 +1068,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
(flags.purge && !xe_tt->purgeable))
return -EBUSY;
- if (!ttm_bo_eviction_valuable(bo, &place))
+ if (!xe_bo_eviction_valuable(bo, &place))
return -EBUSY;
if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
@@ -1086,7 +1097,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
xe_pm_runtime_put(xe);
if (lret > 0)
- xe_ttm_tt_account_subtract(tt);
+ xe_ttm_tt_account_subtract(xe, tt);
out_unref:
xe_bo_put(xe_bo);
@@ -1095,6 +1106,80 @@ out_unref:
}
/**
+ * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
+ * up in system memory.
+ * @bo: The buffer object to prepare.
+ *
+ * On successful completion, the object backup pages are allocated. Expectation
+ * is that this is called from the PM notifier, prior to suspend/hibernation.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
+{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup;
+ int ret = 0;
+
+ xe_bo_lock(bo, false);
+
+ xe_assert(xe, !bo->backup_obj);
+
+ /*
+ * Since this is called from the PM notifier we might have raced with
+ * someone unpinning this after we dropped the pinned list lock and
+ * before grabbing the above bo lock.
+ */
+ if (!xe_bo_is_pinned(bo))
+ goto out_unlock_bo;
+
+ if (!xe_bo_is_vram(bo))
+ goto out_unlock_bo;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
+
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
+ }
+
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ ttm_bo_pin(&backup->ttm);
+ bo->backup_obj = backup;
+
+out_unlock_bo:
+ xe_bo_unlock(bo);
+ return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
+ * @bo: The buffer object to undo the prepare for.
+ *
+ * Always returns 0. The backup object is removed, if still present. Expectation
+ * is that this is called from the PM notifier when undoing the prepare step.
+ *
+ * Return: Always returns 0.
+ */
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
+{
+ xe_bo_lock(bo, false);
+ if (bo->backup_obj) {
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
+ }
+ xe_bo_unlock(bo);
+
+ return 0;
+}
+
+/**
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
*
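
Note: a hypothetical sketch of the expected caller of the prepare/unprepare pair, a PM notifier walking the pinned list around hibernation. The pm_notifier field name is assumed, and the locking is simplified for illustration (the real code would hand entries off under xe->pinned.lock, since prepare takes a sleeping bo lock):

	static int xe_bo_pm_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
	{
		struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
		struct xe_bo *bo;
		int err = 0;

		switch (action) {
		case PM_HIBERNATION_PREPARE:
		case PM_SUSPEND_PREPARE:
			list_for_each_entry(bo, &xe->pinned.late.kernel_bo_present,
					    pinned_link)
				err = err ?: xe_bo_notifier_prepare_pinned(bo);
			break;
		case PM_POST_HIBERNATION:
		case PM_POST_SUSPEND:
			list_for_each_entry(bo, &xe->pinned.late.kernel_bo_present,
					    pinned_link)
				xe_bo_notifier_unprepare_pinned(bo);
			break;
		}
		return notifier_from_errno(err);
	}
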
@@ -1107,59 +1192,100 @@ out_unref:
*/
int xe_bo_evict_pinned(struct xe_bo *bo)
{
- struct ttm_place place = {
- .mem_type = XE_PL_TT,
- };
- struct ttm_placement placement = {
- .placement = &place,
- .num_placement = 1,
- };
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .gfp_retry_mayfail = true,
- };
- struct ttm_resource *new_mem;
- int ret;
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool backup_created = false;
+ bool unmap = false;
+ int ret = 0;
- xe_bo_assert_held(bo);
+ xe_bo_lock(bo, false);
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ if (WARN_ON(!bo->ttm.resource)) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (WARN_ON(!xe_bo_is_pinned(bo))) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
if (!xe_bo_is_vram(bo))
- return 0;
+ goto out_unlock_bo;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
+
+ if (!backup) {
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv,
+ NULL, xe_bo_size(bo),
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
+ }
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ backup_created = true;
+ }
- ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
+
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
- if (!bo->ttm.ttm) {
- bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
- if (!bo->ttm.ttm) {
- ret = -ENOMEM;
- goto err_res_free;
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
+
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
+
+ fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
+ backup->ttm.resource, false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_backup;
}
- }
- ret = ttm_bo_populate(&bo->ttm, &ctx);
- if (ret)
- goto err_res_free;
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_backup;
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
+ xe_bo_size(bo));
+ }
- return 0;
+ if (!bo->backup_obj)
+ bo->backup_obj = backup;
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+out_backup:
+ xe_bo_vunmap(backup);
+ if (ret && backup_created)
+ xe_bo_put(backup);
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
@@ -1180,50 +1306,110 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
.interruptible = false,
.gfp_retry_mayfail = false,
};
- struct ttm_resource *new_mem;
- struct ttm_place *place = &bo->placements[0];
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool unmap = false;
int ret;
- xe_bo_assert_held(bo);
+ if (!backup)
+ return 0;
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ xe_bo_lock(bo, false);
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (!xe_bo_is_pinned(backup)) {
+ ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
+ if (ret)
+ goto out_unlock_bo;
+ }
- if (WARN_ON(xe_bo_is_vram(bo)))
- return -EINVAL;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
- if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
- return -EINVAL;
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
- if (!mem_type_is_vram(place->mem_type))
- return 0;
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = ttm_bo_populate(&bo->ttm, &ctx);
- if (ret)
- goto err_res_free;
+ fence = xe_migrate_copy(migrate, backup, bo,
+ backup->ttm.resource, bo->ttm.resource,
+ false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_unlock_bo;
+ }
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_unlock_bo;
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
- return 0;
+ xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
+ xe_bo_size(bo));
+ }
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+ bo->backup_obj = NULL;
+
+out_backup:
+ xe_bo_vunmap(backup);
+ if (!bo->backup_obj) {
+ if (xe_bo_is_pinned(backup))
+ ttm_bo_unpin(&backup->ttm);
+ xe_bo_put(backup);
+ }
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
+{
+ struct ttm_buffer_object *ttm_bo = &bo->ttm;
+ struct ttm_tt *tt = ttm_bo->ttm;
+
+ if (tt) {
+ struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm);
+
+ if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
+ dma_buf_unmap_attachment(ttm_bo->base.import_attach,
+ ttm_bo->sg,
+ DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ xe_tt->sg = NULL;
+ } else if (xe_tt->sg) {
+ dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev,
+ xe_tt->sg,
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(xe_tt->sg);
+ xe_tt->sg = NULL;
+ }
+ }
+
+ return 0;
+}
+
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
unsigned long page_offset)
{
@@ -1371,6 +1557,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
struct xe_res_cursor cursor;
struct xe_vram_region *vram;
int bytes_left = len;
+ int err = 0;
xe_bo_assert_held(bo);
xe_device_assert_mem_access(xe);
@@ -1378,13 +1565,18 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
if (!mem_type_is_vram(ttm_bo->resource->mem_type))
return -EIO;
- /* FIXME: Use GPU for non-visible VRAM */
- if (!xe_ttm_resource_visible(ttm_bo->resource))
- return -EIO;
+ if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) {
+ struct xe_migrate *migrate =
+ mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
+
+ err = xe_migrate_access_memory(migrate, bo, offset, buf, len,
+ write);
+ goto out;
+ }
vram = res_to_mem_region(ttm_bo->resource);
xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
- bo->size - (offset & PAGE_MASK), &cursor);
+ xe_bo_size(bo) - (offset & PAGE_MASK), &cursor);
do {
unsigned long page_offset = (offset & ~PAGE_MASK);
@@ -1404,7 +1596,8 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
xe_res_next(&cursor, PAGE_SIZE);
} while (bytes_left);
- return len;
+out:
+ return err ?: len;
}
const struct ttm_device_funcs xe_ttm_funcs = {
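
Note: the access_memory change above routes CPU-invisible VRAM accesses, and any access of 16 KiB or more, through the migration engine (xe_migrate_access_memory) instead of walking pages through the BAR mapping; the `err ?: len` return preserves the access_memory contract of returning the number of bytes accessed on success.
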
@@ -1418,7 +1611,7 @@ const struct ttm_device_funcs xe_ttm_funcs = {
.io_mem_pfn = xe_ttm_io_mem_pfn,
.access_memory = xe_ttm_access_memory,
.release_notify = xe_ttm_bo_release_notify,
- .eviction_valuable = ttm_bo_eviction_valuable,
+ .eviction_valuable = xe_bo_eviction_valuable,
.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
.swap_notify = xe_ttm_bo_swap_notify,
};
@@ -1448,6 +1641,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
if (bo->vm && xe_bo_is_user(bo))
xe_vm_put(bo->vm);
+ if (bo->parent_obj)
+ xe_bo_put(bo->parent_obj);
+
mutex_lock(&xe->mem_access.vram_userfault.lock);
if (!list_empty(&bo->vram_userfault_link))
list_del(&bo->vram_userfault_link);
@@ -1680,7 +1876,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
bo->ccs_cleared = false;
bo->tile = tile;
- bo->size = size;
bo->flags = flags;
bo->cpu_caching = cpu_caching;
bo->ttm.base.funcs = &xe_gem_object_funcs;
@@ -1858,7 +2053,7 @@ __xe_bo_create_locked(struct xe_device *xe,
if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
- start + bo->size, U64_MAX);
+ start + xe_bo_size(bo), U64_MAX);
} else {
err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
}
@@ -1947,7 +2142,7 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
alignment);
if (IS_ERR(bo))
return bo;
@@ -1979,21 +2174,6 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
}
-struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
- const void *data, size_t size,
- enum ttm_bo_type type, u32 flags)
-{
- struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- type, flags);
- if (IS_ERR(bo))
- return bo;
-
- xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
-
- return bo;
-}
-
static void __xe_bo_unpin_map_no_vm(void *arg)
{
xe_bo_unpin_map_no_vm(arg);
@@ -2049,13 +2229,14 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str
struct xe_bo *bo;
u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
- dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
+ dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, !(*src)->vmap.is_iomem);
bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
- (*src)->size, dst_flags);
+ xe_bo_size(*src), dst_flags);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -2073,10 +2254,16 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
- if (res->mem_type == XE_PL_STOLEN)
+ switch (res->mem_type) {
+ case XE_PL_STOLEN:
return xe_ttm_stolen_gpu_offset(xe);
-
- return res_to_mem_region(res)->dpa_base;
+ case XE_PL_TT:
+ case XE_PL_SYSTEM:
+ return 0;
+ default:
+ return res_to_mem_region(res)->dpa_base;
+ }
}
/**
@@ -2102,17 +2289,14 @@ int xe_bo_pin_external(struct xe_bo *bo)
if (err)
return err;
- if (xe_bo_is_vram(bo)) {
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- }
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
+ spin_unlock(&xe->pinned.lock);
}
ttm_bo_pin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_subtract(bo->ttm.ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2149,31 +2333,18 @@ int xe_bo_pin(struct xe_bo *bo)
if (err)
return err;
- /*
- * For pinned objects in on DGFX, which are also in vram, we expect
- * these to be in contiguous VRAM memory. Required eviction / restore
- * during suspend / resume (force restore to same physical address).
- */
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- if (mem_type_is_vram(place->mem_type)) {
- xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
-
- place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
- vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
- place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
- }
- }
-
if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
+ else
+ list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
spin_unlock(&xe->pinned.lock);
}
ttm_bo_pin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_subtract(bo->ttm.ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2209,7 +2380,7 @@ void xe_bo_unpin_external(struct xe_bo *bo)
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_add(bo->ttm.ttm);
+ xe_ttm_tt_account_add(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2231,10 +2402,17 @@ void xe_bo_unpin(struct xe_bo *bo)
xe_assert(xe, !list_empty(&bo->pinned_link));
list_del_init(&bo->pinned_link);
spin_unlock(&xe->pinned.lock);
+
+ if (bo->backup_obj) {
+ if (xe_bo_is_pinned(bo->backup_obj))
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
+ }
}
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_add(bo->ttm.ttm);
+ xe_ttm_tt_account_add(xe, bo->ttm.ttm);
}
/**
@@ -2260,6 +2438,8 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
.no_wait_gpu = false,
.gfp_retry_mayfail = true,
};
+ struct pin_cookie cookie;
+ int ret;
if (vm) {
lockdep_assert_held(&vm->lock);
@@ -2269,8 +2449,12 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
ctx.resv = xe_vm_resv(vm);
}
+ cookie = xe_vm_set_validating(vm, allow_res_evict);
trace_xe_bo_validate(bo);
- return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
+ ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
+ xe_vm_clear_validating(vm, allow_res_evict, cookie);
+
+ return ret;
}
bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
@@ -2342,7 +2526,7 @@ int xe_bo_vmap(struct xe_bo *bo)
* TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
* to use struct iosys_map.
*/
- ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
+ ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap);
if (ret)
return ret;
@@ -2386,7 +2570,7 @@ typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
u64 value);
static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
- [DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_set_pxp_type,
+ [DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE] = gem_create_set_pxp_type,
};
static int gem_create_user_ext_set_property(struct xe_device *xe,
@@ -2398,7 +2582,7 @@ static int gem_create_user_ext_set_property(struct xe_device *xe,
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -2435,7 +2619,7 @@ static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -2759,19 +2943,17 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
/**
* xe_bo_evict - Evict an object to evict placement
* @bo: The buffer object to migrate.
- * @force_alloc: Set force_alloc in ttm_operation_ctx
*
* On successful completion, the object memory will be moved to evict
* placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
+int xe_bo_evict(struct xe_bo *bo)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = force_alloc,
.gfp_retry_mayfail = true,
};
struct ttm_placement placement;
@@ -2813,6 +2995,14 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
return false;
+ /*
+ * Compression implies coh_none, therefore we know for sure that WB
+ * memory can't currently use compression, which is likely one of the
+ * common cases.
+ */
+ if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)
+ return false;
+
return true;
}
@@ -2888,7 +3078,7 @@ void xe_bo_put(struct xe_bo *bo)
#endif
for_each_tile(tile, xe_bo_device(bo), id)
if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
- might_lock(&bo->ggtt_node[id]->ggtt->lock);
+ xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt);
drm_gem_object_put(&bo->ttm.base);
}
}
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index ec3e4446d027..02e8cde4c6b2 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -39,20 +39,23 @@
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
-#define XE_BO_FLAG_GGTT0 BIT(18)
-#define XE_BO_FLAG_GGTT1 BIT(19)
-#define XE_BO_FLAG_GGTT2 BIT(20)
-#define XE_BO_FLAG_GGTT3 BIT(21)
-#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
- XE_BO_FLAG_GGTT1 | \
- XE_BO_FLAG_GGTT2 | \
- XE_BO_FLAG_GGTT3)
-#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(22)
+#define XE_BO_FLAG_PINNED_NORESTORE BIT(18)
+#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19)
+#define XE_BO_FLAG_GGTT0 BIT(20)
+#define XE_BO_FLAG_GGTT1 BIT(21)
+#define XE_BO_FLAG_GGTT2 BIT(22)
+#define XE_BO_FLAG_GGTT3 BIT(23)
+#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)
/* this one is triggered internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
+ XE_BO_FLAG_GGTT1 | \
+ XE_BO_FLAG_GGTT2 | \
+ XE_BO_FLAG_GGTT3)
+
#define XE_BO_FLAG_GGTTx(tile) \
(XE_BO_FLAG_GGTT0 << (tile)->id)
@@ -115,9 +118,6 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
size_t size, u64 offset,
enum ttm_bo_type type, u32 flags,
u64 alignment);
-struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
- const void *data, size_t size,
- enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
size_t size, u32 flags);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
@@ -235,6 +235,19 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
return xe_bo_addr(bo, 0, page_size);
}
+/**
+ * xe_bo_size() - Xe BO size
+ * @bo: The bo object.
+ *
+ * Simple helper to return Xe BO's size.
+ *
+ * Return: Xe BO's size
+ */
+static inline size_t xe_bo_size(struct xe_bo *bo)
+{
+ return bo->ttm.base.size;
+}
+
static inline u32
__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
@@ -243,7 +256,7 @@ __xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
if (XE_WARN_ON(!ggtt_node))
return 0;
- XE_WARN_ON(ggtt_node->base.size > bo->size);
+ XE_WARN_ON(ggtt_node->base.size > xe_bo_size(bo));
XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
return ggtt_node->base.start;
}
@@ -271,11 +284,15 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res);
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
+int xe_bo_evict(struct xe_bo *bo);
int xe_bo_evict_pinned(struct xe_bo *bo);
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo);
+
extern const struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];
@@ -293,7 +310,7 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo);
static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
{
- return PAGE_ALIGN(bo->ttm.base.size);
+ return PAGE_ALIGN(xe_bo_size(bo));
}
static inline bool xe_bo_has_pages(struct xe_bo *bo)
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 6a40eedd9db1..7484ce55a303 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -10,28 +10,103 @@
#include "xe_ggtt.h"
#include "xe_tile.h"
+typedef int (*xe_pinned_fn)(struct xe_bo *bo);
+
+static int xe_bo_apply_to_pinned(struct xe_device *xe,
+ struct list_head *pinned_list,
+ struct list_head *new_list,
+ const xe_pinned_fn pinned_fn)
+{
+ LIST_HEAD(still_in_list);
+ struct xe_bo *bo;
+ int ret = 0;
+
+ spin_lock(&xe->pinned.lock);
+ while (!ret) {
+ bo = list_first_entry_or_null(pinned_list, typeof(*bo),
+ pinned_link);
+ if (!bo)
+ break;
+ xe_bo_get(bo);
+ list_move_tail(&bo->pinned_link, &still_in_list);
+ spin_unlock(&xe->pinned.lock);
+
+ ret = pinned_fn(bo);
+ if (ret && pinned_list != new_list) {
+ spin_lock(&xe->pinned.lock);
+ /*
+ * We might no longer be pinned, since PM notifier can
+ * call this. If the pinned link is now empty, keep it
+ * that way.
+ */
+ if (!list_empty(&bo->pinned_link))
+ list_move(&bo->pinned_link, pinned_list);
+ spin_unlock(&xe->pinned.lock);
+ }
+ xe_bo_put(bo);
+ spin_lock(&xe->pinned.lock);
+ }
+ list_splice_tail(&still_in_list, new_list);
+ spin_unlock(&xe->pinned.lock);
+
+ return ret;
+}
+
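For orientation, a minimal sketch of how this iterator is driven; the callback name below is hypothetical, while the real callers in this file pass xe_bo_evict_pinned, xe_bo_restore_and_map_ggtt and friends:

	/* Hypothetical callback; real ones evict, restore or dma-unmap @bo. */
	static int example_cb(struct xe_bo *bo)
	{
		return 0;
	}

	static void example_walk(struct xe_device *xe)
	{
		/*
		 * Walking a list onto itself visits every BO once: entries are
		 * parked on a local list while the callback runs unlocked, then
		 * spliced back at the end.
		 */
		(void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
					    &xe->pinned.early.kernel_bo_present,
					    example_cb);
	}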
/**
- * xe_bo_evict_all - evict all BOs from VRAM
+ * xe_bo_notifier_prepare_all_pinned() - Pre-allocate the backing pages for all
+ * pinned VRAM objects which need to be saved.
+ * @xe: xe device
+ *
+ * Should be called from PM notifier when preparing for s3/s4.
*
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe)
+{
+ int ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+
+ return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_all_pinned() - Remove the backing pages for all
+ * pinned VRAM objects which have been restored.
* @xe: xe device
*
- * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
- * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
- * All eviction magic done via TTM calls.
+ * Should be called from PM notifier after exiting s3/s4 (either on success or
+ * failure).
+ */
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe)
+{
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+}
+
+/**
+ * xe_bo_evict_all_user - evict all non-pinned user BOs from VRAM
+ * @xe: xe device
*
- * Evict == move VRAM BOs to temporary (typically system) memory.
+ * Evict non-pinned user BOs (via GPU).
*
- * This function should be called before the device goes into a suspend state
- * where the VRAM loses power.
+ * Evict == move VRAM BOs to temporary (typically system) memory.
*/
-int xe_bo_evict_all(struct xe_device *xe)
+int xe_bo_evict_all_user(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
u32 mem_type;
- u8 id;
int ret;
/* User memory */
@@ -57,34 +132,38 @@ int xe_bo_evict_all(struct xe_device *xe)
}
}
- /* Pinned user memory in VRAM */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &still_in_list);
- spin_unlock(&xe->pinned.lock);
+ return 0;
+}
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
- }
+/**
+ * xe_bo_evict_all - evict all BOs from VRAM
+ * @xe: xe device
+ *
+ * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
+ * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
+ * All eviction magic done via TTM calls.
+ *
+ * Evict == move VRAM BOs to temporary (typically system) memory.
+ *
+ * This function should be called before the device goes into a suspend state
+ * where the VRAM loses power.
+ */
+int xe_bo_evict_all(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ u8 id;
+ int ret;
- spin_lock(&xe->pinned.lock);
- }
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
+ ret = xe_bo_evict_all_user(xe);
+ if (ret)
+ return ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
+
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_evict_pinned);
/*
* Wait for all user BOs to be evicted as those evictions depend on the
@@ -93,32 +172,47 @@ int xe_bo_evict_all(struct xe_device *xe)
for_each_tile(tile, xe, id)
xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
- spin_unlock(&xe->pinned.lock);
+ if (ret)
+ return ret;
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret)
- return ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.evicted,
+ xe_bo_evict_pinned);
+}
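Putting the new helpers together, the intended suspend/resume ordering looks roughly as follows; this is a sketch assuming the xe PM code (not shown in this diff) wires the calls up:

	static int example_suspend(struct xe_device *xe)
	{
		/*
		 * The PM notifier has already run
		 * xe_bo_notifier_prepare_all_pinned(), so backup pages were
		 * allocated while allocations were still cheap.
		 */
		return xe_bo_evict_all(xe);	/* user, then late, then early BOs */
	}

	static int example_resume(struct xe_device *xe)
	{
		int ret;

		ret = xe_bo_restore_early(xe);	/* CPU copy, needed before GT init */
		if (ret)
			return ret;

		ret = xe_bo_restore_late(xe);	/* blitter copy, after GT init */
		/* The PM notifier later runs xe_bo_notifier_unprepare_all_pinned(). */
		return ret;
	}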
- spin_lock(&xe->pinned.lock);
+static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ ret = xe_bo_restore_pinned(bo);
+ if (ret)
+ return ret;
+
+ if (bo->flags & XE_BO_FLAG_GGTT) {
+ struct xe_tile *tile;
+ u8 id;
+
+ for_each_tile(tile, xe_bo_device(bo), id) {
+ if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
+ continue;
+
+ xe_ggtt_map_bo_unlocked(tile->mem.ggtt, bo);
+ }
}
- spin_unlock(&xe->pinned.lock);
+
+ /*
+ * We expect validate to trigger a move to VRAM and our move code
+ * should set up the iosys map.
+ */
+ xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) ||
+ !iosys_map_is_null(&bo->vmap));
return 0;
}
/**
- * xe_bo_restore_kernel - restore kernel BOs to VRAM
+ * xe_bo_restore_early - restore early phase kernel BOs to VRAM
*
* @xe: xe device
*
@@ -128,111 +222,130 @@ int xe_bo_evict_all(struct xe_device *xe)
* This function should be called early, before trying to init the GT, on device
* resume.
*/
-int xe_bo_restore_kernel(struct xe_device *xe)
+int xe_bo_restore_early(struct xe_device *xe)
{
- struct xe_bo *bo;
- int ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.evicted,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
+}
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.evicted,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
+/**
+ * xe_bo_restore_late - restore pinned late phase BOs
+ *
+ * @xe: xe device
+ *
+ * Move pinned user and kernel BOs which can use blitter from temporary
+ * (typically system) memory to VRAM. All moves done via TTM calls.
+ *
+ * This function should be called late, after GT init, on device resume.
+ */
+int xe_bo_restore_late(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ int ret, id;
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- if (ret) {
- xe_bo_put(bo);
- return ret;
- }
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.evicted,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
- if (bo->flags & XE_BO_FLAG_GGTT) {
- struct xe_tile *tile;
- u8 id;
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- for_each_tile(tile, xe, id) {
- if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
- continue;
+ if (ret)
+ return ret;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
- }
- }
+ if (!IS_DGFX(xe))
+ return 0;
- /*
- * We expect validate to trigger a move VRAM and our move code
- * should setup the iosys map.
- */
- xe_assert(xe, !iosys_map_is_null(&bo->vmap));
+ /* Pinned user memory in VRAM should be validated on resume */
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_restore_pinned);
- xe_bo_put(bo);
+ /* Wait for restore to complete */
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- }
- spin_unlock(&xe->pinned.lock);
+ return ret;
+}
- return 0;
+static void xe_bo_pci_dev_remove_pinned(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_dma_unmap_pinned);
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
}
/**
- * xe_bo_restore_user - restore pinned user BOs to VRAM
- *
- * @xe: xe device
+ * xe_bo_pci_dev_remove_all() - Handle bos when the pci_device is about to be removed
+ * @xe: The xe device.
*
- * Move pinned user BOs from temporary (typically system) memory to VRAM via
- * CPU. All moves done via TTM calls.
+ * On pci_device removal we need to drop all dma mappings and move
+ * the data of exported bos out to system. This includes SVM bos and
+ * exported dma-buf bos. This is done by evicting all bos, but
+ * the evict placement in xe_evict_flags() is chosen such that all
+ * bos except those mentioned are purged, and thus their memory
+ * is released.
*
- * This function should be called late, after GT init, on device resume.
+ * For pinned bos, we're unmapping dma.
*/
-int xe_bo_restore_user(struct xe_device *xe)
+void xe_bo_pci_dev_remove_all(struct xe_device *xe)
{
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
- u8 id;
- int ret;
+ unsigned int mem_type;
- if (!IS_DGFX(xe))
- return 0;
+ /*
+ * Move pagemap bos and exported dma-buf to system, and
+ * purge everything else.
+ */
+ for (mem_type = XE_PL_VRAM1; mem_type >= XE_PL_TT; --mem_type) {
+ struct ttm_resource_manager *man =
+ ttm_manager_type(&xe->ttm, mem_type);
- /* Pinned user memory in VRAM should be validated on resume */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- list_move_tail(&bo->pinned_link, &still_in_list);
- xe_bo_get(bo);
- spin_unlock(&xe->pinned.lock);
+ if (man) {
+ int ret = ttm_resource_manager_evict_all(&xe->ttm, man);
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
+ drm_WARN_ON(&xe->drm, ret);
}
-
- spin_lock(&xe->pinned.lock);
}
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- /* Wait for restore to complete */
- for_each_tile(tile, xe, id)
- xe_tile_migrate_wait(tile);
+ xe_bo_pci_dev_remove_pinned(xe);
+}
- return 0;
+static void xe_bo_pinned_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+}
+
+/**
+ * xe_bo_pinned_init() - Initialize pinned bo tracking
+ * @xe: The xe device.
+ *
+ * Initializes the lists and locks required for pinned bo
+ * tracking and registers a callback to dma-unmap
+ * any remaining pinned bos on pci device removal.
+ *
+ * Return: %0 on success, negative error code on error.
+ */
+int xe_bo_pinned_init(struct xe_device *xe)
+{
+ spin_lock_init(&xe->pinned.lock);
+ INIT_LIST_HEAD(&xe->pinned.early.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.early.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.late.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.external);
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_bo_pinned_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.h b/drivers/gpu/drm/xe/xe_bo_evict.h
index 746894798852..e8385cb7f5e9 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.h
+++ b/drivers/gpu/drm/xe/xe_bo_evict.h
@@ -9,7 +9,13 @@
struct xe_device;
int xe_bo_evict_all(struct xe_device *xe);
-int xe_bo_restore_kernel(struct xe_device *xe);
-int xe_bo_restore_user(struct xe_device *xe);
+int xe_bo_evict_all_user(struct xe_device *xe);
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe);
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe);
+int xe_bo_restore_early(struct xe_device *xe);
+int xe_bo_restore_late(struct xe_device *xe);
+void xe_bo_pci_dev_remove_all(struct xe_device *xe);
+
+int xe_bo_pinned_init(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 15a92e3d4898..ff560d82496f 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -28,8 +28,10 @@ struct xe_vm;
struct xe_bo {
/** @ttm: TTM base buffer object */
struct ttm_buffer_object ttm;
- /** @size: Size of this buffer object */
- size_t size;
+ /** @backup_obj: The backup object when pinned and suspended (vram only) */
+ struct xe_bo *backup_obj;
+ /** @parent_obj: Ref to parent bo if this is a backup_obj */
+ struct xe_bo *parent_obj;
/** @flags: flags for this buffer object */
u32 flags;
/** @vm: VM this BO is attached to, for extobj this will be NULL */
@@ -82,7 +84,7 @@ struct xe_bo {
u16 cpu_caching;
/** @devmem_allocation: SVM device memory allocation */
- struct drm_gpusvm_devmem devmem_allocation;
+ struct drm_pagemap_devmem devmem_allocation;
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
struct list_head vram_userfault_link;
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
new file mode 100644
index 000000000000..e9b46a2d0019
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/bitops.h>
+#include <linux/configfs.h>
+#include <linux/find.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include "xe_configfs.h"
+#include "xe_module.h"
+
+#include "xe_hw_engine_types.h"
+
+/**
+ * DOC: Xe Configfs
+ *
+ * Overview
+ * =========
+ *
+ * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
+ * configfs subsystem called ``'xe'`` that creates a directory in the mounted
+ * configfs directory. The user can create devices under this directory and
+ * configure them as necessary. See Documentation/filesystems/configfs.rst for
+ * more information about how configfs works.
+ *
+ * Create devices
+ * ===============
+ *
+ * In order to create a device, the user has to create a directory inside ``'xe'``::
+ *
+ * mkdir /sys/kernel/config/xe/0000:03:00.0/
+ *
+ * Every device created is populated by the driver with entries that can be
+ * used to configure it::
+ *
+ * /sys/kernel/config/xe/
+ * .. 0000:03:00.0/
+ * ... survivability_mode
+ *
+ * Configure Attributes
+ * ====================
+ *
+ * Survivability mode:
+ * -------------------
+ *
+ * Enable survivability mode on supported cards. This setting only takes
+ * effect when probing the device. Example to enable it::
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
+ *
+ * Allowed engines:
+ * ----------------
+ *
+ * Allow only a set of engine(s) to be available, disabling the other engines
+ * even if they are available in hardware. This is applied after HW fuses are
+ * considered on each tile. Examples:
+ *
+ * Allow only one render and one copy engine, nothing else::
+ *
+ * # echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *
+ * Allow only compute engines and first copy engine::
+ *
+ * # echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *
+ * Note that the engine names are the per-GT hardware names. On multi-tile
+ * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
+ * and copy engines on each tile.
+ *
+ * The requested configuration may not be supported by the platform, in which
+ * case the driver may fail to probe: for example, when at least one copy
+ * engine is expected to be available for migrations but all of them are
+ * disabled. This is intended for debugging purposes only.
+ *
+ * Remove devices
+ * ==============
+ *
+ * The created device directories can be removed using ``rmdir``::
+ *
+ * rmdir /sys/kernel/config/xe/0000:03:00.0/
+ */
+
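On the driver side these attributes are meant to be sampled at probe time via the accessors added at the bottom of this file; a minimal sketch, with the function name being illustrative:

	static void example_apply_config(struct pci_dev *pdev)
	{
		u64 allowed = xe_configfs_get_engines_allowed(pdev);

		/* U64_MAX is the default: no restriction configured. */
		if (allowed != U64_MAX)
			pr_debug("xe: engines restricted to mask %#llx\n", allowed);

		if (xe_configfs_get_survivability_mode(pdev))
			pr_debug("xe: survivability mode requested\n");
	}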
+struct xe_config_device {
+ struct config_group group;
+
+ bool survivability_mode;
+ u64 engines_allowed;
+
+ /* protects attributes */
+ struct mutex lock;
+};
+
+struct engine_info {
+ const char *cls;
+ u64 mask;
+};
+
+/* Some helpful macros to aid in sizing the buffers allocated when parsing */
+#define MAX_ENGINE_CLASS_CHARS 5
+#define MAX_ENGINE_INSTANCE_CHARS 2
+
+static const struct engine_info engine_info[] = {
+ { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
+ { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
+ { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
+ { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
+ { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
+ { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
+};
+
+static struct xe_config_device *to_xe_config_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct xe_config_device, group);
+}
+
+static ssize_t survivability_mode_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ return sprintf(page, "%d\n", dev->survivability_mode);
+}
+
+static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ bool survivability_mode;
+ int ret;
+
+ ret = kstrtobool(page, &survivability_mode);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = survivability_mode;
+ mutex_unlock(&dev->lock);
+
+ return len;
+}
+
+static ssize_t engines_allowed_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ char *p = page;
+
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ u64 mask = engine_info[i].mask;
+
+ if ((dev->engines_allowed & mask) == mask) {
+ p += sprintf(p, "%s*\n", engine_info[i].cls);
+ } else if (mask & dev->engines_allowed) {
+ u16 bit0 = __ffs64(mask), bit;
+
+ mask &= dev->engines_allowed;
+
+ for_each_set_bit(bit, (const unsigned long *)&mask, 64)
+ p += sprintf(p, "%s%u\n", engine_info[i].cls,
+ bit - bit0);
+ }
+ }
+
+ return p - page;
+}
+
+static bool lookup_engine_mask(const char *pattern, u64 *mask)
+{
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ u8 instance;
+ u16 bit;
+
+ if (!str_has_prefix(pattern, engine_info[i].cls))
+ continue;
+
+ pattern += strlen(engine_info[i].cls);
+
+ if (!strcmp(pattern, "*")) {
+ *mask = engine_info[i].mask;
+ return true;
+ }
+
+ if (kstrtou8(pattern, 10, &instance))
+ return false;
+
+ bit = __ffs64(engine_info[i].mask) + instance;
+ if (bit >= fls64(engine_info[i].mask))
+ return false;
+
+ *mask = BIT_ULL(bit);
+ return true;
+ }
+
+ return false;
+}
+
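Two worked examples of the parsing above, written as illustrative assertions (the class masks come from xe_hw_engine_types.h):

	static void example_lookup(void)
	{
		u64 mask;

		/* "bcs1" -> second bit of the copy-engine class mask. */
		if (lookup_engine_mask("bcs1", &mask))
			WARN_ON(mask != BIT_ULL(__ffs64(XE_HW_ENGINE_BCS_MASK) + 1));

		/* "ccs*" -> every compute-engine instance at once. */
		if (lookup_engine_mask("ccs*", &mask))
			WARN_ON(mask != XE_HW_ENGINE_CCS_MASK);
	}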
+static ssize_t engines_allowed_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ size_t patternlen, p;
+ u64 mask, val = 0;
+
+ for (p = 0; p < len; p += patternlen + 1) {
+ char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
+
+ patternlen = strcspn(page + p, ",\n");
+ if (patternlen >= sizeof(buf))
+ return -EINVAL;
+
+ memcpy(buf, page + p, patternlen);
+ buf[patternlen] = '\0';
+
+ if (!lookup_engine_mask(buf, &mask))
+ return -EINVAL;
+
+ val |= mask;
+ }
+
+ mutex_lock(&dev->lock);
+ dev->engines_allowed = val;
+ mutex_unlock(&dev->lock);
+
+ return len;
+}
+
+CONFIGFS_ATTR(, survivability_mode);
+CONFIGFS_ATTR(, engines_allowed);
+
+static struct configfs_attribute *xe_config_device_attrs[] = {
+ &attr_survivability_mode,
+ &attr_engines_allowed,
+ NULL,
+};
+
+static void xe_config_device_release(struct config_item *item)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ mutex_destroy(&dev->lock);
+ kfree(dev);
+}
+
+static struct configfs_item_operations xe_config_device_ops = {
+ .release = xe_config_device_release,
+};
+
+static const struct config_item_type xe_config_device_type = {
+ .ct_item_ops = &xe_config_device_ops,
+ .ct_attrs = xe_config_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *xe_config_make_device_group(struct config_group *group,
+ const char *name)
+{
+ unsigned int domain, bus, slot, function;
+ struct xe_config_device *dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function);
+ if (ret != 4)
+ return ERR_PTR(-EINVAL);
+
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+ pci_dev_put(pdev);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ /* Default values */
+ dev->engines_allowed = U64_MAX;
+
+ config_group_init_type_name(&dev->group, name, &xe_config_device_type);
+
+ mutex_init(&dev->lock);
+
+ return &dev->group;
+}
+
+static struct configfs_group_operations xe_config_device_group_ops = {
+ .make_group = xe_config_make_device_group,
+};
+
+static const struct config_item_type xe_configfs_type = {
+ .ct_group_ops = &xe_config_device_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem xe_configfs = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "xe",
+ .ci_type = &xe_configfs_type,
+ },
+ },
+};
+
+static struct xe_config_device *configfs_find_group(struct pci_dev *pdev)
+{
+ struct config_item *item;
+ char name[64];
+
+ snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ mutex_lock(&xe_configfs.su_mutex);
+ item = config_group_find_item(&xe_configfs.su_group, name);
+ mutex_unlock(&xe_configfs.su_mutex);
+
+ if (!item)
+ return NULL;
+
+ return to_xe_config_device(item);
+}
+
+/**
+ * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and return
+ * the survivability mode attribute.
+ *
+ * Return: survivability mode if config group is found, false otherwise
+ */
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+ bool mode;
+
+ if (!dev)
+ return false;
+
+ mode = dev->survivability_mode;
+ config_item_put(&dev->group.cg_item);
+
+ return mode;
+}
+
+/**
+ * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and clear the
+ * survivability mode attribute.
+ */
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+
+ if (!dev)
+ return;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = 0;
+ mutex_unlock(&dev->lock);
+
+ config_item_put(&dev->group.cg_item);
+}
+
+/**
+ * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and return
+ * the mask of engines allowed to be used.
+ *
+ * Return: engine mask with allowed engines
+ */
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+ u64 engines_allowed;
+
+ if (!dev)
+ return U64_MAX;
+
+ engines_allowed = dev->engines_allowed;
+ config_item_put(&dev->group.cg_item);
+
+ return engines_allowed;
+}
+
+int __init xe_configfs_init(void)
+{
+ struct config_group *root = &xe_configfs.su_group;
+ int ret;
+
+ config_group_init(root);
+ mutex_init(&xe_configfs.su_mutex);
+ ret = configfs_register_subsystem(&xe_configfs);
+ if (ret) {
+ pr_err("Error %d while registering %s subsystem\n",
+ ret, root->cg_item.ci_namebuf);
+ return ret;
+ }
+
+ return 0;
+}
+
+void __exit xe_configfs_exit(void)
+{
+ configfs_unregister_subsystem(&xe_configfs);
+}
+
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
new file mode 100644
index 000000000000..fb8764008089
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#ifndef _XE_CONFIGFS_H_
+#define _XE_CONFIGFS_H_
+
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct pci_dev;
+
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+int xe_configfs_init(void);
+void xe_configfs_exit(void);
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
+#else
+static inline int xe_configfs_init(void) { return 0; }
+static inline void xe_configfs_exit(void) { }
+static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
+static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { }
+static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index d0503959a8ed..26e9d146ccbf 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -20,7 +20,9 @@
#include "xe_pm.h"
#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
#include "xe_step.h"
+#include "xe_wa.h"
#ifdef CONFIG_DRM_XE_DEBUG
#include "xe_bo_evict.h"
@@ -82,9 +84,28 @@ static int sriov_info(struct seq_file *m, void *data)
return 0;
}
+static int workarounds(struct xe_device *xe, struct drm_printer *p)
+{
+ xe_pm_runtime_get(xe);
+ xe_wa_device_dump(xe, p);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static int workaround_info(struct seq_file *m, void *data)
+{
+ struct xe_device *xe = node_to_xe(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ workarounds(xe, &p);
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"info", info, 0},
{ .name = "sriov_info", .show = sriov_info, },
+ { .name = "workarounds", .show = workaround_info, },
};
static int forcewake_open(struct inode *inode, struct file *file)
@@ -191,6 +212,41 @@ static const struct file_operations wedged_mode_fops = {
.write = wedged_mode_set,
};
+static ssize_t atomic_svm_timeslice_ms_show(struct file *f, char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ char buf[32];
+ int len = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", xe->atomic_svm_timeslice_ms);
+
+ return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t atomic_svm_timeslice_ms_set(struct file *f,
+ const char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ u32 atomic_svm_timeslice_ms;
+ ssize_t ret;
+
+ ret = kstrtouint_from_user(ubuf, size, 0, &atomic_svm_timeslice_ms);
+ if (ret)
+ return ret;
+
+ xe->atomic_svm_timeslice_ms = atomic_svm_timeslice_ms;
+
+ return size;
+}
+
+static const struct file_operations atomic_svm_timeslice_ms_fops = {
+ .owner = THIS_MODULE,
+ .read = atomic_svm_timeslice_ms_show,
+ .write = atomic_svm_timeslice_ms_set,
+};
+
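With the file registered below under the device's debugfs root, the knob can be exercised from userspace, for example ``cat /sys/kernel/debug/dri/0/atomic_svm_timeslice_ms`` to read it and ``echo 10 > /sys/kernel/debug/dri/0/atomic_svm_timeslice_ms`` to change it; the dri index depends on the system, so the path here is illustrative.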
void xe_debugfs_register(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
@@ -211,6 +267,9 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("wedged_mode", 0600, root, xe,
&wedged_mode_fops);
+ debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe,
+ &atomic_svm_timeslice_ms_fops);
+
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
man = ttm_manager_type(bdev, mem_type);
@@ -235,4 +294,7 @@ void xe_debugfs_register(struct xe_device *xe)
xe_pxp_debugfs_register(xe->pxp);
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
+
+ if (IS_SRIOV_PF(xe))
+ xe_sriov_pf_debugfs_register(xe, root);
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 81b9d9bb3f57..203e3038cc81 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -80,7 +80,8 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
return &q->gt->uc.guc;
}
-static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count,
+ ssize_t start,
struct xe_devcoredump *coredump)
{
struct xe_device *xe;
@@ -94,7 +95,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
ss = &coredump->snapshot;
iter.data = buffer;
- iter.start = 0;
+ iter.start = start;
iter.remain = count;
p = drm_coredump_printer(&iter);
@@ -168,12 +169,34 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
ss->vm = NULL;
}
+#define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G)
+
+/**
+ * xe_devcoredump_read() - Read data from the Xe device coredump snapshot
+ * @buffer: Destination buffer to copy the coredump data into
+ * @offset: Offset in the coredump data to start reading from
+ * @count: Number of bytes to read
+ * @data: Pointer to the xe_devcoredump structure
+ * @datalen: Length of the data (unused)
+ *
+ * Reads a chunk of the coredump snapshot data into the provided buffer.
+ * If the devcoredump is smaller than 1.5 GB (XE_DEVCOREDUMP_CHUNK_MAX),
+ * it is read directly from a pre-written buffer. For larger devcoredumps,
+ * the pre-written buffer must be periodically repopulated from the snapshot
+ * state due to kvmalloc size limitations.
+ *
+ * Return: Number of bytes copied on success, or a negative error code on failure.
+ */
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
struct xe_devcoredump_snapshot *ss;
- ssize_t byte_copied;
+ ssize_t byte_copied = 0;
+ u32 chunk_offset;
+ ssize_t new_chunk_position;
+ bool pm_needed = false;
+ int ret = 0;
if (!coredump)
return -ENODEV;
@@ -183,25 +206,45 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
+ pm_needed = ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX;
+ if (pm_needed)
+ xe_pm_runtime_get(gt_to_xe(ss->gt));
+
mutex_lock(&coredump->lock);
if (!ss->read.buffer) {
- mutex_unlock(&coredump->lock);
- return -ENODEV;
+ ret = -ENODEV;
+ goto unlock;
}
- if (offset >= ss->read.size) {
- mutex_unlock(&coredump->lock);
- return 0;
+ if (offset >= ss->read.size)
+ goto unlock;
+
+ new_chunk_position = div_u64_rem(offset,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ &chunk_offset);
+
+ if (offset >= ss->read.chunk_position + XE_DEVCOREDUMP_CHUNK_MAX ||
+ offset < ss->read.chunk_position) {
+ ss->read.chunk_position = new_chunk_position *
+ XE_DEVCOREDUMP_CHUNK_MAX;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ ss->read.chunk_position, coredump);
}
byte_copied = count < ss->read.size - offset ? count :
ss->read.size - offset;
- memcpy(buffer, ss->read.buffer + offset, byte_copied);
+ memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied);
+unlock:
mutex_unlock(&coredump->lock);
- return byte_copied;
+ if (pm_needed)
+ xe_pm_runtime_put(gt_to_xe(ss->gt));
+
+ return byte_copied ? byte_copied : ret;
}
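A worked example of the chunk arithmetic above, assuming the 1.5 GiB XE_DEVCOREDUMP_CHUNK_MAX defined earlier (illustrative only):

	static void example_chunk_math(void)
	{
		u32 chunk_offset;
		u64 chunk;

		/* offset = 2 GiB: 2G / 1.5G = chunk 1, 512 MiB into that chunk. */
		chunk = div_u64_rem(SZ_2G, XE_DEVCOREDUMP_CHUNK_MAX, &chunk_offset);
		WARN_ON(chunk != 1 || chunk_offset != SZ_512M);
	}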
static void xe_devcoredump_free(void *data)
@@ -254,17 +297,32 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
- xe_pm_runtime_put(xe);
+ ss->read.chunk_position = 0;
/* Calculate devcoredump size */
- ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
-
- ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
- if (!ss->read.buffer)
- return;
+ ss->read.size = __xe_devcoredump_read(NULL, LONG_MAX, 0, coredump);
+
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) {
+ ss->read.buffer = kvmalloc(XE_DEVCOREDUMP_CHUNK_MAX,
+ GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ 0, coredump);
+ } else {
+ ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer, ss->read.size, 0,
+ coredump);
+ xe_devcoredump_snapshot_free(ss);
+ }
- __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
- xe_devcoredump_snapshot_free(ss);
+put_pm:
+ xe_pm_runtime_put(xe);
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
@@ -273,13 +331,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
struct xe_guc *guc = exec_queue_to_guc(q);
- u32 adj_logical_mask = q->logical_mask;
- u32 width_mask = (0x1 << q->width) - 1;
const char *process_name = "no process";
-
unsigned int fw_ref;
bool cookie;
- int i;
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
@@ -295,14 +349,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
cookie = dma_fence_begin_signalling();
- for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
- if (adj_logical_mask & BIT(i)) {
- adj_logical_mask |= width_mask << i;
- i += q->width;
- } else {
- ++i;
- }
- }
/* keep going if fw fails as we still want to save the memory and SW data */
fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
@@ -425,7 +471,7 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffi
if (offset & 3)
drm_printf(p, "Offset not word aligned: %zu", offset);
- line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
+ line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_ATOMIC);
if (!line_buff) {
drm_printf(p, "Failed to allocate line buffer\n");
return;
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 1a1d16a96b2d..a174385a6d83 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -66,6 +66,8 @@ struct xe_devcoredump_snapshot {
struct {
/** @read.size: size of devcoredump in human readable format */
ssize_t size;
+ /** @read.chunk_position: position of devcoredump chunk */
+ ssize_t chunk_position;
/** @read.buffer: buffer of devcoredump in human readable format */
char *buffer;
} read;
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 00191227bc95..6ece4defa9df 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -23,8 +23,10 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
+#include "xe_bo_evict.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
+#include "xe_device_sysfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
@@ -38,12 +40,14 @@
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
+#include "xe_guc_pc.h"
#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
+#include "xe_i2c.h"
#include "xe_irq.h"
-#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
+#include "xe_nvm.h"
#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pat.h"
@@ -64,6 +68,7 @@
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
+#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
@@ -400,9 +405,6 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
if (xe->unordered_wq)
destroy_workqueue(xe->unordered_wq);
- if (!IS_ERR_OR_NULL(xe->mem.shrinker))
- xe_shrinker_destroy(xe->mem.shrinker);
-
if (xe->destroy_wq)
destroy_workqueue(xe->destroy_wq);
@@ -436,13 +438,14 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
- xe->mem.shrinker = xe_shrinker_create(xe);
- if (IS_ERR(xe->mem.shrinker))
- return ERR_CAST(xe->mem.shrinker);
+ err = xe_shrinker_create(xe);
+ if (err)
+ goto err;
xe->info.devid = pdev->device;
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
+ xe->atomic_svm_timeslice_ms = 5;
err = xe_irq_init(xe);
if (err)
@@ -467,10 +470,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
- spin_lock_init(&xe->pinned.lock);
- INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
- INIT_LIST_HEAD(&xe->pinned.external_vram);
- INIT_LIST_HEAD(&xe->pinned.evicted);
+ err = xe_bo_pinned_init(xe);
+ if (err)
+ goto err;
xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
WQ_MEM_RECLAIM);
@@ -492,10 +494,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
- err = xe_display_create(xe);
- if (WARN_ON(err))
- goto err;
-
return xe;
err:
@@ -505,7 +503,15 @@ ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */
static bool xe_driver_flr_disabled(struct xe_device *xe)
{
- return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
+ if (IS_SRIOV_VF(xe))
+ return true;
+
+ if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
+ drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
+ return true;
+ }
+
+ return false;
}
/*
@@ -523,7 +529,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe)
*/
static void __xe_driver_flr(struct xe_device *xe)
{
- const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
+ const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
int ret;
@@ -569,10 +575,8 @@ static void __xe_driver_flr(struct xe_device *xe)
static void xe_driver_flr(struct xe_device *xe)
{
- if (xe_driver_flr_disabled(xe)) {
- drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
+ if (xe_driver_flr_disabled(xe))
return;
- }
__xe_driver_flr(xe);
}
@@ -677,6 +681,7 @@ static void sriov_update_device_info(struct xe_device *xe)
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
xe->info.probe_display = 0;
+ xe->info.has_heci_cscfi = 0;
xe->info.has_heci_gscfi = 0;
xe->info.skip_guc_pc = 1;
xe->info.skip_pcode = 1;
@@ -697,6 +702,9 @@ int xe_device_probe_early(struct xe_device *xe)
{
int err;
+ xe_wa_device_init(xe);
+ xe_wa_process_device_oob(xe);
+
err = xe_mmio_probe_early(xe);
if (err)
return err;
@@ -706,7 +714,7 @@ int xe_device_probe_early(struct xe_device *xe)
sriov_update_device_info(xe);
err = xe_pcode_probe_early(xe);
- if (err) {
+ if (err || xe_survivability_mode_is_requested(xe)) {
int save_err = err;
/*
@@ -729,6 +737,7 @@ int xe_device_probe_early(struct xe_device *xe)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
static int probe_has_flat_ccs(struct xe_device *xe)
{
@@ -781,51 +790,18 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
- err = xe_ttm_sys_mgr_init(xe);
- if (err)
- return err;
-
for_each_gt(gt, xe, id) {
err = xe_gt_init_early(gt);
if (err)
return err;
-
- /*
- * Only after this point can GT-specific MMIO operations
- * (including things like communication with the GuC)
- * be performed.
- */
- xe_gt_mmio_init(gt);
}
for_each_tile(tile, xe, id) {
- if (IS_SRIOV_VF(xe)) {
- xe_guc_comm_init_early(&tile->primary_gt->uc.guc);
- err = xe_gt_sriov_vf_bootstrap(tile->primary_gt);
- if (err)
- return err;
- err = xe_gt_sriov_vf_query_config(tile->primary_gt);
- if (err)
- return err;
- }
err = xe_ggtt_init_early(tile->mem.ggtt);
if (err)
return err;
- err = xe_memirq_init(&tile->memirq);
- if (err)
- return err;
- }
-
- for_each_gt(gt, xe, id) {
- err = xe_gt_init_hwconfig(gt);
- if (err)
- return err;
}
- err = xe_devcoredump_init(xe);
- if (err)
- return err;
-
/*
* From here on, if a step fails, make sure a Driver-FLR is triggered
*/
@@ -847,6 +823,14 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
+ /*
+ * Allow allocations only now to ensure xe_display_init_early()
+ * is always the first to allocate.
+ */
+ err = xe_ttm_sys_mgr_init(xe);
+ if (err)
+ return err;
+
/* Allocate and map stolen after potential VRAM resize */
err = xe_ttm_stolen_mgr_init(xe);
if (err)
@@ -878,6 +862,16 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
+ if (xe->tiles->media_gt &&
+ XE_WA(xe->tiles->media_gt, 15015404425_disable))
+ XE_DEVICE_WA_DISABLE(xe, 15015404425);
+
+ err = xe_devcoredump_init(xe);
+ if (err)
+ return err;
+
+ xe_nvm_init(xe);
+
err = xe_heci_gsc_init(xe);
if (err)
return err;
@@ -908,12 +902,20 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err_unregister_display;
+ err = xe_device_sysfs_init(xe);
+ if (err)
+ goto err_unregister_display;
+
xe_debugfs_register(xe);
err = xe_hwmon_register(xe);
if (err)
goto err_unregister_display;
+ err = xe_i2c_probe(xe);
+ if (err)
+ goto err_unregister_display;
+
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
@@ -931,7 +933,11 @@ void xe_device_remove(struct xe_device *xe)
{
xe_display_unregister(xe);
+ xe_nvm_fini(xe);
+
drm_dev_unplug(&xe->drm);
+
+ xe_bo_pci_dev_remove_all(xe);
}
void xe_device_shutdown(struct xe_device *xe)
@@ -972,38 +978,15 @@ void xe_device_wmb(struct xe_device *xe)
xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}
-/**
- * xe_device_td_flush() - Flush transient L3 cache entries
- * @xe: The device
- *
- * Display engine has direct access to memory and is never coherent with L3/L4
- * caches (or CPU caches), however KMD is responsible for specifically flushing
- * transient L3 GPU cache entries prior to the flip sequence to ensure scanout
- * can happen from such a surface without seeing corruption.
- *
- * Display surfaces can be tagged as transient by mapping it using one of the
- * various L3:XD PAT index modes on Xe2.
- *
- * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
- * at the end of each submission via PIPE_CONTROL for compute/render, since SA
- * Media is not coherent with L3 and we want to support render-vs-media
- * usescases. For other engines like copy/blt the HW internally forces uncached
- * behaviour, hence why we can skip the TDF on such platforms.
+/*
+ * Issue a TRANSIENT_FLUSH_REQUEST and wait for completion on each gt.
*/
-void xe_device_td_flush(struct xe_device *xe)
+static void tdf_request_sync(struct xe_device *xe)
{
- struct xe_gt *gt;
unsigned int fw_ref;
+ struct xe_gt *gt;
u8 id;
- if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
- return;
-
- if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) {
- xe_device_l2_flush(xe);
- return;
- }
-
for_each_gt(gt, xe, id) {
if (xe_gt_is_media_type(gt))
continue;
@@ -1013,6 +996,7 @@ void xe_device_td_flush(struct xe_device *xe)
return;
xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
+
/*
* FIXME: We can likely do better here with our choice of
* timeout. Currently we just assume the worst case, i.e. 150us,
@@ -1043,15 +1027,52 @@ void xe_device_l2_flush(struct xe_device *xe)
return;
spin_lock(&gt->global_invl_lock);
- xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
+ xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
+
spin_unlock(&gt->global_invl_lock);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
+/**
+ * xe_device_td_flush() - Flush transient L3 cache entries
+ * @xe: The device
+ *
+ * Display engine has direct access to memory and is never coherent with L3/L4
+ * caches (or CPU caches), however KMD is responsible for specifically flushing
+ * transient L3 GPU cache entries prior to the flip sequence to ensure scanout
+ * can happen from such a surface without seeing corruption.
+ *
+ * Display surfaces can be tagged as transient by mapping them using one of the
+ * various L3:XD PAT index modes on Xe2.
+ *
+ * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
+ * at the end of each submission via PIPE_CONTROL for compute/render, since SA
+ * Media is not coherent with L3 and we want to support render-vs-media
+ * use cases. For other engines like copy/blt the HW internally forces uncached
+ * behaviour, hence why we can skip the TDF on such platforms.
+ */
+void xe_device_td_flush(struct xe_device *xe)
+{
+ struct xe_gt *root_gt;
+
+ if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
+ return;
+
+ root_gt = xe_root_mmio_gt(xe);
+ if (XE_WA(root_gt, 16023588340)) {
+ /* A transient flush is not sufficient: flush the L2 */
+ xe_device_l2_flush(xe);
+ } else {
+ xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc);
+ tdf_request_sync(xe);
+ xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc);
+ }
+}
+
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
return xe_device_has_flat_ccs(xe) ?
@@ -1154,7 +1175,8 @@ void xe_device_declare_wedged(struct xe_device *xe)
/* Notify userspace of wedged device */
drm_dev_wedged_event(&xe->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
+ NULL);
}
for_each_gt(gt, xe, id)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 0bc3bc8e6803..bc802e066a7d 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -60,35 +60,32 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
return &xe->tiles[0];
}
+/*
+ * Highest GT/tile count for any platform. Used only for memory allocation
+ * sizing. Any logic looping over GTs or mapping userspace GT IDs into GT
+ * structures should use the per-platform xe->info.max_gt_per_tile instead.
+ */
#define XE_MAX_GT_PER_TILE 2
-static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
-{
- if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
- gt_id = 0;
-
- return gt_id ? tile->media_gt : tile->primary_gt;
-}
-
static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
{
- struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+ struct xe_tile *tile;
struct xe_gt *gt;
- /*
- * FIXME: This only works for now because multi-tile and standalone
- * media are mutually exclusive on the platforms we have today.
- *
- * id => GT mapping may change once we settle on how we want to handle
- * our UAPI.
- */
- if (MEDIA_VER(xe) >= 13) {
- gt = xe_tile_get_gt(root_tile, gt_id);
- } else {
- if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
- gt_id = 0;
-
- gt = xe->tiles[gt_id].primary_gt;
+ if (gt_id >= xe->info.tile_count * xe->info.max_gt_per_tile)
+ return NULL;
+
+ tile = &xe->tiles[gt_id / xe->info.max_gt_per_tile];
+ switch (gt_id % xe->info.max_gt_per_tile) {
+ default:
+ xe_assert(xe, false);
+ fallthrough;
+ case 0:
+ gt = tile->primary_gt;
+ break;
+ case 1:
+ gt = tile->media_gt;
+ break;
}
if (!gt)
@@ -130,14 +127,14 @@ static inline bool xe_device_uc_enabled(struct xe_device *xe)
for ((id__) = 1; (id__) < (xe__)->info.tile_count; (id__)++) \
for_each_if((tile__) = &(xe__)->tiles[(id__)])
-/*
- * FIXME: This only works for now since multi-tile and standalone media
- * happen to be mutually exclusive. Future platforms may change this...
- */
#define for_each_gt(gt__, xe__, id__) \
- for ((id__) = 0; (id__) < (xe__)->info.gt_count; (id__)++) \
+ for ((id__) = 0; (id__) < (xe__)->info.tile_count * (xe__)->info.max_gt_per_tile; (id__)++) \
for_each_if((gt__) = xe_device_get_gt((xe__), (id__)))
+#define for_each_gt_on_tile(gt__, tile__, id__) \
+ for_each_gt((gt__), (tile__)->xe, (id__)) \
+ for_each_if((gt__)->tile == (tile__))
+
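With the fixed per-tile stride the ID layout becomes stable; a small sketch of the mapping, assuming max_gt_per_tile == 2 as on current platforms:

	static struct xe_gt *example_tile1_media_gt(struct xe_device *xe)
	{
		/*
		 * IDs interleave per tile: 0 = tile0 primary, 1 = tile0 media,
		 * 2 = tile1 primary, 3 = tile1 media; NULL when a slot is absent.
		 */
		return xe_device_get_gt(xe, 3);
	}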
static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
{
return &gt->pm.fw;
@@ -195,6 +192,8 @@ void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);
+int xe_is_injection_active(void);
+
/*
* Occasionally it is seen that the G2H worker starts running after a delay of more than
* a second even after being queued and activated by the Linux workqueue subsystem. This
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 7efbd4c52791..bd9015761aa0 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -3,14 +3,16 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
-#include <drm/drm_managed.h>
-
#include "xe_device.h"
#include "xe_device_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_pcode_api.h"
+#include "xe_pcode.h"
#include "xe_pm.h"
/**
@@ -22,6 +24,12 @@
*
* vram_d3cold_threshold - Report/change vram used threshold (in MB) below
* which vram save/restore is permissible during runtime D3cold entry/exit.
+ *
+ * lb_fan_control_version - Fan control version provisioned by late binding.
+ * Exposed only if supported by the device.
+ *
+ * lb_voltage_regulator_version - Voltage regulator version provisioned by late
+ * binding. Exposed only if supported by the device.
*/
static ssize_t
@@ -63,11 +71,230 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+static ssize_t
+lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
+ u16 major = 0, minor = 0, hotfix = 0, build = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) {
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
+ &ver_low, NULL);
+ if (ret)
+ goto out;
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
+ &ver_high, NULL);
+ if (ret)
+ goto out;
+
+ major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
+ minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
+ hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
+ build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
+ }
+out:
+ xe_pm_runtime_put(xe);
+
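+	/* An unprovisioned fan table reads back as version 0.0.0.0 */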
+ return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
+}
+static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version);
+
+static ssize_t
+lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
+ u16 major = 0, minor = 0, hotfix = 0, build = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) {
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
+ &ver_low, NULL);
+ if (ret)
+ goto out;
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
+ &ver_high, NULL);
+ if (ret)
+ goto out;
+
+ major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
+ minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
+ hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
+ build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
+ }
+out:
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
+}
+static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);
+
+static int late_bind_create_files(struct device *dev)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret) {
+ if (ret == -ENXIO) {
+ drm_dbg(&xe->drm, "Late binding not supported by firmware\n");
+ ret = 0;
+ }
+ goto out;
+ }
+
+ if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) {
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
+ if (ret)
+ goto out;
+ }
+
+ if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
+out:
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static void late_bind_remove_files(struct device *dev)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
+ sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
+
+ if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
+ sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
+out:
+ xe_pm_runtime_put(xe);
+}
+
+/**
+ * DOC: PCIe Gen5 Limitations
+ *
+ * The default link speed of a discrete GPU is determined by configuration
+ * parameters stored in its flash memory, which can be overridden through
+ * user-initiated firmware updates. Devices configured with PCIe Gen5 as
+ * their default link speed have been observed to hit link quality issues
+ * due to host or motherboard limitations, and may have to auto-downgrade
+ * their link to PCIe Gen4 speed when the Gen5 link is unstable, which makes
+ * firmware updates rather risky on such setups. Before pushing a firmware
+ * image with PCIe Gen5 as the default configuration, ensure that the device
+ * is capable of auto-downgrading its link to PCIe Gen4 speed by reading the
+ * ``auto_link_downgrade_capable`` sysfs entry, which reports a boolean value
+ * of ``0`` or ``1``, meaning `incapable` or `capable` respectively.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable
+ *
+ * Pushing a firmware image with PCIe Gen5 as the default configuration to a
+ * device incapable of auto link downgrade, and then facing link instability
+ * due to host or motherboard limitations, can result in the driver failing
+ * to bind to the device, making further firmware updates impossible and
+ * leaving RMA as the last resort.
+ *
+ * The link downgrade status of auto link downgrade capable devices is
+ * available through the ``auto_link_downgrade_status`` sysfs entry with a
+ * boolean output value of ``0`` or ``1``, where ``0`` means no
+ * auto-downgrading was required during link training (the optimal scenario)
+ * and ``1`` means the device has auto-downgraded its link to PCIe Gen4 speed
+ * due to an unstable Gen5 link.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status
+ */
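As a concrete illustration of the pre-update check described above, a minimal
userspace sketch (hypothetical helper, not part of the patch) could read the
capability before pushing a Gen5-default firmware image:

	#include <stdio.h>

	/* Returns 1 if the device can auto-downgrade to Gen4, 0 if it
	 * cannot, and -1 on error.
	 */
	static int gen5_downgrade_capable(const char *bdf)
	{
		char path[256];
		FILE *f;
		int cap;

		snprintf(path, sizeof(path),
			 "/sys/bus/pci/devices/%s/auto_link_downgrade_capable",
			 bdf);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%d", &cap) != 1)
			cap = -1;
		fclose(f);
		return cap;
	}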
+
+static ssize_t
+auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ u32 cap, val;
+
+ xe_pm_runtime_get(xe);
+ val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
+ xe_pm_runtime_put(xe);
+
+ cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
+ return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE);
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_capable);
+
+static ssize_t
+auto_link_downgrade_status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ /* default the auto_link_downgrade status to 0 */
+ u32 val = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_pcode_read(xe_device_get_root_tile(xe),
+ PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
+ &val, NULL);
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);
+
+static const struct attribute *auto_link_downgrade_attrs[] = {
+ &dev_attr_auto_link_downgrade_capable.attr,
+ &dev_attr_auto_link_downgrade_status.attr,
+ NULL
+};
+
static void xe_device_sysfs_fini(void *arg)
{
struct xe_device *xe = arg;
- sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (xe->d3cold.capable)
+ sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
+ late_bind_remove_files(xe->drm.dev);
+ }
}
int xe_device_sysfs_init(struct xe_device *xe)
@@ -75,9 +302,21 @@ int xe_device_sysfs_init(struct xe_device *xe)
struct device *dev = xe->drm.dev;
int ret;
- ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
- if (ret)
- return ret;
+ if (xe->d3cold.capable) {
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (ret)
+ return ret;
+ }
+
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
+ if (ret)
+ return ret;
+
+ ret = late_bind_create_files(dev);
+ if (ret)
+ return ret;
+ }
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9f8667ebba85..d4d2c6854790 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -21,7 +21,9 @@
#include "xe_platform_types.h"
#include "xe_pmu_types.h"
#include "xe_pt_types.h"
+#include "xe_sriov_pf_types.h"
#include "xe_sriov_types.h"
+#include "xe_sriov_vf_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
#include "xe_ttm_vram_mgr_types.h"
@@ -30,13 +32,11 @@
#define TEST_VM_OPS_ERROR
#endif
-#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
-#include "soc/intel_pch.h"
-#include "intel_display_core.h"
-#include "intel_display_device.h"
-#endif
-
+struct dram_info;
+struct intel_display;
+struct intel_dg_nvm_dev;
struct xe_ggtt;
+struct xe_i2c;
struct xe_pat_ops;
struct xe_pxp;
@@ -107,6 +107,9 @@ struct xe_vram_region {
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
void __iomem *mapping;
+ /** @ttm: VRAM TTM manager */
+ struct xe_ttm_vram_mgr ttm;
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
/** @pagemap: Used to remap device memory as ZONE_DEVICE */
struct dev_pagemap pagemap;
/**
@@ -120,8 +123,7 @@ struct xe_vram_region {
* This is generated when remap device memory as ZONE_DEVICE
*/
resource_size_t hpa_base;
- /** @ttm: VRAM TTM manager */
- struct xe_ttm_vram_mgr ttm;
+#endif
};
/**
@@ -295,6 +297,8 @@ struct xe_device {
u8 vram_flags;
/** @info.tile_count: Number of tiles */
u8 tile_count;
+ /** @info.max_gt_per_tile: Number of GT IDs allocated to each tile */
+ u8 max_gt_per_tile;
/** @info.gt_count: Total number of GTs for entire device */
u8 gt_count;
/** @info.vm_max_level: Max VM level */
@@ -314,14 +318,22 @@ struct xe_device {
u8 has_atomic_enable_pte_bit:1;
/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
u8 has_device_atomics_on_smem:1;
+ /** @info.has_fan_control: Device supports fan control */
+ u8 has_fan_control:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
+ /** @info.has_gsc_nvm: Device has gsc non-volatile memory */
+ u8 has_gsc_nvm:1;
/** @info.has_heci_cscfi: device has heci cscfi */
u8 has_heci_cscfi:1;
/** @info.has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
+	/** @info.has_mbx_power_limits: Device supports managing power limits
+	 * using pcode mailbox commands.
+ */
+ u8 has_mbx_power_limits:1;
/** @info.has_pxp: Device has PXP support */
u8 has_pxp:1;
/** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
@@ -330,8 +342,12 @@ struct xe_device {
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
+ /** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
+ u8 has_64bit_timestamp:1;
/** @info.is_dgfx: is discrete device */
u8 is_dgfx:1;
+ /** @info.needs_scratch: needs scratch page for oob prefetch to work */
+ u8 needs_scratch:1;
/**
* @info.probe_display: Probe display hardware. If set to
* false, the driver will behave as if there is no display
@@ -349,6 +365,19 @@ struct xe_device {
u8 skip_pcode:1;
} info;
+ /** @wa_active: keep track of active workarounds */
+ struct {
+ /** @wa_active.oob: bitmap with active OOB workarounds */
+ unsigned long *oob;
+
+ /**
+ * @wa_active.oob_initialized: Mark oob as initialized to help detecting misuse
+ * of XE_DEVICE_WA() - it can only be called on initialization after
+ * Device OOB WAs have been processed.
+ */
+ bool oob_initialized;
+ } wa_active;
+
/** @survivability: survivability information for device */
struct xe_survivability survivability;
@@ -395,10 +424,12 @@ struct xe_device {
/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
enum xe_sriov_mode __mode;
- /** @sriov.pf: PF specific data */
- struct xe_device_pf pf;
- /** @sriov.vf: VF specific data */
- struct xe_device_vf vf;
+ union {
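+		/*
+		 * A device is either PF or VF, never both, so the two
+		 * structs can safely overlay each other.
+		 */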
+ /** @sriov.pf: PF specific data */
+ struct xe_device_pf pf;
+ /** @sriov.vf: VF specific data */
+ struct xe_device_vf vf;
+ };
/** @sriov.wq: workqueue used by the virtualization workers */
struct workqueue_struct *wq;
@@ -418,12 +449,22 @@ struct xe_device {
struct {
/** @pinned.lock: protected pinned BO list state */
spinlock_t lock;
- /** @pinned.kernel_bo_present: pinned kernel BO that are present */
- struct list_head kernel_bo_present;
- /** @pinned.evicted: pinned BO that have been evicted */
- struct list_head evicted;
- /** @pinned.external_vram: pinned external BO in vram*/
- struct list_head external_vram;
+ /** @pinned.early: early pinned lists */
+ struct {
+ /** @pinned.early.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.early.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ } early;
+ /** @pinned.late: late pinned lists */
+ struct {
+ /** @pinned.late.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.late.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ /** @pinned.external: pinned external and dma-buf. */
+ struct list_head external;
+ } late;
} pinned;
/** @ufence_wq: user fence wait queue */
@@ -481,6 +522,10 @@ struct xe_device {
const struct xe_pat_table_entry *table;
/** @pat.n_entries: Number of PAT entries */
int n_entries;
+ /** @pat.ats_entry: PAT entry for PCIe ATS responses */
+ const struct xe_pat_table_entry *pat_ats;
+ /** @pat.pta_entry: PAT entry for page table accesses */
+ const struct xe_pat_table_entry *pat_pta;
u32 idx[__XE_CACHE_LEVEL_COUNT];
} pat;
@@ -506,6 +551,9 @@ struct xe_device {
struct mutex lock;
} d3cold;
+ /** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
+ struct notifier_block pm_notifier;
+
/** @pmt: Support the PMT driver callback interface */
struct {
/** @pmt.lock: protect access for telemetry data */
@@ -524,6 +572,9 @@ struct xe_device {
/** @heci_gsc: graphics security controller */
struct xe_heci_gsc heci_gsc;
+ /** @nvm: discrete graphics non-volatile memory */
+ struct intel_dg_nvm_dev *nvm;
+
/** @oa: oa observation subsystem */
struct xe_oa oa;
@@ -552,6 +603,12 @@ struct xe_device {
/** @pmu: performance monitoring unit */
struct xe_pmu pmu;
+ /** @i2c: I2C host controller */
+ struct xe_i2c *i2c;
+
+ /** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice MS */
+ u32 atomic_svm_timeslice_ms;
+
#ifdef TEST_VM_OPS_ERROR
/**
* @vm_inject_error_position: inject errors at different places in VM
@@ -560,6 +617,14 @@ struct xe_device {
u8 vm_inject_error_position;
#endif
+#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
+ /**
+ * @global_total_pages: global GPU page usage tracked for gpu_mem
+ * tracepoints
+ */
+ atomic64_t global_total_pages;
+#endif
+
/* private: */
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
@@ -569,27 +634,9 @@ struct xe_device {
* drm_i915_private during build. After cleanup these should go away,
* migrating to the right sub-structs
*/
- struct intel_display display;
- enum intel_pch pch_type;
-
- struct dram_info {
- bool wm_lv_0_adjust_needed;
- u8 num_channels;
- bool symmetric_memory;
- enum intel_dram_type {
- INTEL_DRAM_UNKNOWN,
- INTEL_DRAM_DDR3,
- INTEL_DRAM_DDR4,
- INTEL_DRAM_LPDDR3,
- INTEL_DRAM_LPDDR4,
- INTEL_DRAM_DDR5,
- INTEL_DRAM_LPDDR5,
- INTEL_DRAM_GDDR,
- INTEL_DRAM_GDDR_ECC,
- } type;
- u8 num_qgv_points;
- u8 num_psf_gv_points;
- } dram_info;
+ struct intel_display *display;
+
+ const struct dram_info *dram_info;
/*
* edram size in MB.
diff --git a/drivers/gpu/drm/xe/xe_device_wa_oob.rules b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
new file mode 100644
index 000000000000..3a0c4ccc4224
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
@@ -0,0 +1,2 @@
+15015404425 PLATFORM(LUNARLAKE)
+ PLATFORM(PANTHERLAKE)
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index f7a20264ea33..346f857f3837 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -233,7 +233,7 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct drm_gem_object *obj = attach->importer_priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
- XE_WARN_ON(xe_bo_evict(bo, false));
+ XE_WARN_ON(xe_bo_evict(bo));
}
static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 31f688e953d7..f931ff9b1ec0 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -167,7 +167,7 @@ void xe_drm_client_remove_bo(struct xe_bo *bo)
static void bo_meminfo(struct xe_bo *bo,
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
{
- u64 sz = bo->size;
+ u64 sz = xe_bo_size(bo);
u32 mem_type = bo->ttm.resource->mem_type;
xe_bo_assert_held(bo);
diff --git a/drivers/gpu/drm/xe/xe_drv.h b/drivers/gpu/drm/xe/xe_drv.h
index d61650d4aa0b..95242a375e54 100644
--- a/drivers/gpu/drm/xe/xe_drv.h
+++ b/drivers/gpu/drm/xe/xe_drv.h
@@ -9,7 +9,7 @@
#include <drm/drm_drv.h>
#define DRIVER_NAME "xe"
-#define DRIVER_DESC "Intel Xe Graphics"
+#define DRIVER_DESC "Intel Xe2 Graphics"
/* Interface history:
*
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index e2bb156c71fb..af7916315ac6 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -258,11 +258,13 @@ static int set_prop_eu_stall_wait_num_reports(struct xe_device *xe, u64 value,
static int set_prop_eu_stall_gt_id(struct xe_device *xe, u64 value,
struct eu_stall_open_properties *props)
{
- if (value >= xe->info.gt_count) {
+ struct xe_gt *gt = xe_device_get_gt(xe, value);
+
+ if (!gt) {
drm_dbg(&xe->drm, "Invalid GT ID %llu for EU stall sampling\n", value);
return -EINVAL;
}
- props->gt = xe_device_get_gt(xe, value);
+ props->gt = gt;
return 0;
}
@@ -283,7 +285,7 @@ static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -313,7 +315,7 @@ static int xe_eu_stall_user_extensions(struct xe_device *xe, u64 extension,
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index b75adfc99fb7..44364c042ad7 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -176,8 +176,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (xe_exec_queue_is_parallel(q)) {
- err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
- q->width);
+ err = copy_from_user(addresses, addresses_user, sizeof(u64) *
+ q->width);
if (err) {
err = -EFAULT;
goto err_syncs;
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 606922d9dd73..8991b4aed440 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -114,7 +114,6 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
- struct xe_vm *vm = q->vm;
int i, err;
u32 flags = 0;
@@ -132,32 +131,20 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
flags |= XE_LRC_CREATE_RUNALONE;
}
- if (vm) {
- err = xe_vm_lock(vm, true);
- if (err)
- return err;
- }
-
for (i = 0; i < q->width; ++i) {
q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
if (IS_ERR(q->lrc[i])) {
err = PTR_ERR(q->lrc[i]);
- goto err_unlock;
+ goto err_lrc;
}
}
- if (vm)
- xe_vm_unlock(vm);
-
err = q->ops->init(q);
if (err)
goto err_lrc;
return 0;
-err_unlock:
- if (vm)
- xe_vm_unlock(vm);
err_lrc:
for (i = i - 1; i >= 0; --i)
xe_lrc_put(q->lrc[i]);
@@ -479,7 +466,7 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -518,7 +505,7 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -618,13 +605,12 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
return -EINVAL;
- err = __copy_from_user(eci, user_eci,
- sizeof(struct drm_xe_engine_class_instance) *
- len);
+ err = copy_from_user(eci, user_eci,
+ sizeof(struct drm_xe_engine_class_instance) * len);
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
- if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
+ if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id)))
return -EINVAL;
if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
@@ -830,7 +816,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_lrc *lrc;
- u32 old_ts, new_ts;
+ u64 old_ts, new_ts;
int idx;
/*
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index 4f6784e5abf8..c59a9b330697 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -49,9 +49,6 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
fw->gt = gt;
spin_lock_init(&fw->lock);
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
if (xe->info.graphics_verx100 >= 1270) {
init_domain(fw, XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
@@ -67,10 +64,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
int i, j;
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
- if (!xe_gt_is_media_type(gt))
+ if (xe_gt_is_main_type(gt))
init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER,
FORCEWAKE_ACK_RENDER);
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index ed9183599e31..6581cb0f0e59 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -18,8 +18,8 @@
" *\n" \
" * This file was generated from rules: %s\n" \
" */\n" \
- "#ifndef _GENERATED_XE_WA_OOB_\n" \
- "#define _GENERATED_XE_WA_OOB_\n" \
+ "#ifndef _GENERATED_%s_\n" \
+ "#define _GENERATED_%s_\n" \
"\n" \
"enum {\n"
@@ -52,7 +52,7 @@ static char *strip(char *line, size_t linelen)
}
#define MAX_LINE_LEN 4096
-static int parse(FILE *input, FILE *csource, FILE *cheader)
+static int parse(FILE *input, FILE *csource, FILE *cheader, char *prefix)
{
char line[MAX_LINE_LEN + 1];
char *name, *prev_name = NULL, *rules;
@@ -96,7 +96,7 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)
}
if (name) {
- fprintf(cheader, "\tXE_WA_OOB_%s = %u,\n", name, idx);
+ fprintf(cheader, "\t%s_%s = %u,\n", prefix, name, idx);
/* Close previous entry before starting a new one */
if (idx)
@@ -118,7 +118,33 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)
if (idx)
fprintf(csource, ") },\n");
- fprintf(cheader, "\t_XE_WA_OOB_COUNT = %u\n", idx);
+ fprintf(cheader, "\t_%s_COUNT = %u\n", prefix, idx);
+
+ return 0;
+}
+
+static int fn_to_prefix(const char *fn, char *prefix, size_t size)
+{
+ size_t len;
+
+ fn = basename(fn);
+ len = strlen(fn);
+
+ if (len > size - 1)
+ return -ENAMETOOLONG;
+
+ memcpy(prefix, fn, len + 1);
+
+ for (char *p = prefix; *p; p++) {
+ switch (*p) {
+ case '.':
+ *p = '\0';
+ return 0;
+ default:
+ *p = toupper(*p);
+ break;
+ }
+ }
return 0;
}
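For example (illustrative values only), a generated header path maps to the
enum prefix and include guard as follows:

	char prefix[128];

	/* basename() strips the directory; characters are upcased up to
	 * the first '.', which terminates the prefix.
	 */
	fn_to_prefix("generated/xe_device_wa_oob.h", prefix, sizeof(prefix));
	/* prefix is now "XE_DEVICE_WA_OOB", so entries from the new
	 * xe_device_wa_oob.rules file become XE_DEVICE_WA_OOB_<name>.
	 */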
@@ -141,6 +167,7 @@ int main(int argc, const char *argv[])
[ARGS_CHEADER] = { .fn = argv[3], .mode = "w" },
};
int ret = 1;
+ char prefix[128];
if (argc < 3) {
fprintf(stderr, "ERROR: wrong arguments\n");
@@ -148,6 +175,9 @@ int main(int argc, const char *argv[])
return 1;
}
+ if (fn_to_prefix(args[ARGS_CHEADER].fn, prefix, sizeof(prefix)) < 0)
+ return 1;
+
for (int i = 0; i < _ARGS_COUNT; i++) {
args[i].f = fopen(args[i].fn, args[i].mode);
if (!args[i].f) {
@@ -157,9 +187,10 @@ int main(int argc, const char *argv[])
}
}
- fprintf(args[ARGS_CHEADER].f, HEADER, args[ARGS_INPUT].fn);
+ fprintf(args[ARGS_CHEADER].f, HEADER, args[ARGS_INPUT].fn, prefix, prefix);
+
ret = parse(args[ARGS_INPUT].f, args[ARGS_CSOURCE].f,
- args[ARGS_CHEADER].f);
+ args[ARGS_CHEADER].f, prefix);
if (!ret)
fprintf(args[ARGS_CHEADER].f, FOOTER);
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 5fcb2b4c2c13..29d4d3f51da1 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -5,6 +5,7 @@
#include "xe_ggtt.h"
+#include <kunit/visibility.h>
#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>
@@ -22,12 +23,13 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_res_cursor.h"
#include "xe_sriov.h"
+#include "xe_tile_sriov_vf.h"
#include "xe_wa.h"
#include "xe_wopcm.h"
@@ -64,13 +66,9 @@
* give us the correct placement for free.
*/
-static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index)
+static u64 xelp_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
- u64 pte;
-
- pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
- pte |= XE_PAGE_PRESENT;
+ u64 pte = XE_PAGE_PRESENT;
if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
pte |= XE_GGTT_PTE_DM;
@@ -78,13 +76,12 @@ static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
return pte;
}
-static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index)
+static u64 xelpg_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
struct xe_device *xe = xe_bo_device(bo);
u64 pte;
- pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index);
+ pte = xelp_ggtt_pte_flags(bo, pat_index);
xe_assert(xe, pat_index <= 3);
@@ -149,8 +146,9 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
xe_tile_assert(ggtt->tile, start < end);
if (ggtt->scratch)
- scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
- pat_index);
+ scratch_pte = xe_bo_addr(ggtt->scratch, 0, XE_PAGE_SIZE) |
+ ggtt->pt_ops->pte_encode_flags(ggtt->scratch,
+ pat_index);
else
scratch_pte = 0;
@@ -160,6 +158,22 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
}
}
+/**
+ * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
+ * @tile: &xe_tile
+ *
+ * Allocates a &xe_ggtt for a given tile.
+ *
+ * Return: &xe_ggtt on success, or NULL when out of memory.
+ */
+struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL);
+ if (ggtt)
+ ggtt->tile = tile;
+ return ggtt;
+}
+
static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
struct xe_ggtt *ggtt = arg;
@@ -176,6 +190,13 @@ static void ggtt_fini(void *arg)
ggtt->scratch = NULL;
}
+#ifdef CONFIG_LOCKDEP
+void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
+{
+ might_lock(&ggtt->lock);
+}
+#endif
+
static void primelockdep(struct xe_ggtt *ggtt)
{
if (!IS_ENABLED(CONFIG_LOCKDEP))
@@ -187,20 +208,43 @@ static void primelockdep(struct xe_ggtt *ggtt)
}
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
- .pte_encode_bo = xelp_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelp_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
- .pte_encode_bo = xelpg_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
- .pte_encode_bo = xelpg_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
+static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
+{
+ drm_mm_init(&ggtt->mm, reserved,
+ ggtt->size - reserved);
+ mutex_init(&ggtt->lock);
+ primelockdep(ggtt);
+}
+
+int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
+{
+ ggtt->size = size;
+ __xe_ggtt_init_early(ggtt, reserved);
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit);
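xe_ggtt_init_kunit() lets a test build a GGTT without touching hardware. A
hypothetical KUnit usage sketch (test name and sizes are made up, assuming
<kunit/test.h> is included):

	static void ggtt_init_test(struct kunit *test)
	{
		struct xe_ggtt *ggtt = kunit_kzalloc(test, sizeof(*ggtt),
						     GFP_KERNEL);

		KUNIT_ASSERT_NOT_NULL(test, ggtt);
		/* reserve the first 4KiB, expose a 1MiB GGTT */
		KUNIT_ASSERT_EQ(test, xe_ggtt_init_kunit(ggtt, SZ_4K, SZ_1M), 0);
	}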
+
+static void dev_fini_ggtt(void *arg)
+{
+ struct xe_ggtt *ggtt = arg;
+
+ drain_workqueue(ggtt->wq);
+}
+
/**
* xe_ggtt_init_early - Early GGTT initialization
* @ggtt: the &xe_ggtt to be initialized
@@ -219,7 +263,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
unsigned int gsm_size;
int err;
- if (IS_SRIOV_VF(xe))
+ if (IS_SRIOV_VF(xe) || GRAPHICS_VERx100(xe) >= 1250)
gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
else
gsm_size = probe_gsm_size(pdev);
@@ -247,18 +291,18 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
-
- drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
- ggtt->size - xe_wopcm_size(xe));
- mutex_init(&ggtt->lock);
- primelockdep(ggtt);
+ __xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
if (err)
return err;
+ err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
+ if (err)
+ return err;
+
if (IS_SRIOV_VF(xe)) {
- err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
+ err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile);
if (err)
return err;
}
@@ -365,7 +409,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
* scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
- flags = XE_BO_FLAG_PINNED;
+ flags = 0;
if (ggtt->flags & XE_GGTT_FLAGS_64K)
flags |= XE_BO_FLAG_SYSTEM;
else
@@ -377,7 +421,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
goto err;
}
- xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, ggtt->scratch->size);
+ xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, xe_bo_size(ggtt->scratch));
xe_ggtt_initial_clear(ggtt);
@@ -429,16 +473,17 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
}
/**
- * xe_ggtt_node_insert_balloon - prevent allocation of specified GGTT addresses
+ * xe_ggtt_node_insert_balloon_locked - prevent allocation of specified GGTT addresses
* @node: the &xe_ggtt_node to hold reserved GGTT node
* @start: the starting GGTT address of the reserved region
 * @end: the end GGTT address of the reserved region
*
- * Use xe_ggtt_node_remove_balloon() to release a reserved GGTT node.
+ * To be used in cases where ggtt->lock is already taken.
+ * Use xe_ggtt_node_remove_balloon_locked() to release a reserved GGTT node.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
+int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end)
{
struct xe_ggtt *ggtt = node->ggtt;
int err;
@@ -447,14 +492,13 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
+ lockdep_assert_held(&ggtt->lock);
node->base.color = 0;
node->base.start = start;
node->base.size = end - start;
- mutex_lock(&ggtt->lock);
err = drm_mm_reserve_node(&ggtt->mm, &node->base);
- mutex_unlock(&ggtt->lock);
if (xe_gt_WARN(ggtt->tile->primary_gt, err,
"Failed to balloon GGTT %#llx-%#llx (%pe)\n",
@@ -466,27 +510,72 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
}
/**
- * xe_ggtt_node_remove_balloon - release a reserved GGTT region
+ * xe_ggtt_node_remove_balloon_locked - release a reserved GGTT region
* @node: the &xe_ggtt_node with reserved GGTT region
*
- * See xe_ggtt_node_insert_balloon() for details.
+ * To be used in cases where ggtt->lock is already taken.
+ * See xe_ggtt_node_insert_balloon_locked() for details.
*/
-void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node)
+void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
{
- if (!node || !node->ggtt)
+ if (!xe_ggtt_node_allocated(node))
return;
- if (!drm_mm_node_allocated(&node->base))
- goto free_node;
+ lockdep_assert_held(&node->ggtt->lock);
xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");
- mutex_lock(&node->ggtt->lock);
drm_mm_remove_node(&node->base);
- mutex_unlock(&node->ggtt->lock);
+}
-free_node:
- xe_ggtt_node_fini(node);
+static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size)
+{
+ struct xe_tile *tile = ggtt->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ u64 __maybe_unused wopcm = xe_wopcm_size(xe);
+
+ xe_tile_assert(tile, start >= wopcm);
+ xe_tile_assert(tile, start + size < ggtt->size - wopcm);
+}
+
+/**
+ * xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range.
+ * @ggtt: the &xe_ggtt struct instance
+ * @shift: change to the location of area provisioned for current VF
+ *
+ * This function moves all nodes from the GGTT VM to a temporary list. These
+ * nodes are expected to represent allocations in the range formerly assigned
+ * to the current VF, before the range changed. Once the GGTT VM is completely
+ * clear of nodes, they are re-added with shifted offsets.
+ *
+ * This function cannot fail, since it only shifts existing nodes without any
+ * additional processing. If the nodes fit at the old address, they will fit
+ * at the new one. A failure inside this function would indicate that the list
+ * of nodes was either already damaged, or that the shift moves the address
+ * range outside of valid bounds. Both cases justify an assert rather than an
+ * error code.
+ */
+void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift)
+{
+ struct xe_tile *tile __maybe_unused = ggtt->tile;
+ struct drm_mm_node *node, *tmpn;
+ LIST_HEAD(temp_list_head);
+
+ lockdep_assert_held(&ggtt->lock);
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
+ xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);
+
+ drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
+ drm_mm_remove_node(node);
+ list_add(&node->node_list, &temp_list_head);
+ }
+
+ list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
+ list_del(&node->node_list);
+ node->start += shift;
+ drm_mm_reserve_node(&ggtt->mm, node);
+ xe_tile_assert(tile, drm_mm_node_allocated(node));
+ }
}
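Since the helper asserts that ggtt->lock is held, a caller is expected to look
roughly like this (hypothetical sketch; new_base and old_base stand for the
VF's GGTT range after and before reprovisioning):

	s64 shift = new_base - old_base;

	mutex_lock(&ggtt->lock);
	xe_ggtt_shift_nodes_locked(ggtt, shift);
	mutex_unlock(&ggtt->lock);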
/**
@@ -537,12 +626,12 @@ int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
* xe_ggtt_node_init - Initialize %xe_ggtt_node struct
* @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
*
- * This function will allocated the struct %xe_ggtt_node and return it's pointer.
+ * This function will allocate the struct %xe_ggtt_node and return its pointer.
* This struct will then be freed after the node removal upon xe_ggtt_node_remove()
- * or xe_ggtt_node_remove_balloon().
+ * or xe_ggtt_node_remove_balloon_locked().
* Having %xe_ggtt_node struct allocated doesn't mean that the node is already allocated
* in GGTT. Only the xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
- * xe_ggtt_node_insert_balloon() will ensure the node is inserted or reserved in GGTT.
+ * xe_ggtt_node_insert_balloon_locked() will ensure the node is inserted or reserved in GGTT.
*
* Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
**/
@@ -564,7 +653,7 @@ struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
* @node: the &xe_ggtt_node to be freed
*
* If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
- * or xe_ggtt_node_insert_balloon(); and this @node is not going to be reused, then,
+ * or xe_ggtt_node_insert_balloon_locked(); and this @node is not going to be reused, then,
* this function needs to be called to free the %xe_ggtt_node struct
**/
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
@@ -589,26 +678,59 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
/**
* xe_ggtt_map_bo - Map the BO into GGTT
* @ggtt: the &xe_ggtt where node will be mapped
+ * @node: the &xe_ggtt_node where this BO is mapped
* @bo: the &xe_bo to be mapped
+ * @pat_index: Which pat_index to use.
*/
-void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
+ struct xe_bo *bo, u16 pat_index)
{
- u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
- u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
- u64 start;
- u64 offset, pte;
- if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id]))
+ u64 start, pte, end;
+ struct xe_res_cursor cur;
+
+ if (XE_WARN_ON(!node))
return;
- start = bo->ggtt_node[ggtt->tile->id]->base.start;
+ start = node->base.start;
+ end = start + xe_bo_size(bo);
- for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
- pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
- ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte);
+ pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);
+ if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
+ xe_assert(xe_bo_device(bo), bo->ttm.ttm);
+
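+		/* System memory: each PTE gets the DMA address of its page */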
+ for (xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &cur);
+ cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
+ ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
+ pte | xe_res_dma(&cur));
+ } else {
+ /* Prepend GPU offset */
+ pte |= vram_region_gpu_offset(bo->ttm.resource);
+
+ for (xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
+ cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
+ ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
+ pte + cur.start);
}
}
+/**
+ * xe_ggtt_map_bo_unlocked - Restore a mapping of a BO into GGTT
+ * @ggtt: the &xe_ggtt where node will be mapped
+ * @bo: the &xe_bo to be mapped
+ *
+ * This is used to restore a GGTT mapping after suspend.
+ */
+void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
+{
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
+
+ mutex_lock(&ggtt->lock);
+ xe_ggtt_map_bo(ggtt, bo->ggtt_node[ggtt->tile->id], bo, pat_index);
+ mutex_unlock(&ggtt->lock);
+}
+
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
@@ -621,7 +743,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
/* Someone's already inserted this BO in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
return 0;
}
@@ -640,12 +762,15 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
mutex_lock(&ggtt->lock);
err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
- bo->size, alignment, 0, start, end, 0);
+ xe_bo_size(bo), alignment, 0, start, end, 0);
if (err) {
xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
bo->ggtt_node[tile_id] = NULL;
} else {
- xe_ggtt_map_bo(ggtt, bo);
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
+
+ xe_ggtt_map_bo(ggtt, bo->ggtt_node[tile_id], bo, pat_index);
}
mutex_unlock(&ggtt->lock);
@@ -698,7 +823,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
return;
/* This BO is not currently in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
xe_ggtt_node_remove(bo->ggtt_node[tile_id],
bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
@@ -841,3 +966,30 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
return total;
}
+
+/**
+ * xe_ggtt_encode_pte_flags - Get PTE encoding flags for BO
+ * @ggtt: &xe_ggtt
+ * @bo: &xe_bo
+ * @pat_index: The pat_index for the PTE.
+ *
+ * This function returns the PTE flags for a given BO, without the address.
+ * It is used by DPT to fill a GGTT-mapped BO with a linear lookup table.
+ */
+u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt,
+ struct xe_bo *bo, u16 pat_index)
+{
+ return ggtt->pt_ops->pte_encode_flags(bo, pat_index);
+}
+
+/**
+ * xe_ggtt_read_pte - Read a PTE from the GGTT
+ * @ggtt: &xe_ggtt
+ * @offset: the offset for which the mapping should be read.
+ *
+ * Used by test cases, and by display code reading out an inherited BIOS FB.
+ */
+u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset)
+{
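+	/*
+	 * The GSM is an array of 8-byte PTEs, one per 4KiB GGTT page; gsm is
+	 * u64 __iomem *, so the arithmetic below scales by sizeof(u64).
+	 */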
+ return ioread64(ggtt->gsm + (offset / XE_PAGE_SIZE));
+}
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index 27e7d67de004..fbe1e397d05d 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -9,22 +9,28 @@
#include "xe_ggtt_types.h"
struct drm_printer;
+struct xe_tile;
+struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
+int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size);
int xe_ggtt_init(struct xe_ggtt *ggtt);
struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt);
void xe_ggtt_node_fini(struct xe_ggtt_node *node);
-int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node,
- u64 start, u64 size);
-void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node);
+int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node,
+ u64 start, u64 size);
+void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node);
+void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift);
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align);
int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
u32 size, u32 align, u32 mm_flags);
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate);
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
-void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
+ struct xe_bo *bo, u16 pat_index);
+void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end);
@@ -38,4 +44,14 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
#endif
+#ifndef CONFIG_LOCKDEP
+static inline void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
+{ }
+#else
+void xe_ggtt_might_lock(struct xe_ggtt *ggtt);
+#endif
+
+u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt, struct xe_bo *bo, u16 pat_index);
+u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
index cb02b7994a9a..c5e999d58ff2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt_types.h
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -74,8 +74,8 @@ struct xe_ggtt_node {
* Which can vary from platform to platform.
*/
struct xe_ggtt_pt_ops {
- /** @pte_encode_bo: Encode PTE address for a given BO */
- u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
+ /** @pte_encode_flags: Encode PTE flags for a given BO */
+ u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index);
/** @ggtt_set_pte: Directly write into GGTT's PTE */
void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
};
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index c250ea773491..308061f0cf37 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -51,7 +51,15 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
- drm_sched_resubmit_jobs(&sched->base);
+ struct drm_sched_job *s_job;
+
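+	/*
+	 * Open-coded replacement for drm_sched_resubmit_jobs(): re-run
+	 * every pending job whose hardware fence has not signaled yet.
+	 */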
+ list_for_each_entry(s_job, &sched->base.pending_list, list) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+ struct dma_fence *hw_fence = s_fence->parent;
+
+ if (hw_fence && !dma_fence_is_signaled(hw_fence))
+ sched->base.ops->run_job(s_job);
+ }
}
static inline bool
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index fd41113f8572..1d84bf2f2cef 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -59,7 +59,8 @@ static int memcpy_fw(struct xe_gsc *gsc)
xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size);
xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size);
- xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, gsc->private->size - fw_size);
+ xe_map_memset(xe, &gsc->private->vmap, fw_size, 0,
+ xe_bo_size(gsc->private) - fw_size);
kfree(storage);
@@ -82,7 +83,8 @@ static int emit_gsc_upload(struct xe_gsc *gsc)
bb->cs[bb->len++] = GSC_FW_LOAD;
bb->cs[bb->len++] = lower_32_bits(offset);
bb->cs[bb->len++] = upper_32_bits(offset);
- bb->cs[bb->len++] = (gsc->private->size / SZ_4K) | GSC_FW_LOAD_LIMIT_VALID;
+ bb->cs[bb->len++] = (xe_bo_size(gsc->private) / SZ_4K) |
+ GSC_FW_LOAD_LIMIT_VALID;
job = xe_bb_create_job(gsc->q, bb);
if (IS_ERR(job)) {
@@ -555,6 +557,28 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
flush_work(&gsc->work);
}
+void xe_gsc_stop_prepare(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int ret;
+
+ if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw))
+ return;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC);
+
+ /*
+	 * If the GSC FW load or the proxy init is interrupted, the only way
+	 * to recover is to do an FLR and reload the GSC from scratch.
+	 * Therefore, wait for the init to complete before stopping
+	 * operations. The proxy init is the last step, so we can just wait
+	 * on that.
+ */
+ ret = xe_gsc_wait_for_proxy_init_done(gsc);
+ if (ret)
+ xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n");
+}
+
/*
* wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
* GSC engine reset by writing a notification bit in the GS1 register and then
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index d99f66c38075..b8b8e0810ad9 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -16,6 +16,7 @@ struct xe_hw_engine;
int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
+void xe_gsc_stop_prepare(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 8cf70b228ff3..464282a89eef 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -23,6 +23,7 @@
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_tile.h"
/*
* GSC proxy:
@@ -71,6 +72,17 @@ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ /* Proxy init can take up to 500ms, so wait double that for safety */
+ return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
+ HECI1_FWSTS1_CURRENT_STATE,
+ HECI1_FWSTS1_PROXY_STATE_NORMAL,
+ USEC_PER_SEC, NULL, false);
+}
+
static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
struct xe_gt *gt = gsc_to_gt(gsc);
@@ -472,7 +484,7 @@ int xe_gsc_proxy_init(struct xe_gsc *gsc)
}
/* no multi-tile devices with this feature yet */
- if (tile->id > 0) {
+ if (!xe_tile_is_root(tile)) {
xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
index fdef56995cd4..765602221dbc 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.h
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -12,6 +12,7 @@ struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 10a9e3c72b36..c8eda36546d3 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -12,8 +12,10 @@
#include <generated/xe_wa_oob.h>
+#include "instructions/xe_alu_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
+#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -110,13 +112,13 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
if (!fw_ref)
return;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg |= CG_DIS_CNTLBUS;
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}
- xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
+ xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
@@ -144,30 +146,23 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
static void gt_reset_worker(struct work_struct *w);
-static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
+static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
+ long timeout_jiffies)
{
struct xe_sched_job *job;
- struct xe_bb *bb;
struct dma_fence *fence;
long timeout;
- bb = xe_bb_new(gt, 4, false);
- if (IS_ERR(bb))
- return PTR_ERR(bb);
-
job = xe_bb_create_job(q, bb);
- if (IS_ERR(job)) {
- xe_bb_free(bb, NULL);
+ if (IS_ERR(job))
return PTR_ERR(job);
- }
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- timeout = dma_fence_wait_timeout(fence, false, HZ);
+ timeout = dma_fence_wait_timeout(fence, false, timeout_jiffies);
dma_fence_put(fence);
- xe_bb_free(bb, NULL);
if (timeout < 0)
return timeout;
else if (!timeout)
@@ -176,90 +171,143 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
return 0;
}
-/*
- * Convert back from encoded value to type-safe, only to be used when reg.mcr
- * is true
- */
-static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
+static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
- return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
+ struct xe_bb *bb;
+ int ret;
+
+ bb = xe_bb_new(gt, 4, false);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ ret = emit_job_sync(q, bb, HZ);
+ xe_bb_free(bb, NULL);
+
+ return ret;
}
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
struct xe_reg_sr *sr = &q->hwe->reg_lrc;
struct xe_reg_sr_entry *entry;
+ int count_rmw = 0, count = 0, ret;
unsigned long idx;
- struct xe_sched_job *job;
struct xe_bb *bb;
- struct dma_fence *fence;
- long timeout;
- int count = 0;
+ size_t bb_len = 0;
+ u32 *cs;
+
+ /* count RMW registers as those will be handled separately */
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ ++count;
+ else
+ ++count_rmw;
+ }
+
+ if (count)
+ bb_len += count * 2 + 1;
+
+ if (count_rmw)
+ bb_len += count_rmw * 20 + 7;
if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
- /* Big enough to emit all of the context's 3DSTATE */
- bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
- else
- /* Just pick a large BB size */
- bb = xe_bb_new(gt, SZ_4K, false);
+ /*
+ * Big enough to emit all of the context's 3DSTATE via
+ * xe_lrc_emit_hwe_state_instructions()
+ */
+ bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32);
+
+ xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len);
+ bb = xe_bb_new(gt, bb_len, false);
if (IS_ERR(bb))
return PTR_ERR(bb);
- xa_for_each(&sr->xa, idx, entry)
- ++count;
+ cs = bb->cs;
if (count) {
- xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
+ /*
+		 * Emit a single LRI with all non-RMW regs: 1 leading dw plus
+		 * 2 dw per reg
+ */
- bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
+ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
xa_for_each(&sr->xa, idx, entry) {
struct xe_reg reg = entry->reg;
- struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
u32 val;
- /*
- * Skip reading the register if it's not really needed
- */
if (reg.masked)
val = entry->clr_bits << 16;
- else if (entry->clr_bits + 1)
- val = (reg.mcr ?
- xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
- xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
- else
+ else if (entry->clr_bits == ~0)
val = 0;
+ else
+ continue;
val |= entry->set_bits;
- bb->cs[bb->len++] = reg.addr;
- bb->cs[bb->len++] = val;
+ *cs++ = reg.addr;
+ *cs++ = val;
xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
}
}
- xe_lrc_emit_hwe_state_instructions(q, bb);
+ if (count_rmw) {
+ /* Emit MI_MATH for each RMW reg: 20dw per reg + 7 trailing dw */
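+		/*
+		 * Each MI_MATH sequence computes, in GPR0:
+		 *   new = (old & ~clr_bits) | set_bits
+		 * and writes the result back to the target register.
+		 */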
- job = xe_bb_create_job(q, bb);
- if (IS_ERR(job)) {
- xe_bb_free(bb, NULL);
- return PTR_ERR(job);
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ continue;
+
+ *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
+ *cs++ = entry->reg.addr;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+
+ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = CS_GPR_REG(0, 1).addr;
+ *cs++ = entry->clr_bits;
+ *cs++ = CS_GPR_REG(0, 2).addr;
+ *cs++ = entry->set_bits;
+
+ *cs++ = MI_MATH(8);
+ *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ *cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1);
+ *cs++ = CS_ALU_INSTR_AND;
+ *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
+ *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ *cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2);
+ *cs++ = CS_ALU_INSTR_OR;
+ *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
+
+ *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+ *cs++ = entry->reg.addr;
+
+ xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
+ entry->reg.addr, entry->clr_bits, entry->set_bits);
+ }
+
+ /* reset used GPR */
+ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+ *cs++ = 0;
+ *cs++ = CS_GPR_REG(0, 1).addr;
+ *cs++ = 0;
+ *cs++ = CS_GPR_REG(0, 2).addr;
+ *cs++ = 0;
}
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
- xe_sched_job_push(job);
+ cs = xe_lrc_emit_hwe_state_instructions(q, cs);
+
+ bb->len = cs - bb->cs;
+
+ ret = emit_job_sync(q, bb, HZ);
- timeout = dma_fence_wait_timeout(fence, false, HZ);
- dma_fence_put(fence);
xe_bb_free(bb, NULL);
- if (timeout < 0)
- return timeout;
- else if (!timeout)
- return -ETIME;
- return 0;
+ return ret;
}
int xe_gt_record_default_lrcs(struct xe_gt *gt)
@@ -321,14 +369,6 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
goto put_nop_q;
}
- /* Reload golden LRC to record the effect of any indirect W/A */
- err = emit_nop_job(gt, q);
- if (err) {
- xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
- hwe->name, ERR_PTR(err), q->guc->id);
- goto put_nop_q;
- }
-
xe_map_memcpy_from(xe, default_lrc,
&q->lrc[0]->bo->vmap,
xe_lrc_pphwsp_offset(q->lrc[0]),
@@ -348,6 +388,7 @@ put_exec_queue:
int xe_gt_init_early(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
if (IS_SRIOV_PF(gt_to_xe(gt))) {
@@ -375,6 +416,27 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
+ xe_mocs_init_early(gt);
+
+ /*
+ * Only after this point can GT-specific MMIO operations
+ * (including things like communication with the GuC)
+ * be performed.
+ */
+ xe_gt_mmio_init(gt);
+
+ err = xe_uc_init_noalloc(&gt->uc);
+ if (err)
+ return err;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
+
+ xe_gt_mcr_init_early(gt);
+ xe_pat_init(gt);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
return 0;
}
@@ -389,7 +451,7 @@ static void dump_pat_on_error(struct xe_gt *gt)
xe_pat_dump(gt, &p);
}
-static int gt_fw_domain_init(struct xe_gt *gt)
+static int gt_init_with_gt_forcewake(struct xe_gt *gt)
{
unsigned int fw_ref;
int err;
@@ -398,7 +460,15 @@ static int gt_fw_domain_init(struct xe_gt *gt)
if (!fw_ref)
return -ETIMEDOUT;
- if (!xe_gt_is_media_type(gt)) {
+ err = xe_uc_init(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
+ xe_gt_topology_init(gt);
+ xe_gt_mcr_init(gt);
+ xe_gt_enable_host_l2_vram(gt);
+
+ if (xe_gt_is_main_type(gt)) {
err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
if (err)
goto err_force_wake;
@@ -413,8 +483,10 @@ static int gt_fw_domain_init(struct xe_gt *gt)
xe_gt_mcr_init(gt);
err = xe_hw_engines_init_early(gt);
- if (err)
+ if (err) {
+ dump_pat_on_error(gt);
goto err_force_wake;
+ }
err = xe_hw_engine_class_sysfs_init(gt);
if (err)
@@ -435,13 +507,12 @@ static int gt_fw_domain_init(struct xe_gt *gt)
return 0;
err_force_wake:
- dump_pat_on_error(gt);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return err;
}
-static int all_fw_domain_init(struct xe_gt *gt)
+static int gt_init_with_all_forcewake(struct xe_gt *gt)
{
unsigned int fw_ref;
int err;
@@ -474,7 +545,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (err)
goto err_force_wake;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
/*
 		 * USM has its own SA pool so that it does not block behind user operations
*/
@@ -490,7 +561,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
}
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
struct xe_tile *tile = gt_to_tile(gt);
tile->migrate = xe_migrate_init(tile);
@@ -500,7 +571,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
}
- err = xe_uc_init_hw(&gt->uc);
+ err = xe_uc_load_hw(&gt->uc);
if (err)
goto err_force_wake;
@@ -510,7 +581,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
xe_gt_apply_ccs_mode(gt);
}
- if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
+ if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
if (IS_SRIOV_PF(gt_to_xe(gt))) {
@@ -528,39 +599,6 @@ err_force_wake:
return err;
}
-/*
- * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
- * enable CTB communication.
- */
-int xe_gt_init_hwconfig(struct xe_gt *gt)
-{
- unsigned int fw_ref;
- int err;
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
- return -ETIMEDOUT;
-
- xe_gt_mcr_init_early(gt);
- xe_pat_init(gt);
-
- err = xe_uc_init(&gt->uc);
- if (err)
- goto out_fw;
-
- err = xe_uc_init_hwconfig(&gt->uc);
- if (err)
- goto out_fw;
-
- xe_gt_topology_init(gt);
- xe_gt_mcr_init(gt);
- xe_gt_enable_host_l2_vram(gt);
-
-out_fw:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return err;
-}
-
static void xe_gt_fini(void *arg)
{
struct xe_gt *gt = arg;
@@ -588,17 +626,15 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
- err = xe_gt_pagefault_init(gt);
+ err = xe_gt_sysfs_init(gt);
if (err)
return err;
- xe_mocs_init_early(gt);
-
- err = xe_gt_sysfs_init(gt);
+ err = gt_init_with_gt_forcewake(gt);
if (err)
return err;
- err = gt_fw_domain_init(gt);
+ err = xe_gt_pagefault_init(gt);
if (err)
return err;
@@ -612,7 +648,7 @@ int xe_gt_init(struct xe_gt *gt)
xe_force_wake_init_engines(gt, gt_to_fw(gt));
- err = all_fw_domain_init(gt);
+ err = gt_init_with_all_forcewake(gt);
if (err)
return err;
@@ -700,7 +736,7 @@ static int vf_gt_restart(struct xe_gt *gt)
if (err)
return err;
- err = xe_uc_init_hw(&gt->uc);
+ err = xe_uc_load_hw(&gt->uc);
if (err)
return err;
@@ -738,11 +774,11 @@ static int do_gt_restart(struct xe_gt *gt)
if (err)
return err;
- err = xe_uc_init_hw(&gt->uc);
+ err = xe_uc_load_hw(&gt->uc);
if (err)
return err;
- if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
+ if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
if (IS_SRIOV_PF(gt_to_xe(gt)))
@@ -797,6 +833,9 @@ static int gt_reset(struct xe_gt *gt)
goto err_out;
}
+ if (IS_SRIOV_PF(gt_to_xe(gt)))
+ xe_gt_sriov_pf_stop_prepare(gt);
+
xe_uc_gucrc_disable(&gt->uc);
xe_uc_stop_prepare(&gt->uc);
xe_gt_pagefault_reset(gt);
@@ -857,7 +896,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_uc_stop_prepare(&gt->uc);
+ xe_uc_suspend_prepare(&gt->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 187fa6490eaf..41880979f4de 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -24,11 +24,10 @@
extern struct fault_attr gt_reset_failure;
static inline bool xe_fault_inject_gt_reset(void)
{
- return should_fail(&gt_reset_failure, 1);
+ return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(&gt_reset_failure, 1);
}
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
-int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
void xe_gt_mmio_init(struct xe_gt *gt);
@@ -107,6 +106,11 @@ static inline bool xe_gt_has_indirect_ring_state(struct xe_gt *gt)
xe_device_uc_enabled(gt_to_xe(gt));
}
+static inline bool xe_gt_is_main_type(struct xe_gt *gt)
+{
+ return gt->info.type == XE_GT_TYPE_MAIN;
+}
+
static inline bool xe_gt_is_media_type(struct xe_gt *gt)
{
return gt->info.type == XE_GT_TYPE_MEDIA;
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 2d63a69cbfa3..848618acdca8 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -92,22 +92,23 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
unsigned int fw_ref;
+ int ret = 0;
xe_pm_runtime_get(xe);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- xe_pm_runtime_put(xe);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto fw_put;
}
for_each_hw_engine(hwe, gt, id)
xe_hw_engine_print(hwe, p);
+fw_put:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
- return 0;
+ return ret;
}
static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
@@ -121,24 +122,6 @@ static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
return ret;
}
-static int force_reset(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset(gt);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
static int sa_info(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_tile *tile = gt_to_tile(gt);
@@ -299,20 +282,18 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
- {"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
- {"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync},
+/*
+ * Only for GT debugfs files which can be safely used on the VF as well:
+ * - without access to the GT privileged registers
+ * - without access to the PF-specific data
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
{"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
- {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
- {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
{"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
- {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
- {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
{"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
{"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc},
{"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
@@ -322,6 +303,87 @@ static const struct drm_info_list debugfs_list[] = {
{"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
};
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+ {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
+ {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
+ {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
+ {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
+};
+
+static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t *ppos,
+ void (*call)(struct xe_gt *), struct xe_gt *gt)
+{
+ bool yes;
+ int ret;
+
+ if (*ppos)
+ return -EINVAL;
+ ret = kstrtobool_from_user(userbuf, count, &yes);
+ if (ret < 0)
+ return ret;
+ if (yes)
+ call(gt);
+ return count;
+}
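
write_to_gt_call() parses a boolean from the user buffer with kstrtobool_from_user() and invokes the callback only for a truthy write, so writing "0" is a harmless no-op. Wiring another GT action through the same helper would look like this (force_flush is a hypothetical example, not part of this patch):

	static void force_flush(struct xe_gt *gt)
	{
		/* hypothetical GT action */
	}

	static ssize_t force_flush_write(struct file *file,
					 const char __user *userbuf,
					 size_t count, loff_t *ppos)
	{
		struct seq_file *s = file->private_data;
		struct xe_gt *gt = s->private;

		return write_to_gt_call(userbuf, count, ppos, force_flush, gt);
	}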
+
+static void force_reset(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_reset_async(gt);
+ xe_pm_runtime_put(xe);
+}
+
+static ssize_t force_reset_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, force_reset, gt);
+}
+
+static int force_reset_show(struct seq_file *s, void *unused)
+{
+ struct xe_gt *gt = s->private;
+
+ force_reset(gt); /* to be deprecated! */
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_reset);
+
+static void force_reset_sync(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_reset(gt);
+ xe_pm_runtime_put(xe);
+}
+
+static ssize_t force_reset_sync_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, force_reset_sync, gt);
+}
+
+static int force_reset_sync_show(struct seq_file *s, void *unused)
+{
+ struct xe_gt *gt = s->private;
+
+ force_reset_sync(gt); /* to be deprecated! */
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_reset_sync);
+
void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -345,10 +407,19 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
*/
root->d_inode->i_private = gt;
- drm_debugfs_create_files(debugfs_list,
- ARRAY_SIZE(debugfs_list),
+ /* VF safe */
+ debugfs_create_file("force_reset", 0600, root, gt, &force_reset_fops);
+ debugfs_create_file("force_reset_sync", 0600, root, gt, &force_reset_sync_fops);
+
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
root, minor);
+ if (!IS_SRIOV_VF(xe))
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ root, minor);
+
xe_uc_debugfs_register(&gt->uc, root);
if (IS_SRIOV_PF(xe))
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 604bdc7c8173..60d9354e7dbf 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -32,13 +32,18 @@
* Xe's Freq provides a sysfs API for frequency management:
*
* device/tile#/gt#/freq0/<item>_freq *read-only* files:
+ *
* - act_freq: The actual resolved frequency decided by PCODE.
* - cur_freq: The current one requested by GuC PC to the PCODE.
* - rpn_freq: The Render Performance (RP) N level, which is the minimal one.
+ * - rpa_freq: The Render Performance (RP) A level, which is the achievable one.
+ *             Calculated by PCODE at runtime based on multiple running conditions.
 * - rpe_freq: The Render Performance (RP) E level, which is the efficient one.
+ *             Calculated by PCODE at runtime based on multiple running conditions.
* - rp0_freq: The Render Performance (RP) 0 level, which is the maximum one.
*
* device/tile#/gt#/freq0/<item>_freq *read-write* files:
+ *
* - min_freq: Min frequency request.
* - max_freq: Max frequency request.
* If max <= min, then freq_min becomes a fixed frequency request.
@@ -56,9 +61,10 @@ dev_to_xe(struct device *dev)
return gt_to_xe(kobj_to_gt(dev->kobj.parent));
}
-static ssize_t act_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t act_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -68,11 +74,12 @@ static ssize_t act_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(act_freq);
+static struct kobj_attribute attr_act_freq = __ATTR_RO(act_freq);
-static ssize_t cur_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cur_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -85,11 +92,12 @@ static ssize_t cur_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(cur_freq);
+static struct kobj_attribute attr_cur_freq = __ATTR_RO(cur_freq);
-static ssize_t rp0_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rp0_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -99,11 +107,12 @@ static ssize_t rp0_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rp0_freq);
+static struct kobj_attribute attr_rp0_freq = __ATTR_RO(rp0_freq);
-static ssize_t rpe_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpe_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -113,11 +122,12 @@ static ssize_t rpe_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rpe_freq);
+static struct kobj_attribute attr_rpe_freq = __ATTR_RO(rpe_freq);
-static ssize_t rpa_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpa_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -127,20 +137,22 @@ static ssize_t rpa_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rpa_freq);
+static struct kobj_attribute attr_rpa_freq = __ATTR_RO(rpa_freq);
-static ssize_t rpn_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpn_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpn_freq(pc));
}
-static DEVICE_ATTR_RO(rpn_freq);
+static struct kobj_attribute attr_rpn_freq = __ATTR_RO(rpn_freq);
-static ssize_t min_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t min_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -154,9 +166,10 @@ static ssize_t min_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t min_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -173,11 +186,12 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(min_freq);
+static struct kobj_attribute attr_min_freq = __ATTR_RW(min_freq);
-static ssize_t max_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -191,9 +205,10 @@ static ssize_t max_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t max_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -210,17 +225,17 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(max_freq);
+static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq);
static const struct attribute *freq_attrs[] = {
- &dev_attr_act_freq.attr,
- &dev_attr_cur_freq.attr,
- &dev_attr_rp0_freq.attr,
- &dev_attr_rpa_freq.attr,
- &dev_attr_rpe_freq.attr,
- &dev_attr_rpn_freq.attr,
- &dev_attr_min_freq.attr,
- &dev_attr_max_freq.attr,
+ &attr_act_freq.attr,
+ &attr_cur_freq.attr,
+ &attr_rp0_freq.attr,
+ &attr_rpa_freq.attr,
+ &attr_rpe_freq.attr,
+ &attr_rpn_freq.attr,
+ &attr_min_freq.attr,
+ &attr_max_freq.attr,
NULL
};
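
Every conversion in this file follows the same shape: the show/store callbacks now take a kobject, recover the struct device via kobj_to_dev(), and the attribute is declared with __ATTR_RO()/__ATTR_RW() instead of DEVICE_ATTR_*(). A minimal sketch of the converted form (the attribute name is illustrative):

	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	{
		struct device *dev = kobj_to_dev(kobj);
		struct xe_guc_pc *pc = dev_to_pc(dev);

		return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpn_freq(pc));
	}
	static struct kobj_attribute attr_example = __ATTR_RO(example);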
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index fbbace7b0b12..ffb210216aa9 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -121,7 +121,7 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
if (vcs_mask || vecs_mask)
gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE;
- if (!xe_gt_is_media_type(gt))
+ if (xe_gt_is_main_type(gt))
gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
if (xe->info.platform != XE_DG1) {
@@ -249,9 +249,10 @@ int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
-static ssize_t name_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
ssize_t ret;
@@ -262,11 +263,12 @@ static ssize_t name_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR_RO(name);
+static struct kobj_attribute name_attr = __ATTR_RO(name);
-static ssize_t idle_status_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t idle_status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
enum xe_gt_idle_state state;
@@ -277,6 +279,7 @@ static ssize_t idle_status_show(struct device *dev,
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
+static struct kobj_attribute idle_status_attr = __ATTR_RO(idle_status);
u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
{
@@ -291,10 +294,11 @@ u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
return residency;
}
-static DEVICE_ATTR_RO(idle_status);
-static ssize_t idle_residency_ms_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+
+static ssize_t idle_residency_ms_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
u64 residency;
@@ -305,12 +309,12 @@ static ssize_t idle_residency_ms_show(struct device *dev,
return sysfs_emit(buff, "%llu\n", residency);
}
-static DEVICE_ATTR_RO(idle_residency_ms);
+static struct kobj_attribute idle_residency_attr = __ATTR_RO(idle_residency_ms);
static const struct attribute *gt_idle_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_idle_status.attr,
- &dev_attr_idle_residency_ms.attr,
+ &name_attr.attr,
+ &idle_status_attr.attr,
+ &idle_residency_attr.attr,
NULL,
};
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 605aad3554e7..64a2f0d6aaf9 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -345,7 +345,8 @@ fallback:
* Some older platforms don't have tables or don't have complete tables.
* Newer platforms should always have the required info.
*/
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000)
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000 &&
+ !gt_to_xe(gt)->info.force_execlist)
xe_gt_err(gt, "Slice/Subslice counts missing from hwconfig table; using typical fallback values\n");
if (gt_to_xe(gt)->info.platform == XE_PVC)
@@ -419,12 +420,6 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
}
-static void init_steering_inst0(struct xe_gt *gt)
-{
- gt->steering[INSTANCE0].group_target = 0; /* unused */
- gt->steering[INSTANCE0].instance_target = 0; /* unused */
-}
-
static const struct {
const char *name;
void (*init)(struct xe_gt *gt);
@@ -435,7 +430,7 @@ static const struct {
[DSS] = { "DSS", init_steering_dss },
[OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm },
[SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi },
- [INSTANCE0] = { "INSTANCE 0", init_steering_inst0 },
+ [INSTANCE0] = { "INSTANCE 0", NULL },
[IMPLICIT_STEERING] = { "IMPLICIT", NULL },
};
@@ -445,25 +440,17 @@ static const struct {
*
* Perform early software only initialization of the MCR lock to allow
* the synchronization on accessing the STEER_SEMAPHORE register and
- * use the xe_gt_mcr_multicast_write() function.
+ * use the xe_gt_mcr_multicast_write() function, plus set up the
+ * minimal steering needed to safely access the MCR registers
+ * required for VRAM/CCS probing.
*/
void xe_gt_mcr_init_early(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);
spin_lock_init(&gt->mcr_lock);
-}
-
-/**
- * xe_gt_mcr_init - Normal initialization of the MCR support
- * @gt: GT structure
- *
- * Perform normal initialization of the MCR for all usages.
- */
-void xe_gt_mcr_init(struct xe_gt *gt)
-{
- struct xe_device *xe = gt_to_xe(gt);
if (IS_SRIOV_VF(xe))
return;
@@ -504,10 +491,27 @@ void xe_gt_mcr_init(struct xe_gt *gt)
}
}
+	/* Mark instance 0 as initialized; we need this early for the VRAM and CCS probe. */
+ gt->steering[INSTANCE0].initialized = true;
+}
+
+/**
+ * xe_gt_mcr_init - Normal initialization of the MCR support
+ * @gt: GT structure
+ *
+ * Perform normal initialization of the MCR for all usages.
+ */
+void xe_gt_mcr_init(struct xe_gt *gt)
+{
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
/* Select non-terminated steering target for each type */
- for (int i = 0; i < NUM_STEERING_TYPES; i++)
+ for (int i = 0; i < NUM_STEERING_TYPES; i++) {
+ gt->steering[i].initialized = true;
if (gt->steering[i].ranges && xe_steering_types[i].init)
xe_steering_types[i].init(gt);
+ }
}
/**
@@ -569,6 +573,10 @@ bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
if (xe_mmio_in_range(&gt->mmio, &gt->steering[type].ranges[i], reg)) {
+ drm_WARN(&gt_to_xe(gt)->drm, !gt->steering[type].initialized,
+ "Uninitialized usage of MCR register %s/%#x\n",
+ xe_steering_types[type].name, reg.addr);
+
*group = gt->steering[type].group_target;
*instance = gt->steering[type].instance_target;
return true;
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index c5ad9a0a89c2..5a75d56d8558 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -14,6 +14,7 @@
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
@@ -68,31 +69,8 @@ static bool access_is_atomic(enum access_type access_type)
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
- return BIT(tile->id) & vma->tile_present &&
- !(BIT(tile->id) & vma->tile_invalidated);
-}
-
-static bool vma_matches(struct xe_vma *vma, u64 page_addr)
-{
- if (page_addr > xe_vma_end(vma) - 1 ||
- page_addr + SZ_4K - 1 < xe_vma_start(vma))
- return false;
-
- return true;
-}
-
-static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
-{
- struct xe_vma *vma = NULL;
-
- if (vm->usm.last_fault_vma) { /* Fast lookup */
- if (vma_matches(vm->usm.last_fault_vma, page_addr))
- vma = vm->usm.last_fault_vma;
- }
- if (!vma)
- vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
-
- return vma;
+ return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
+ vma->tile_invalidated);
}
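
Going by the open-coded test it replaces, xe_vm_has_valid_gpu_mapping() is expected to reduce to the same per-tile bit check; a sketch reconstructed from the removed code:

	static bool has_valid_gpu_mapping(u8 tile_id, u8 tile_present,
					  u8 tile_invalidated)
	{
		return (BIT(tile_id) & tile_present) &&
		       !(BIT(tile_id) & tile_invalidated);
	}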
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
@@ -143,7 +121,7 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
trace_xe_vma_pagefault(vma);
- /* Check if VMA is valid */
+ /* Check if VMA is valid, opportunistic check only */
if (vma_is_valid(tile, vma) && !atomic)
return 0;
@@ -180,7 +158,6 @@ retry_userptr:
dma_fence_wait(fence, false);
dma_fence_put(fence);
- vma->tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
drm_exec_fini(&exec);
@@ -231,7 +208,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
goto unlock_vm;
}
- vma = lookup_vma(vm, pf->page_addr);
+ vma = xe_vm_find_vma_by_addr(vm, pf->page_addr);
if (!vma) {
err = -EINVAL;
goto unlock_vm;
@@ -240,7 +217,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
atomic = access_is_atomic(pf->access_type);
if (xe_vma_is_cpu_addr_mirror(vma))
- err = xe_svm_handle_pagefault(vm, vma, gt_to_tile(gt),
+ err = xe_svm_handle_pagefault(vm, vma, gt,
pf->page_addr, atomic);
else
err = handle_vma_pagefault(gt, vma, atomic);
@@ -266,22 +243,22 @@ static int send_pagefault_reply(struct xe_guc *guc,
return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
-static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
+static void print_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
- drm_dbg(&xe->drm, "\n\tASID: %d\n"
- "\tVFID: %d\n"
- "\tPDATA: 0x%04x\n"
- "\tFaulted Address: 0x%08x%08x\n"
- "\tFaultType: %d\n"
- "\tAccessType: %d\n"
- "\tFaultLevel: %d\n"
- "\tEngineClass: %d %s\n"
- "\tEngineInstance: %d\n",
- pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
- lower_32_bits(pf->page_addr),
- pf->fault_type, pf->access_type, pf->fault_level,
- pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
- pf->engine_instance);
+ xe_gt_dbg(gt, "\n\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tPDATA: 0x%04x\n"
+ "\tFaulted Address: 0x%08x%08x\n"
+ "\tFaultType: %d\n"
+ "\tAccessType: %d\n"
+ "\tFaultLevel: %d\n"
+ "\tEngineClass: %d %s\n"
+ "\tEngineInstance: %d\n",
+ pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
+ lower_32_bits(pf->page_addr),
+ pf->fault_type, pf->access_type, pf->fault_level,
+ pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
+ pf->engine_instance);
}
#define PF_MSG_LEN_DW 4
@@ -333,7 +310,6 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
struct pf_queue *pf_queue;
unsigned long flags;
u32 asid;
@@ -358,7 +334,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
pf_queue->num_dw;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
- drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
+ xe_gt_warn(gt, "PageFault Queue full, shouldn't be possible\n");
}
spin_unlock_irqrestore(&pf_queue->lock, flags);
@@ -371,7 +347,6 @@ static void pf_queue_work_func(struct work_struct *w)
{
struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
struct xe_gt *gt = pf_queue->gt;
- struct xe_device *xe = gt_to_xe(gt);
struct xe_guc_pagefault_reply reply = {};
struct pagefault pf = {};
unsigned long threshold;
@@ -382,9 +357,9 @@ static void pf_queue_work_func(struct work_struct *w)
while (get_pagefault(pf_queue, &pf)) {
ret = handle_pagefault(gt, &pf);
if (unlikely(ret)) {
- print_pagefault(xe, &pf);
+ print_pagefault(gt, &pf);
pf.fault_unsuccessful = 1;
- drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
+ xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(ret));
}
reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
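
Switching the format from %d to %pe makes the log print the symbolic errno name instead of a raw number; ERR_PTR() only exists here to satisfy the pointer-based specifier. For example:

	xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(-EINVAL));
	/* logs: "Fault response: Unsuccessful -EINVAL" */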
@@ -435,9 +410,17 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
XE_MAX_EU_FUSE_BITS) * num_dss;
- /* user can issue separate page faults per EU and per CS */
+ /*
+ * user can issue separate page faults per EU and per CS
+ *
+	 * XXX: Multiplier required as compute UMDs are getting PF queue errors
+	 * without it. Follow up on why this multiplier is required.
+ */
+#define PF_MULTIPLIER 8
pf_queue->num_dw =
- (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+ (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
+ pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw);
+#undef PF_MULTIPLIER
pf_queue->gt = gt;
pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
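
A worked sizing example, with illustrative fused-in counts (the EU and engine numbers below are assumptions, not taken from the patch):

	/* e.g. 1024 fused-in EUs and 64 hardware engines */
	u32 num_dw = (1024 + 64) * 4 /* PF_MSG_LEN_DW */ * 8 /* PF_MULTIPLIER */;
						/* = 34816 dwords */
	num_dw = roundup_pow_of_two(num_dw);	/* = 65536 dwords */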
@@ -530,21 +513,21 @@ static int sub_granularity_in_byte(int val)
return (granularity_in_byte(val) / 32);
}
-static void print_acc(struct xe_device *xe, struct acc *acc)
+static void print_acc(struct xe_gt *gt, struct acc *acc)
{
- drm_warn(&xe->drm, "Access counter request:\n"
- "\tType: %s\n"
- "\tASID: %d\n"
- "\tVFID: %d\n"
- "\tEngine: %d:%d\n"
- "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
- "\tSub_Granularity Vector: 0x%08x\n"
- "\tVA Range base: 0x%016llx\n",
- acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
- acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
- granularity_in_byte(acc->granularity) / SZ_1K,
- sub_granularity_in_byte(acc->granularity) / SZ_1K,
- acc->sub_granularity, acc->va_range_base);
+ xe_gt_warn(gt, "Access counter request:\n"
+ "\tType: %s\n"
+ "\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tEngine: %d:%d\n"
+ "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
+ "\tSub_Granularity Vector: 0x%08x\n"
+ "\tVA Range base: 0x%016llx\n",
+ acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
+ acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
+ granularity_in_byte(acc->granularity) / SZ_1K,
+ sub_granularity_in_byte(acc->granularity) / SZ_1K,
+ acc->sub_granularity, acc->va_range_base);
}
static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
@@ -642,7 +625,6 @@ static void acc_queue_work_func(struct work_struct *w)
{
struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
struct xe_gt *gt = acc_queue->gt;
- struct xe_device *xe = gt_to_xe(gt);
struct acc acc = {};
unsigned long threshold;
int ret;
@@ -652,8 +634,8 @@ static void acc_queue_work_func(struct work_struct *w)
while (get_acc(acc_queue, &acc)) {
ret = handle_acc(gt, &acc);
if (unlikely(ret)) {
- print_acc(xe, &acc);
- drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret);
+ print_acc(gt, &acc);
+ xe_gt_warn(gt, "ACC: Unsuccessful %pe\n", ERR_PTR(ret));
}
if (time_after(jiffies, threshold) &&
@@ -698,7 +680,7 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
queue_work(gt->usm.acc_wq, &acc_queue->worker);
} else {
- drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
+ xe_gt_warn(gt, "ACC Queue full, dropping ACC\n");
}
spin_unlock(&acc_queue->lock);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index c08efca6420e..bdbd15f3afe3 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -16,6 +16,7 @@
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
+#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_pm.h"
@@ -47,9 +48,16 @@ static int pf_alloc_metadata(struct xe_gt *gt)
static void pf_init_workers(struct xe_gt *gt)
{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}
+static void pf_fini_workers(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ disable_work_sync(&gt->sriov.pf.workers.restart);
+}
+
/**
* xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -79,6 +87,21 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static void pf_fini_action(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ pf_fini_workers(gt);
+}
+
+static int pf_init_late(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt);
+}
+
/**
* xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -95,7 +118,15 @@ int xe_gt_sriov_pf_init(struct xe_gt *gt)
if (err)
return err;
- return xe_gt_sriov_pf_migration_init(gt);
+ err = xe_gt_sriov_pf_migration_init(gt);
+ if (err)
+ return err;
+
+ err = pf_init_late(gt);
+ if (err)
+ return err;
+
+ return 0;
}
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
@@ -172,6 +203,25 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
pf_clear_vf_scratch_regs(gt, vfid);
}
+static void pf_cancel_restart(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ if (cancel_work_sync(&gt->sriov.pf.workers.restart))
+ xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
+}
+
+/**
+ * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on the PF.
+ */
+void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
+{
+ pf_cancel_restart(gt);
+}
+
static void pf_restart(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -211,3 +261,27 @@ void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
pf_queue_restart(gt);
}
+
+static void pf_flush_restart(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ flush_work(&gt->sriov.pf.workers.restart);
+}
+
+/**
+ * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt)
+{
+ /* don't wait if there is another ongoing reset */
+ if (xe_guc_read_stopped(&gt->uc.guc))
+ return -EBUSY;
+
+ pf_flush_restart(gt);
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index f474509411c0..e7fde3f9937a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -11,8 +11,10 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
int xe_gt_sriov_pf_init(struct xe_gt *gt);
+int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
+void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
#else
static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
@@ -29,6 +31,10 @@ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}
+static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
+{
+}
+
static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 10be109bf357..494909f74eb2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -104,13 +104,13 @@ static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs
}
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
void *klvs = xe_guc_buf_cpu_ptr(buf);
char name[8];
- xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n",
- xe_sriov_function_name(vfid, name, sizeof(name)),
- num_klvs, str_plural(num_klvs));
+ xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n",
+ xe_sriov_function_name(vfid, name, sizeof(name)),
+ num_klvs, str_plural(num_klvs));
xe_guc_klv_print(klvs, num_dwords, &p);
}
@@ -238,26 +238,35 @@ static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned i
}
/* Return: number of configuration dwords written */
-static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
+static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details)
{
u32 n = 0;
- if (xe_ggtt_node_allocated(config->ggtt_region)) {
- if (details) {
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
- }
-
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
+ cfg[n++] = lower_32_bits(start);
+ cfg[n++] = upper_32_bits(start);
}
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
+ cfg[n++] = lower_32_bits(size);
+ cfg[n++] = upper_32_bits(size);
+
return n;
}
/* Return: number of configuration dwords written */
+static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
+{
+ struct xe_ggtt_node *node = config->ggtt_region;
+
+ if (!xe_ggtt_node_allocated(node))
+ return 0;
+
+ return encode_ggtt(cfg, node->base.start, node->base.size, details);
+}
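
For reference, a call to the new encode_ggtt() helper above with details=true emits six dwords: each KLV is a tag followed by the 64-bit value split into lower/upper halves (the addresses below are illustrative):

	u32 cfg[6];
	u32 n = encode_ggtt(cfg, 0x100000ull, 0x400000ull, true);
	/*
	 * n == 6:
	 *   cfg[0] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START)
	 *   cfg[1] = 0x00100000, cfg[2] = 0x0	(start lo/hi)
	 *   cfg[3] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE)
	 *   cfg[4] = 0x00400000, cfg[5] = 0x0	(size lo/hi)
	 */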
+
+/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
@@ -282,8 +291,8 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
if (config->lmem_obj) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
- cfg[n++] = lower_32_bits(config->lmem_obj->size);
- cfg[n++] = upper_32_bits(config->lmem_obj->size);
+ cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj));
+ cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj));
}
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
@@ -332,6 +341,17 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
}
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
+ if (vfid == PFID) {
+ u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt));
+ u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start;
+
+ /* plain PF config data will never include a real GGTT region */
+ xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
+
+ /* fake PF GGTT config covers full GGTT range except reserved WOPCM */
+ num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true);
+ }
+
num_klvs = xe_guc_klv_count(cfg, num_dwords);
err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
@@ -376,7 +396,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
u64 spare;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -388,7 +408,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt)
static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -443,7 +463,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
int err;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
size = round_up(size, alignment);
@@ -492,7 +512,7 @@ static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_ggtt_node *node = config->ggtt_region;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}
@@ -560,7 +580,7 @@ int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
@@ -622,7 +642,7 @@ int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
int err = 0;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
if (!num_vfs)
return 0;
@@ -693,7 +713,7 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
fair = pf_estimate_fair_ggtt(gt, num_vfs);
@@ -1299,7 +1319,7 @@ static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
struct xe_bo *bo;
bo = config->lmem_obj;
- return bo ? bo->size : 0;
+ return bo ? xe_bo_size(bo) : 0;
}
static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
@@ -1327,7 +1347,17 @@ static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 si
static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
- /* TODO */
+ struct xe_lmtt *lmtt;
+ struct xe_tile *tile;
+ unsigned int tid;
+
+ xe_assert(xe, xe_device_has_lmtt(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+ xe_lmtt_invalidate_hw(lmtt);
+ }
}
static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
@@ -1388,7 +1418,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
if (err)
goto fail;
- offset += bo->size;
+ offset += xe_bo_size(bo);
}
}
@@ -1406,7 +1436,7 @@ fail:
static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
if (config->lmem_obj) {
@@ -1425,7 +1455,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, IS_DGFX(xe));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
size = round_up(size, pf_get_lmem_alignment(gt));
@@ -1444,15 +1474,23 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_NEEDS_2M |
- XE_BO_FLAG_PINNED);
+ bo = xe_bo_create_locked(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
+ err = xe_bo_pin(bo);
+ xe_bo_unlock(bo);
+ if (unlikely(err)) {
+ xe_bo_put(bo);
+ return err;
+ }
+
config->lmem_obj = bo;
if (xe_device_has_lmtt(xe)) {
@@ -1461,12 +1499,12 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
goto release;
}
- err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
+ err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo));
if (unlikely(err))
goto reset_lmtt;
xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
- vfid, bo->size, bo->size / SZ_1M);
+ vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M);
return 0;
reset_lmtt:
@@ -1512,6 +1550,8 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
+ xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));
+
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
err = pf_provision_vf_lmem(gt, vfid, size);
@@ -1542,7 +1582,7 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
int err = 0;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
if (!num_vfs)
return 0;
@@ -1619,9 +1659,9 @@ int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
- if (!IS_DGFX(gt_to_xe(gt)))
+ if (!xe_device_has_lmtt(gt_to_xe(gt)))
return 0;
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
@@ -1653,7 +1693,7 @@ int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
result = result ?: err;
err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
@@ -1981,7 +2021,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_device *xe = gt_to_xe(gt);
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
if (IS_DGFX(xe)) {
pf_release_vf_config_lmem(gt, config);
@@ -2072,7 +2112,7 @@ static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
* Only GGTT and LMEM requires to be cleared by the PF.
* GuC doorbell IDs and context IDs do not need any clearing.
*/
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
pf_sanitize_ggtt(config->ggtt_region, vfid);
if (IS_DGFX(xe))
err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
@@ -2139,7 +2179,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
- bool is_primary = !xe_gt_is_media_type(gt);
+ bool is_primary = xe_gt_is_main_type(gt);
bool valid_ggtt, valid_ctxs, valid_dbs;
bool valid_any, valid_all;
@@ -2155,7 +2195,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_all = valid_all && valid_ggtt;
valid_any = valid_any || (valid_ggtt && is_primary);
- if (IS_DGFX(xe)) {
+ if (xe_device_has_lmtt(xe)) {
bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
@@ -2339,7 +2379,7 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return -EINVAL;
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
drm_printf(&p, "restoring VF%u config:\n", vfid);
xe_guc_klv_print(buf, size / sizeof(u32), &p);
@@ -2356,6 +2396,35 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return err;
}
+static void pf_prepare_self_config(struct xe_gt *gt)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);
+
+ /*
+	 * We want the PF to be allowed to use all of the context IDs, doorbell
+	 * IDs and the whole usable GGTT area. While we can store the ctxs/dbs
+	 * numbers directly in the config structure, we can't do the same with
+	 * the GGTT configuration, so let it be prepared on demand while
+	 * pushing KLVs.
+ */
+ config->num_ctxs = GUC_ID_MAX;
+ config->num_dbs = GUC_NUM_DOORBELLS;
+}
+
+static int pf_push_self_config(struct xe_gt *gt)
+{
+ int err;
+
+ err = pf_push_full_vf_config(gt, PFID);
+ if (err) {
+ xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
+ return 0;
+}
+
static void fini_config(void *arg)
{
struct xe_gt *gt = arg;
@@ -2379,9 +2448,18 @@ static void fini_config(void *arg)
int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ int err;
xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_prepare_self_config(gt);
+ err = pf_push_self_config(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (err)
+ return err;
+
return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
}
@@ -2399,6 +2477,10 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
unsigned int fail = 0, skip = 0;
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_push_self_config(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
for (n = 1; n <= total_vfs; n++) {
if (xe_gt_sriov_pf_config_is_empty(gt, n))
skip++;
@@ -2542,10 +2624,10 @@ int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
if (!config->lmem_obj)
continue;
- string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
+ string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
buf, sizeof(buf));
drm_printf(p, "VF%u:\t%zu\t(%s)\n",
- n, config->lmem_obj->size, buf);
+ n, xe_bo_size(config->lmem_obj), buf);
}
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 1f50aec3a059..4f7fff892bc0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -15,10 +15,11 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_monitor.h"
-#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf_service.h"
+#include "xe_tile.h"
static const char *control_cmd_to_string(u32 cmd)
{
@@ -1064,7 +1065,9 @@ static bool pf_exit_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid)
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA))
return false;
- xe_gt_sriov_pf_service_reset(gt, vfid);
+ if (xe_tile_is_root(gt->tile) && xe_gt_is_main_type(gt))
+ xe_sriov_pf_service_reset_vf(gt_to_xe(gt), vfid);
+
xe_gt_sriov_pf_monitor_flr(gt, vfid);
pf_enter_vf_flr_reset_mmio(gt, vfid);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index b2521dd6ec42..3ed245e04d0c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -22,6 +22,7 @@
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_pm.h"
+#include "xe_sriov_pf.h"
/*
* /sys/kernel/debug/dri/0/
@@ -51,27 +52,18 @@ static unsigned int extract_vfid(struct dentry *d)
* /sys/kernel/debug/dri/0/
* ├── gt0
* │   ├── pf
- * │   │   ├── ggtt_available
- * │   │   ├── ggtt_provisioned
* │   │   ├── contexts_provisioned
* │   │   ├── doorbells_provisioned
* │   │   ├── runtime_registers
* │   │   ├── negotiated_versions
* │   │   ├── adverse_events
+ * ├── gt1
+ * │   ├── pf
+ * │   │   ├── ...
*/
static const struct drm_info_list pf_info[] = {
{
- "ggtt_available",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_available_ggtt,
- },
- {
- "ggtt_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_ggtt,
- },
- {
"contexts_provisioned",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_config_print_ctxs,
@@ -82,24 +74,50 @@ static const struct drm_info_list pf_info[] = {
.data = xe_gt_sriov_pf_config_print_dbs,
},
{
- "lmem_provisioned",
+ "runtime_registers",
.show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_lmem,
+ .data = xe_gt_sriov_pf_service_print_runtime,
},
{
- "runtime_registers",
+ "adverse_events",
.show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_service_print_runtime,
+ .data = xe_gt_sriov_pf_monitor_print_events,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ * │   │   ├── ggtt_available
+ * │   │   ├── ggtt_provisioned
+ */
+
+static const struct drm_info_list pf_ggtt_info[] = {
+ {
+ "ggtt_available",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_available_ggtt,
},
{
- "negotiated_versions",
+ "ggtt_provisioned",
.show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_service_print_version,
+ .data = xe_gt_sriov_pf_config_print_ggtt,
},
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ * │   │   ├── lmem_provisioned
+ */
+
+static const struct drm_info_list pf_lmem_info[] = {
{
- "adverse_events",
+ "lmem_provisioned",
.show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_monitor_print_events,
+ .data = xe_gt_sriov_pf_config_print_lmem,
},
};
@@ -188,7 +206,8 @@ static int CONFIG##_set(void *data, u64 val) \
return -EOVERFLOW; \
\
xe_pm_runtime_get(xe); \
- err = xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
+ err = xe_sriov_pf_wait_ready(xe) ?: \
+ xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
xe_pm_runtime_put(xe); \
\
return err; \
@@ -283,10 +302,10 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
xe_gt_assert(gt, gt == extract_gt(parent));
xe_gt_assert(gt, vfid == extract_vfid(parent));
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare",
0644, parent, parent, &ggtt_fops);
- if (IS_DGFX(gt_to_xe(gt)))
+ if (xe_device_has_lmtt(gt_to_xe(gt)))
debugfs_create_file_unsafe(vfid ? "lmem_quota" : "lmem_spare",
0644, parent, parent, &lmem_fops);
}
@@ -532,6 +551,16 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
pfdentry->d_inode->i_private = gt;
drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
+ if (xe_gt_is_main_type(gt)) {
+ drm_debugfs_create_files(pf_ggtt_info,
+ ARRAY_SIZE(pf_ggtt_info),
+ pfdentry, minor);
+ if (xe_device_has_lmtt(gt_to_xe(gt)))
+ drm_debugfs_create_files(pf_lmem_info,
+ ARRAY_SIZE(pf_lmem_info),
+ pfdentry, minor);
+ }
+
pf_add_policy_attrs(gt, pfdentry);
pf_add_config_attrs(gt, pfdentry, PFID);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 4efde5f46b43..76dd9233ef9f 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -19,91 +19,7 @@
#include "xe_gt_sriov_pf_service_types.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
-
-static void pf_init_versions(struct xe_gt *gt)
-{
- BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
- BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);
-
- /* base versions may differ between platforms */
- gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
- gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;
-
- /* latest version is same for all platforms */
- gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
- gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
-}
-
-/* Return: 0 on success or a negative error code on failure. */
-static int pf_negotiate_version(struct xe_gt *gt,
- u32 wanted_major, u32 wanted_minor,
- u32 *major, u32 *minor)
-{
- struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base;
- struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest;
-
- xe_gt_assert(gt, base.major);
- xe_gt_assert(gt, base.major <= latest.major);
- xe_gt_assert(gt, (base.major < latest.major) || (base.minor <= latest.minor));
-
- /* VF doesn't care - return our latest */
- if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
- wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
- *major = latest.major;
- *minor = latest.minor;
- return 0;
- }
-
- /* VF wants newer than our - return our latest */
- if (wanted_major > latest.major) {
- *major = latest.major;
- *minor = latest.minor;
- return 0;
- }
-
- /* VF wants older than min required - reject */
- if (wanted_major < base.major ||
- (wanted_major == base.major && wanted_minor < base.minor)) {
- return -EPERM;
- }
-
- /* previous major - return wanted, as we should still support it */
- if (wanted_major < latest.major) {
- /* XXX: we are not prepared for multi-versions yet */
- xe_gt_assert(gt, base.major == latest.major);
- return -ENOPKG;
- }
-
- /* same major - return common minor */
- *major = wanted_major;
- *minor = min_t(u32, latest.minor, wanted_minor);
- return 0;
-}
-
-static void pf_connect(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
-{
- xe_gt_sriov_pf_assert_vfid(gt, vfid);
- xe_gt_assert(gt, major || minor);
-
- gt->sriov.pf.vfs[vfid].version.major = major;
- gt->sriov.pf.vfs[vfid].version.minor = minor;
-}
-
-static void pf_disconnect(struct xe_gt *gt, u32 vfid)
-{
- xe_gt_sriov_pf_assert_vfid(gt, vfid);
-
- gt->sriov.pf.vfs[vfid].version.major = 0;
- gt->sriov.pf.vfs[vfid].version.minor = 0;
-}
-
-static bool pf_is_negotiated(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
-{
- xe_gt_sriov_pf_assert_vfid(gt, vfid);
-
- return major == gt->sriov.pf.vfs[vfid].version.major &&
- minor <= gt->sriov.pf.vfs[vfid].version.minor;
-}
+#include "xe_sriov_pf_service.h"
static const struct xe_reg tgl_runtime_regs[] = {
RPM_CONFIG0, /* _MMIO(0x0d00) */
@@ -112,7 +28,6 @@ static const struct xe_reg tgl_runtime_regs[] = {
XELP_GT_SLICE_ENABLE, /* _MMIO(0x9138) */
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -124,7 +39,6 @@ static const struct xe_reg ats_m_runtime_regs[] = {
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -136,7 +50,6 @@ static const struct xe_reg pvc_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xA26C) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -150,7 +63,6 @@ static const struct xe_reg ver_1270_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -167,7 +79,6 @@ static const struct xe_reg ver_2000_runtime_regs[] = {
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -185,7 +96,6 @@ static const struct xe_reg ver_3000_runtime_regs[] = {
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -272,7 +182,7 @@ static void pf_prepare_runtime_info(struct xe_gt *gt)
read_many(gt, size, regs, values);
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
xe_gt_sriov_pf_service_print_runtime(gt, &p);
}
@@ -291,8 +201,6 @@ int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
{
int err;
- pf_init_versions(gt);
-
err = pf_alloc_runtime_info(gt);
if (unlikely(err))
goto failed;
@@ -317,47 +225,6 @@ void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
pf_prepare_runtime_info(gt);
}
-/**
- * xe_gt_sriov_pf_service_reset - Reset a connection with the VF.
- * @gt: the &xe_gt
- * @vfid: the VF identifier
- *
- * Reset a VF driver negotiated VF/PF ABI version.
- * After that point, the VF driver will have to perform new version handshake
- * to continue use of the PF services again.
- *
- * This function can only be called on PF.
- */
-void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid)
-{
- pf_disconnect(gt, vfid);
-}
-
-/* Return: 0 on success or a negative error code on failure. */
-static int pf_process_handshake(struct xe_gt *gt, u32 vfid,
- u32 wanted_major, u32 wanted_minor,
- u32 *major, u32 *minor)
-{
- int err;
-
- xe_gt_sriov_dbg_verbose(gt, "VF%u wants ABI version %u.%u\n",
- vfid, wanted_major, wanted_minor);
-
- err = pf_negotiate_version(gt, wanted_major, wanted_minor, major, minor);
-
- if (err < 0) {
- xe_gt_sriov_notice(gt, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
- vfid, wanted_major, wanted_minor, ERR_PTR(err));
- pf_disconnect(gt, vfid);
- } else {
- xe_gt_sriov_dbg(gt, "VF%u negotiated ABI version %u.%u\n",
- vfid, *major, *minor);
- pf_connect(gt, vfid, *major, *minor);
- }
-
- return 0;
-}
-
/* Return: length of the response message or a negative error code on failure. */
static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
const u32 *request, u32 len, u32 *response, u32 size)
@@ -377,7 +244,8 @@ static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);
- err = pf_process_handshake(gt, origin, wanted_major, wanted_minor, &major, &minor);
+ err = xe_sriov_pf_service_handshake_vf(gt_to_xe(gt), origin, wanted_major, wanted_minor,
+ &major, &minor);
if (err < 0)
return err;
@@ -436,8 +304,10 @@ static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
u32 remaining = 0;
int ret;
- if (!pf_is_negotiated(gt, origin, 1, 0))
+ /* this action is available from ABI 1.0 */
+ if (!xe_sriov_pf_service_is_negotiated(gt_to_xe(gt), origin, 1, 0))
return -EACCES;
+
if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
return -EMSGSIZE;
if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
@@ -534,33 +404,3 @@ int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p
return 0;
}
-
-/**
- * xe_gt_sriov_pf_service_print_version - Print ABI versions negotiated with VFs.
- * @gt: the &xe_gt
- * @p: the &drm_printer
- *
- * This function is for PF use only.
- */
-int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p)
-{
- struct xe_device *xe = gt_to_xe(gt);
- unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
- struct xe_gt_sriov_pf_service_version *version;
-
- xe_gt_assert(gt, IS_SRIOV_PF(xe));
-
- for (n = 1; n <= total_vfs; n++) {
- version = &gt->sriov.pf.vfs[n].version;
- if (!version->major && !version->minor)
- continue;
-
- drm_printf(p, "VF%u:\t%u.%u\n", n, version->major, version->minor);
- }
-
- return 0;
-}
-
-#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
-#include "tests/xe_gt_sriov_pf_service_test.c"
-#endif
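
The negotiation rules removed above migrate to the device-level
xe_sriov_pf_service_handshake_vf(). Restated as a self-contained sketch
(struct and helper names are illustrative rather than the driver's API, and
0 is assumed to be the "any" sentinel behind VF2PF_HANDSHAKE_MAJOR_ANY and
VF2PF_HANDSHAKE_MINOR_ANY):

	/* Sketch of the VF/PF ABI negotiation policy; not the driver's API.
	 * min() comes from <linux/minmax.h>, errnos from <linux/errno.h>. */
	struct abi_ver { unsigned int major, minor; };

	static int negotiate_abi(struct abi_ver base, struct abi_ver latest,
				 unsigned int want_major, unsigned int want_minor,
				 struct abi_ver *out)
	{
		/* "any/any" request: offer our latest */
		if (!want_major && !want_minor) {
			*out = latest;
			return 0;
		}
		/* VF asks for a newer major than ours: offer our latest */
		if (want_major > latest.major) {
			*out = latest;
			return 0;
		}
		/* older than the minimum supported base: reject */
		if (want_major < base.major ||
		    (want_major == base.major && want_minor < base.minor))
			return -EPERM;
		/* an older-but-valid major would need multi-version support */
		if (want_major < latest.major)
			return -ENOPKG;
		/* same major: settle on the common minor */
		out->major = want_major;
		out->minor = min(latest.minor, want_minor);
		return 0;
	}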
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
index 56aaadf0360d..10b02c9b651c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
@@ -14,9 +14,7 @@ struct xe_gt;
int xe_gt_sriov_pf_service_init(struct xe_gt *gt);
void xe_gt_sriov_pf_service_update(struct xe_gt *gt);
-void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid);
-int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p);
int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p);
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index a439261bf4d7..b282838d59e6 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -82,17 +82,17 @@ int xe_gt_sriov_vf_reset(struct xe_gt *gt)
}
static int guc_action_match_version(struct xe_guc *guc,
- u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
- u32 *branch, u32 *major, u32 *minor, u32 *patch)
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found)
{
u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
GUC_ACTION_VF2GUC_MATCH_VERSION),
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) |
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) |
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor),
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted->branch) |
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted->major) |
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted->minor),
};
u32 response[GUC_MAX_MMIO_MSG_LEN];
int ret;
@@ -106,120 +106,138 @@ static int guc_action_match_version(struct xe_guc *guc,
if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
return -EPROTO;
- *branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
- *major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
- *minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
- *patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);
+ memset(found, 0, sizeof(struct xe_uc_fw_version));
+ found->branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
+ found->major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
+ found->minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
+ found->patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);
return 0;
}
-static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
+static int guc_action_match_version_any(struct xe_guc *guc,
+ struct xe_uc_fw_version *found)
+{
+ struct xe_uc_fw_version wanted = {
+ .branch = GUC_VERSION_BRANCH_ANY,
+ .major = GUC_VERSION_MAJOR_ANY,
+ .minor = GUC_VERSION_MINOR_ANY,
+ .patch = 0
+ };
+
+ return guc_action_match_version(guc, &wanted, found);
+}
+
+static void vf_minimum_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
struct xe_device *xe = gt_to_xe(gt);
+ memset(ver, 0, sizeof(struct xe_uc_fw_version));
+
switch (xe->info.platform) {
case XE_TIGERLAKE ... XE_PVC:
/* 1.1 this is current baseline for Xe driver */
- *branch = 0;
- *major = 1;
- *minor = 1;
+ ver->branch = 0;
+ ver->major = 1;
+ ver->minor = 1;
break;
default:
/* 1.2 has support for the GMD_ID KLV */
- *branch = 0;
- *major = 1;
- *minor = 2;
+ ver->branch = 0;
+ ver->major = 1;
+ ver->minor = 2;
break;
}
}
-static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
+static void vf_wanted_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
/* for now it's the same as minimum */
- return vf_minimum_guc_version(gt, branch, major, minor);
+ return vf_minimum_guc_version(gt, ver);
}
static int vf_handshake_with_guc(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version wanted = {0};
struct xe_guc *guc = &gt->uc.guc;
- u32 wanted_branch, wanted_major, wanted_minor;
- u32 branch, major, minor, patch;
+ bool old = false;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
/* select wanted version - prefer previous (if any) */
if (guc_version->major || guc_version->minor) {
- wanted_branch = guc_version->branch;
- wanted_major = guc_version->major;
- wanted_minor = guc_version->minor;
+ wanted = *guc_version;
+ old = true;
} else {
- vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
- xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY);
+ vf_wanted_guc_version(gt, &wanted);
+ xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
+
+ /* First time we handshake, so record the minimum wanted */
+ gt->sriov.vf.wanted_guc_version = wanted;
}
- err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor,
- &branch, &major, &minor, &patch);
+ err = guc_action_match_version(guc, &wanted, guc_version);
if (unlikely(err))
goto fail;
- /* we don't support interface version change */
- if ((guc_version->major || guc_version->minor) &&
- (guc_version->branch != branch || guc_version->major != major ||
- guc_version->minor != minor)) {
- xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
- branch, major, minor, patch);
- xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
- guc_version->branch, guc_version->major,
- guc_version->minor, guc_version->patch);
- err = -EREMCHG;
- goto fail;
+ if (old) {
+ /* we don't support interface version change */
+ if (MAKE_GUC_VER_STRUCT(*guc_version) != MAKE_GUC_VER_STRUCT(wanted)) {
+ xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch);
+ xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
+ wanted.branch, wanted.major,
+ wanted.minor, wanted.patch);
+ err = -EREMCHG;
+ goto fail;
+ } else {
+ /* version is unchanged, no need to re-verify it */
+ return 0;
+ }
}
/* illegal */
- if (major > wanted_major) {
+ if (guc_version->major > wanted.major) {
err = -EPROTO;
goto unsupported;
}
/* there's no fallback on major version. */
- if (major != wanted_major) {
+ if (guc_version->major != wanted.major) {
err = -ENOPKG;
goto unsupported;
}
/* check against minimum version supported by us */
- vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
- xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY);
- if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) {
+ vf_minimum_guc_version(gt, &wanted);
+ xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
+ if (MAKE_GUC_VER_STRUCT(*guc_version) < MAKE_GUC_VER_STRUCT(wanted)) {
err = -ENOKEY;
goto unsupported;
}
xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
- branch, major, minor, patch);
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch);
- guc_version->branch = branch;
- guc_version->major = major;
- guc_version->minor = minor;
- guc_version->patch = patch;
return 0;
unsupported:
xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
- branch, major, minor, patch, ERR_PTR(err));
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch,
+ ERR_PTR(err));
fail:
xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
- wanted_major, wanted_minor, ERR_PTR(err));
+ wanted.major, wanted.minor, ERR_PTR(err));
/* try again with *any* just to query which version is supported */
- if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY,
- GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY,
- &branch, &major, &minor, &patch))
+ if (!guc_action_match_version_any(guc, &wanted))
xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
- branch, major, minor, patch);
+ wanted.branch, wanted.major, wanted.minor, wanted.patch);
return err;
}
@@ -250,6 +268,29 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_vf_guc_versions - Minimum required and found GuC ABI versions
+ * @gt: the &xe_gt
+ * @wanted: pointer to the xe_uc_fw_version to be filled with the wanted version
+ * @found: pointer to the xe_uc_fw_version to be filled with the found version
+ *
+ * This function is for VF use only and can only be used after a
+ * successful version handshake with the GuC.
+ */
+void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found)
+{
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+
+ if (wanted)
+ *wanted = gt->sriov.vf.wanted_guc_version;
+
+ if (found)
+ *found = gt->sriov.vf.guc_version;
+}
+
static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
{
u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
@@ -415,6 +456,7 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
start, start + size - 1, size / SZ_1K);
+ config->ggtt_shift = start - (s64)config->ggtt_base;
config->ggtt_base = start;
config->ggtt_size = size;
@@ -510,7 +552,7 @@ int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
if (unlikely(err))
return err;
- if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
+ if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
err = vf_get_lmem_info(gt);
if (unlikely(err))
return err;
@@ -560,106 +602,56 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
return gt->sriov.vf.self_config.lmem_size;
}
-static struct xe_ggtt_node *
-vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
-{
- struct xe_ggtt_node *node;
- int err;
-
- node = xe_ggtt_node_init(ggtt);
- if (IS_ERR(node))
- return node;
-
- err = xe_ggtt_node_insert_balloon(node, start, end);
- if (err) {
- xe_ggtt_node_fini(node);
- return ERR_PTR(err);
- }
-
- return node;
-}
-
-static int vf_balloon_ggtt(struct xe_gt *gt)
+/**
+ * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
+ * @gt: the &xe_gt
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the GGTT assigned to VF.
+ */
+u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_ggtt *ggtt = tile->mem.ggtt;
- struct xe_device *xe = gt_to_xe(gt);
- u64 start, end;
-
- xe_gt_assert(gt, IS_SRIOV_VF(xe));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
-
- if (!config->ggtt_size)
- return -ENODATA;
-
- /*
- * VF can only use part of the GGTT as allocated by the PF:
- *
- * WOPCM GUC_GGTT_TOP
- * |<------------ Total GGTT size ------------------>|
- *
- * VF GGTT base -->|<- size ->|
- *
- * +--------------------+----------+-----------------+
- * |////////////////////| block |\\\\\\\\\\\\\\\\\|
- * +--------------------+----------+-----------------+
- *
- * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
- */
-
- start = xe_wopcm_size(xe);
- end = config->ggtt_base;
- if (end != start) {
- tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
- if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
- return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
- }
-
- start = config->ggtt_base + config->ggtt_size;
- end = GUC_GGTT_TOP;
- if (end != start) {
- tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
- if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
- return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
- }
- }
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+ xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
- return 0;
+ return gt->sriov.vf.self_config.ggtt_size;
}
-static void deballoon_ggtt(struct drm_device *drm, void *arg)
+/**
+ * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
+ * @gt: the &xe_gt
+ *
+ * This function is for VF use only.
+ *
+ * Return: base offset of the GGTT assigned to VF.
+ */
+u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
{
- struct xe_tile *tile = arg;
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+ xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
- xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
+ return gt->sriov.vf.self_config.ggtt_base;
}
/**
- * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
- * @gt: the &xe_gt
+ * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
+ * @gt: the &xe_gt struct instance
*
* This function is for VF use only.
*
- * Return: 0 on success or a negative error code on failure.
+ * Return: The shift value; may be negative.
*/
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
+s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
{
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- if (xe_gt_is_media_type(gt))
- return 0;
+ struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
- err = vf_balloon_ggtt(gt);
- if (err)
- return err;
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
- return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile);
+ return config->ggtt_shift;
}
static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
@@ -694,21 +686,22 @@ static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
return 0;
}
-static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor)
+static void vf_connect_pf(struct xe_device *xe, u16 major, u16 minor)
{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_assert(xe, IS_SRIOV_VF(xe));
- gt->sriov.vf.pf_version.major = major;
- gt->sriov.vf.pf_version.minor = minor;
+ xe->sriov.vf.pf_version.major = major;
+ xe->sriov.vf.pf_version.minor = minor;
}
-static void vf_disconnect_pf(struct xe_gt *gt)
+static void vf_disconnect_pf(struct xe_device *xe)
{
- vf_connect_pf(gt, 0, 0);
+ vf_connect_pf(xe, 0, 0);
}
static int vf_handshake_with_pf(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR;
u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR;
u32 major = major_wanted, minor = minor_wanted;
@@ -724,13 +717,13 @@ static int vf_handshake_with_pf(struct xe_gt *gt)
}
xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor);
- vf_connect_pf(gt, major, minor);
+ vf_connect_pf(xe, major, minor);
return 0;
failed:
xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
major, minor, ERR_PTR(err));
- vf_disconnect_pf(gt);
+ vf_disconnect_pf(xe);
return err;
}
@@ -783,10 +776,12 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ struct xe_device *xe = gt_to_xe(gt);
- return major == gt->sriov.vf.pf_version.major &&
- minor <= gt->sriov.vf.pf_version.minor;
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+
+ return major == xe->sriov.vf.pf_version.major &&
+ minor <= xe->sriov.vf.pf_version.minor;
}
static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs)
@@ -974,7 +969,6 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
struct vf_runtime_reg *rr;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.pf_version.major);
xe_gt_assert(gt, !reg.vf);
if (reg.addr == GMD_ID.addr) {
@@ -1043,7 +1037,9 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
- if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
+ drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
+
+ if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
}
@@ -1079,19 +1075,21 @@ void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
*/
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
- struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version;
- u32 branch, major, minor;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version *wanted = &gt->sriov.vf.wanted_guc_version;
+ struct xe_sriov_vf_relay_version *pf_version = &xe->sriov.vf.pf_version;
+ struct xe_uc_fw_version ver;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
drm_printf(p, "GuC ABI:\n");
- vf_minimum_guc_version(gt, &branch, &major, &minor);
- drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor);
+ vf_minimum_guc_version(gt, &ver);
+ drm_printf(p, "\tbase:\t%u.%u.%u.*\n", ver.branch, ver.major, ver.minor);
- vf_wanted_guc_version(gt, &branch, &major, &minor);
- drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor);
+ drm_printf(p, "\twanted:\t%u.%u.%u.*\n",
+ wanted->branch, wanted->major, wanted->minor);
drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
guc_version->branch, guc_version->major,
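
Several hunks above replace field-by-field comparisons with
MAKE_GUC_VER_STRUCT(), treating a struct xe_uc_fw_version as one comparable
scalar. The packing idea, assuming the conventional byte-per-field layout
(the authoritative macros live in the GuC headers):

	/* Illustrative packing: plain integer comparison then orders
	 * versions, provided each field fits in 8 bits. */
	#define PACK_VER(maj, min, pat)	(((maj) << 16) | ((min) << 8) | (pat))
	#define PACK_VER_STRUCT(v)	PACK_VER((v).major, (v).minor, (v).patch)

	/* e.g. the minimum-version check in vf_handshake_with_guc() reads as:
	 *	if (PACK_VER_STRUCT(*guc_version) < PACK_VER_STRUCT(wanted))
	 *		return -ENOKEY;
	 */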
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index ba6c5d74e326..e0357f341a2d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -11,19 +11,26 @@
struct drm_printer;
struct xe_gt;
struct xe_reg;
+struct xe_uc_fw_version;
int xe_gt_sriov_vf_reset(struct xe_gt *gt);
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt);
+void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found);
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
+u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt);
+u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt);
+s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
+
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
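
With ballooning moved out of this layer, the header now exposes the raw VF
window (base and size) plus a signed post-migration shift. A hedged
caller-side sketch of rebasing a pre-migration GGTT address, given that the
vf_get_ggtt_info() hunk computes ggtt_shift as new_base - old_base:

	/* Illustrative consumer, not driver code: rebase an address that was
	 * recorded before the VF was migrated. */
	static u64 rebase_ggtt_addr(struct xe_gt *gt, u64 old_addr)
	{
		s64 shift = xe_gt_sriov_vf_ggtt_shift(gt);

		return old_addr + shift;	/* shift may be negative */
	}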
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index a57f13b5afcd..298dedf4b009 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -7,30 +7,7 @@
#define _XE_GT_SRIOV_VF_TYPES_H_
#include <linux/types.h>
-
-/**
- * struct xe_gt_sriov_vf_guc_version - GuC ABI version details.
- */
-struct xe_gt_sriov_vf_guc_version {
- /** @branch: branch version. */
- u8 branch;
- /** @major: major version. */
- u8 major;
- /** @minor: minor version. */
- u8 minor;
- /** @patch: patch version. */
- u8 patch;
-};
-
-/**
- * struct xe_gt_sriov_vf_relay_version - PF ABI version details.
- */
-struct xe_gt_sriov_vf_relay_version {
- /** @major: major version. */
- u16 major;
- /** @minor: minor version. */
- u16 minor;
-};
+#include "xe_uc_fw_types.h"
/**
* struct xe_gt_sriov_vf_selfconfig - VF configuration data.
@@ -40,6 +17,8 @@ struct xe_gt_sriov_vf_selfconfig {
u64 ggtt_base;
/** @ggtt_size: assigned size of the GGTT region. */
u64 ggtt_size;
+ /** @ggtt_shift: difference in ggtt_base on last migration */
+ s64 ggtt_shift;
/** @lmem_size: assigned size of the LMEM. */
u64 lmem_size;
/** @num_ctxs: assigned number of GuC submission context IDs. */
@@ -71,12 +50,12 @@ struct xe_gt_sriov_vf_runtime {
* struct xe_gt_sriov_vf - GT level VF virtualization data.
*/
struct xe_gt_sriov_vf {
+ /** @wanted_guc_version: minimum wanted GuC ABI version. */
+ struct xe_uc_fw_version wanted_guc_version;
/** @guc_version: negotiated GuC ABI version. */
- struct xe_gt_sriov_vf_guc_version guc_version;
+ struct xe_uc_fw_version guc_version;
/** @self_config: resource configurations. */
struct xe_gt_sriov_vf_selfconfig self_config;
- /** @pf_version: negotiated VF/PF ABI version. */
- struct xe_gt_sriov_vf_relay_version pf_version;
/** @runtime: runtime data retrieved from the PF. */
struct xe_gt_sriov_vf_runtime runtime;
};
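
Both bespoke version structs collapse onto struct xe_uc_fw_version shared
with the firmware code. Judging only from the field accesses in the hunks
above, the shared type carries at least the following (approximate shape
with assumed field widths; xe_uc_fw_types.h is authoritative):

	struct xe_uc_fw_version {
		u32 branch;	/* firmware branch */
		u32 major;
		u32 minor;
		u32 patch;
	};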
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 6155ea354432..30f942671c2b 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -27,6 +27,7 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
}
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
+ "svm_pagefault_count",
"tlb_inval_count",
"vma_pagefault_count",
"vma_pagefault_kb",
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index d556771f99d6..be3244d7133c 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -7,6 +7,7 @@
#define _XE_GT_STATS_TYPES_H_
enum xe_gt_stats_id {
+ XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT,
XE_GT_STATS_ID_TLB_INVAL,
XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 8db78d616b6f..aa962c783cdf 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -114,115 +114,115 @@ static u32 read_reason_vr_tdc(struct xe_gt *gt)
return tdc;
}
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool status = !!read_status(gt);
return sysfs_emit(buff, "%u\n", status);
}
-static DEVICE_ATTR_RO(status);
+static struct kobj_attribute attr_status = __ATTR_RO(status);
-static ssize_t reason_pl1_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl1_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl1 = !!read_reason_pl1(gt);
return sysfs_emit(buff, "%u\n", pl1);
}
-static DEVICE_ATTR_RO(reason_pl1);
+static struct kobj_attribute attr_reason_pl1 = __ATTR_RO(reason_pl1);
-static ssize_t reason_pl2_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl2_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl2 = !!read_reason_pl2(gt);
return sysfs_emit(buff, "%u\n", pl2);
}
-static DEVICE_ATTR_RO(reason_pl2);
+static struct kobj_attribute attr_reason_pl2 = __ATTR_RO(reason_pl2);
-static ssize_t reason_pl4_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl4_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl4 = !!read_reason_pl4(gt);
return sysfs_emit(buff, "%u\n", pl4);
}
-static DEVICE_ATTR_RO(reason_pl4);
+static struct kobj_attribute attr_reason_pl4 = __ATTR_RO(reason_pl4);
-static ssize_t reason_thermal_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_thermal_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermal = !!read_reason_thermal(gt);
return sysfs_emit(buff, "%u\n", thermal);
}
-static DEVICE_ATTR_RO(reason_thermal);
+static struct kobj_attribute attr_reason_thermal = __ATTR_RO(reason_thermal);
-static ssize_t reason_prochot_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_prochot_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool prochot = !!read_reason_prochot(gt);
return sysfs_emit(buff, "%u\n", prochot);
}
-static DEVICE_ATTR_RO(reason_prochot);
+static struct kobj_attribute attr_reason_prochot = __ATTR_RO(reason_prochot);
-static ssize_t reason_ratl_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_ratl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool ratl = !!read_reason_ratl(gt);
return sysfs_emit(buff, "%u\n", ratl);
}
-static DEVICE_ATTR_RO(reason_ratl);
+static struct kobj_attribute attr_reason_ratl = __ATTR_RO(reason_ratl);
-static ssize_t reason_vr_thermalert_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_thermalert_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermalert = !!read_reason_vr_thermalert(gt);
return sysfs_emit(buff, "%u\n", thermalert);
}
-static DEVICE_ATTR_RO(reason_vr_thermalert);
+static struct kobj_attribute attr_reason_vr_thermalert = __ATTR_RO(reason_vr_thermalert);
-static ssize_t reason_vr_tdc_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_tdc_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool tdc = !!read_reason_vr_tdc(gt);
return sysfs_emit(buff, "%u\n", tdc);
}
-static DEVICE_ATTR_RO(reason_vr_tdc);
+static struct kobj_attribute attr_reason_vr_tdc = __ATTR_RO(reason_vr_tdc);
static struct attribute *throttle_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_reason_pl1.attr,
- &dev_attr_reason_pl2.attr,
- &dev_attr_reason_pl4.attr,
- &dev_attr_reason_thermal.attr,
- &dev_attr_reason_prochot.attr,
- &dev_attr_reason_ratl.attr,
- &dev_attr_reason_vr_thermalert.attr,
- &dev_attr_reason_vr_tdc.attr,
+ &attr_status.attr,
+ &attr_reason_pl1.attr,
+ &attr_reason_pl2.attr,
+ &attr_reason_pl4.attr,
+ &attr_reason_thermal.attr,
+ &attr_reason_prochot.attr,
+ &attr_reason_ratl.attr,
+ &attr_reason_vr_thermalert.attr,
+ &attr_reason_vr_tdc.attr,
NULL
};
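
Each throttle attribute switches from a struct device_attribute to a
struct kobj_attribute, with the device recovered via kobj_to_dev() in every
show() callback. The conversion pattern in isolation (minimal sketch;
dev_to_gt() and read_status() stand in for the driver's own helpers visible
above):

	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buff)
	{
		struct device *dev = kobj_to_dev(kobj);	/* kobject -> device */
		struct xe_gt *gt = dev_to_gt(dev);

		return sysfs_emit(buff, "%u\n", !!read_status(gt));
	}
	static struct kobj_attribute attr_example = __ATTR_RO(example);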
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 084cbdeba8ea..086c12ee3d9d 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -138,6 +138,14 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
int pending_seqno;
/*
+ * We can get here before the CTs are even initialized if we're wedging
+ * very early, in which case there are not going to be any pending
+ * fences, so we can bail immediately.
+ */
+ if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
+ return;
+
+ /*
* CT channel is already disabled at this point. No new TLB requests can
* appear.
*/
@@ -322,6 +330,40 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
return 0;
}
+static int send_tlb_invalidation_all(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION_ALL,
+ 0, /* seqno, replaced in send_tlb_invalidation */
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
+ };
+
+ return send_tlb_invalidation(&gt->uc.guc, fence, action, ARRAY_SIZE(action));
+}
+
+/**
+ * xe_gt_tlb_invalidation_all - Invalidate all TLBs across PF and all VFs.
+ * @gt: the &xe_gt structure
+ * @fence: the &xe_gt_tlb_invalidation_fence to be signaled on completion
+ *
+ * Send a request to invalidate all TLBs across PF and all VFs.
+ *
+ * Return: 0 on success, a negative error code on failure
+ */
+int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence)
+{
+ int err;
+
+ xe_gt_assert(gt, gt == fence->gt);
+
+ err = send_tlb_invalidation_all(gt, fence);
+ if (err)
+ xe_gt_err(gt, "TLB invalidation request failed (%pe)", ERR_PTR(err));
+
+ return err;
+}
+
/*
* Ensure that roundup_pow_of_two(length) doesn't overflow.
* Note that roundup_pow_of_two() operates on unsigned long,
@@ -441,30 +483,6 @@ void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
}
/**
- * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion, can be NULL
- * @vma: VMA to invalidate
- *
- * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can use
- * the invalidation fence to wait for completion.
- *
- * Return: Negative error code on error, 0 on success
- */
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma)
-{
- xe_gt_assert(gt, vma);
-
- return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
-}
-
-/**
* xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
* @guc: guc
* @msg: message indicating TLB invalidation done
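
A usage sketch for the new invalidate-all entry point, assuming the
driver's existing stack-fence helpers keep their current shape (check
xe_gt_tlb_invalidation.h for the real signatures):

	/* Sketch: synchronously flush PF and all VF TLBs on one GT. */
	static int flush_all_tlbs_sync(struct xe_gt *gt)
	{
		struct xe_gt_tlb_invalidation_fence fence;
		int err;

		xe_gt_tlb_invalidation_fence_init(gt, &fence, true /* stack */);
		err = xe_gt_tlb_invalidation_all(gt, &fence);
		if (err)
			return err;
		xe_gt_tlb_invalidation_fence_wait(&fence);
		return 0;
	}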
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index abe9b03d543e..f7f0f2eaf4b5 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -19,10 +19,8 @@ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma);
void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
+int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 516c81e3b8dd..8c63e3263643 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -12,23 +12,20 @@
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_wa.h"
-static void
-load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
+static void load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs,
+ const struct xe_reg regs[])
{
- va_list argp;
u32 fuse_val[XE_MAX_DSS_FUSE_REGS] = {};
int i;
- if (drm_WARN_ON(&gt_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS))
- numregs = XE_MAX_DSS_FUSE_REGS;
+ xe_gt_assert(gt, numregs <= ARRAY_SIZE(fuse_val));
- va_start(argp, numregs);
for (i = 0; i < numregs; i++)
- fuse_val[i] = xe_mmio_read32(&gt->mmio, va_arg(argp, struct xe_reg));
- va_end(argp);
+ fuse_val[i] = xe_mmio_read32(&gt->mmio, regs[i]);
bitmap_from_arr32(mask, fuse_val, numregs * 32);
}
@@ -218,9 +215,19 @@ get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
void
xe_gt_topology_init(struct xe_gt *gt)
{
+ static const struct xe_reg geometry_regs[] = {
+ XELP_GT_GEOMETRY_DSS_ENABLE,
+ XE2_GT_GEOMETRY_DSS_1,
+ XE2_GT_GEOMETRY_DSS_2,
+ };
+ static const struct xe_reg compute_regs[] = {
+ XEHP_GT_COMPUTE_DSS_ENABLE,
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
+ XE2_GT_COMPUTE_DSS_2,
+ };
+ int num_geometry_regs, num_compute_regs;
struct xe_device *xe = gt_to_xe(gt);
struct drm_printer p;
- int num_geometry_regs, num_compute_regs;
get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs);
@@ -228,23 +235,18 @@ xe_gt_topology_init(struct xe_gt *gt)
* Register counts returned shouldn't exceed the number of registers
* passed as parameters below.
*/
- drm_WARN_ON(&xe->drm, num_geometry_regs > 3);
- drm_WARN_ON(&xe->drm, num_compute_regs > 3);
+ xe_gt_assert(gt, num_geometry_regs <= ARRAY_SIZE(geometry_regs));
+ xe_gt_assert(gt, num_compute_regs <= ARRAY_SIZE(compute_regs));
load_dss_mask(gt, gt->fuse_topo.g_dss_mask,
- num_geometry_regs,
- XELP_GT_GEOMETRY_DSS_ENABLE,
- XE2_GT_GEOMETRY_DSS_1,
- XE2_GT_GEOMETRY_DSS_2);
- load_dss_mask(gt, gt->fuse_topo.c_dss_mask, num_compute_regs,
- XEHP_GT_COMPUTE_DSS_ENABLE,
- XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
- XE2_GT_COMPUTE_DSS_2);
+ num_geometry_regs, geometry_regs);
+ load_dss_mask(gt, gt->fuse_topo.c_dss_mask,
+ num_compute_regs, compute_regs);
+
load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, &gt->fuse_topo.eu_type);
load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
- p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
-
+ p = xe_gt_dbg_printer(gt);
xe_gt_topology_dump(gt, &p);
}
@@ -288,11 +290,6 @@ xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum)
return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize);
}
-bool xe_dss_mask_empty(const xe_dss_mask_t mask)
-{
- return bitmap_empty(mask, XE_MAX_DSS_FUSE_BITS);
-}
-
/**
* xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant
* @gt: GT to check
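
load_dss_mask() drops its va_list in favour of a const array, so callers
keep register lists as static const tables and the callee can bound-check
against ARRAY_SIZE(). The safety gain in miniature (illustrative helper,
not driver code):

	/* Variadic form: the count and the argument list can silently
	 * disagree. Array form: the count is asserted and the data is
	 * const, so the compiler sees every element. */
	static void read_fuses(struct xe_gt *gt, u32 *out, int n,
			       const struct xe_reg regs[])
	{
		int i;

		xe_gt_assert(gt, n <= XE_MAX_DSS_FUSE_REGS);
		for (i = 0; i < n; i++)
			out[i] = xe_mmio_read32(&gt->mmio, regs[i]);
	}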
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index a72d26ba0653..c8140704ad4c 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -41,8 +41,6 @@ xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask)
unsigned int
xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum);
-bool xe_dss_mask_empty(const xe_dss_mask_t mask);
-
bool
xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 7def0959da35..96344c604726 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -377,6 +377,8 @@ struct xe_gt {
u16 group_target;
/** @steering.instance_target: instance to steer accesses to */
u16 instance_target;
+ /** @steering.initialized: Whether this steering range is initialized */
+ bool initialized;
} steering[NUM_STEERING_TYPES];
/**
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index bc5714a5b36b..b1d1d6da3758 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -29,6 +29,7 @@
#include "xe_guc_db_mgr.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
+#include "xe_guc_klv_helpers.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_relay.h"
@@ -59,7 +60,7 @@ static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
xe_assert(xe, addr < GUC_GGTT_TOP);
- xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);
+ xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr);
return addr;
}
@@ -420,7 +421,7 @@ static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 t
buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;
xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
- xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo));
return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
desc, buf, G2G_BUFFER_SIZE);
@@ -483,7 +484,8 @@ static int guc_g2g_alloc(struct xe_guc *guc)
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_ALL |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -569,6 +571,86 @@ err_deregister:
return err;
}
+static int __guc_opt_in_features_enable(struct xe_guc *guc, u64 addr, u32 num_dwords)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_OPT_IN_FEATURE_KLV,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ num_dwords
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static bool supports_dynamic_ics(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ /* Dynamic ICS is available on PVC and on Xe2 and newer platforms. */
+ if (xe->info.platform != XE_PVC && GRAPHICS_VER(xe) < 20)
+ return false;
+
+ /*
+ * The feature is currently not compatible with multi-lrc, so the GuC
+ * does not support it at all on the media engines (which are the main
+ * users of mlrc). On the primary GT side, to avoid it being used in
+ * conjunction with mlrc, we only enable it if we are in single CCS
+ * mode.
+ */
+ if (xe_gt_is_media_type(gt) || gt->ccs_mode > 1)
+ return false;
+
+ /*
+ * Dynamic ICS requires GuC v70.40.1, which maps to compatibility
+ * version v1.18.4.
+ */
+ return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 18, 4);
+}
+
+#define OPT_IN_MAX_DWORDS 16
+int xe_guc_opt_in_features_enable(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS);
+ u32 count = 0;
+ u32 *klvs;
+ int ret;
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ klvs = xe_guc_buf_cpu_ptr(buf);
+
+ /*
+ * The extra CAT error type opt-in was added in GuC v70.17.0, which maps
+ * to compatibility version v1.7.0.
+ * Note that the GuC allows enabling this KLV even on platforms that do
+ * not support the extra type; in such a case the returned type variable
+ * will be set to a known invalid value that we can check against.
+ */
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 7, 0))
+ klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_EXT_CAT_ERR_TYPE);
+
+ if (supports_dynamic_ics(guc))
+ klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH);
+
+ if (count) {
+ xe_assert(xe, count <= OPT_IN_MAX_DWORDS);
+
+ ret = __guc_opt_in_features_enable(guc, xe_guc_buf_flush(buf), count);
+ if (ret < 0) {
+ xe_gt_err(guc_to_gt(guc),
+ "failed to enable GuC opt-in features: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void guc_fini_hw(void *arg)
{
struct xe_guc *guc = arg;
@@ -576,7 +658,7 @@ static void guc_fini_hw(void *arg)
unsigned int fw_ref;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_uc_fini_hw(&guc_to_gt(guc)->uc);
+ xe_uc_sanitize_reset(&guc_to_gt(guc)->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
guc_g2g_fini(guc);
@@ -626,23 +708,51 @@ static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
return 0;
}
-static int vf_guc_init(struct xe_guc *guc)
+static int vf_guc_init_noalloc(struct xe_guc *guc)
{
+ struct xe_gt *gt = guc_to_gt(guc);
int err;
- xe_guc_comm_init_early(guc);
-
- err = xe_guc_ct_init(&guc->ct);
+ err = xe_gt_sriov_vf_bootstrap(gt);
if (err)
return err;
- err = xe_guc_relay_init(&guc->relay);
+ err = xe_gt_sriov_vf_query_config(gt);
if (err)
return err;
return 0;
}
+int xe_guc_init_noalloc(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ xe_guc_comm_init_early(guc);
+
+ ret = xe_guc_ct_init_noalloc(&guc->ct);
+ if (ret)
+ goto out;
+
+ ret = xe_guc_relay_init(&guc->relay);
+ if (ret)
+ goto out;
+
+ if (IS_SRIOV_VF(xe)) {
+ ret = vf_guc_init_noalloc(guc);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
+ return ret;
+}
+
int xe_guc_init(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
@@ -652,13 +762,13 @@ int xe_guc_init(struct xe_guc *guc)
guc->fw.type = XE_UC_FW_TYPE_GUC;
ret = xe_uc_fw_init(&guc->fw);
if (ret)
- goto out;
+ return ret;
if (!xe_uc_fw_is_enabled(&guc->fw))
return 0;
if (IS_SRIOV_VF(xe)) {
- ret = vf_guc_init(guc);
+ ret = xe_guc_ct_init(&guc->ct);
if (ret)
goto out;
return 0;
@@ -680,10 +790,6 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
- ret = xe_guc_relay_init(&guc->relay);
- if (ret)
- goto out;
-
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc);
@@ -692,8 +798,6 @@ int xe_guc_init(struct xe_guc *guc)
guc_init_params(guc);
- xe_guc_comm_init_early(guc);
-
return 0;
out:
@@ -709,6 +813,10 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
if (err)
return err;
+ err = xe_guc_buf_cache_init(&guc->buf);
+ if (err)
+ return err;
+
/* XXX xe_guc_db_mgr_init not needed for now */
return 0;
@@ -762,6 +870,10 @@ int xe_guc_post_load_init(struct xe_guc *guc)
xe_guc_ads_populate_post_load(&guc->ads);
+ ret = xe_guc_opt_in_features_enable(guc);
+ if (ret)
+ return ret;
+
if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
ret = guc_g2g_start(guc);
if (ret)
@@ -1097,14 +1209,6 @@ static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
int ret;
- ret = xe_gt_sriov_vf_bootstrap(gt);
- if (ret)
- return ret;
-
- ret = xe_gt_sriov_vf_query_config(gt);
- if (ret)
- return ret;
-
ret = xe_guc_hwconfig_init(guc);
if (ret)
return ret;
@@ -1115,13 +1219,17 @@ static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
ret = xe_gt_sriov_vf_connect(gt);
if (ret)
- return ret;
+ goto err_out;
ret = xe_gt_sriov_vf_query_runtime(gt);
if (ret)
- return ret;
+ goto err_out;
return 0;
+
+err_out:
+ xe_guc_sanitize(guc);
+ return ret;
}
/**
@@ -1284,6 +1392,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
+ bool lost = false;
int ret;
int i;
@@ -1317,6 +1426,12 @@ retry:
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
50000, &reply, false);
if (ret) {
+ /* scratch registers might be cleared during FLR, try once more */
+ if (!reply && !lost) {
+ xe_gt_dbg(gt, "GuC mmio request %#x: lost, trying again\n", request[0]);
+ lost = true;
+ goto retry;
+ }
timeout:
xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
request[0], reply);
@@ -1393,6 +1508,7 @@ proto:
/* Use data from the GuC response as our return value */
return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
+ALLOW_ERROR_INJECTION(xe_guc_mmio_send_recv, ERRNO);
int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
@@ -1508,30 +1624,32 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
xe_uc_fw_print(&guc->fw, p);
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
- return;
+ if (!IS_SRIOV_VF(gt_to_xe(gt))) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
+
+ status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ REG_FIELD_GET(GS_BOOTROM_MASK, status));
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ REG_FIELD_GET(GS_UKERNEL_MASK, status));
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ REG_FIELD_GET(GS_MIA_MASK, status));
+ drm_printf(p, "\tLog level = %d\n",
+ xe_guc_log_get_level(&guc->log));
+
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
+ }
- status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
-
- drm_printf(p, "\nGuC status 0x%08x:\n", status);
- drm_printf(p, "\tBootrom status = 0x%x\n",
- REG_FIELD_GET(GS_BOOTROM_MASK, status));
- drm_printf(p, "\tuKernel status = 0x%x\n",
- REG_FIELD_GET(GS_UKERNEL_MASK, status));
- drm_printf(p, "\tMIA Core status = 0x%x\n",
- REG_FIELD_GET(GS_MIA_MASK, status));
- drm_printf(p, "\tLog level = %d\n",
- xe_guc_log_get_level(&guc->log));
-
- drm_puts(p, "\nScratch registers:\n");
- for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
- drm_printf(p, "\t%2d: \t0x%x\n",
- i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
drm_puts(p, "\n");
xe_guc_ct_print(&guc->ct, p, false);
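
The new opt-in path gates each KLV on the GuC compatibility version, packed
into one comparable scalar. Distilled as a sketch (assuming
MAKE_GUC_VER(maj, min, pat) packs the fields so that a plain >= orders
versions; the real macros live in the GuC headers):

	/* Illustrative gate for the extra CAT error type KLV (GuC v70.17.0,
	 * i.e. compatibility v1.7.0, per the comment in the hunk above). */
	static bool guc_supports_ext_cat_err(struct xe_guc *guc)
	{
		return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 7, 0);
	}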
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 58338be44558..22cf019a11bf 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -26,6 +26,7 @@
struct drm_printer;
void xe_guc_comm_init_early(struct xe_guc *guc);
+int xe_guc_init_noalloc(struct xe_guc *guc);
int xe_guc_init(struct xe_guc *guc);
int xe_guc_init_post_hwconfig(struct xe_guc *guc);
int xe_guc_post_load_init(struct xe_guc *guc);
@@ -33,6 +34,7 @@ int xe_guc_reset(struct xe_guc *guc);
int xe_guc_upload(struct xe_guc *guc);
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc);
int xe_guc_enable_communication(struct xe_guc *guc);
+int xe_guc_opt_in_features_enable(struct xe_guc *guc);
int xe_guc_suspend(struct xe_guc *guc);
void xe_guc_notify(struct xe_guc *guc);
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 7031542a70ce..131cfc56be00 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -20,6 +20,7 @@
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
+#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
@@ -376,6 +377,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
&offset, &remain);
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
+ guc_waklv_enable_simple(ads,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH,
+ &offset, &remain);
+
size = guc_ads_waklv_size(ads) - remain;
if (!size)
return;
@@ -414,7 +420,8 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -710,8 +717,8 @@ static int guc_capture_prep_lists(struct xe_guc_ads *ads)
}
if (ads->capture_size != PAGE_ALIGN(total_size))
- xe_gt_dbg(gt, "ADS capture alloc size changed from %d to %d\n",
- ads->capture_size, PAGE_ALIGN(total_size));
+ xe_gt_dbg(gt, "Updated ADS capture size %d (was %d)\n",
+ PAGE_ALIGN(total_size), ads->capture_size);
return PAGE_ALIGN(total_size);
}
@@ -883,7 +890,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
xe_gt_assert(gt, ads->bo);
- xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
+ xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
guc_policies_init(ads);
guc_golden_lrc_init(ads);
guc_mapping_table_init_invalid(gt, &info_map);
@@ -907,7 +914,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
xe_gt_assert(gt, ads->bo);
- xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
+ xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
guc_policies_init(ads);
fill_engine_enable_masks(gt, &info_map);
guc_mmio_reg_state_init(ads);
@@ -998,16 +1005,16 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off
*/
int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
{
- struct xe_device *xe = ads_to_xe(ads);
- struct xe_gt *gt = ads_to_gt(ads);
- struct xe_tile *tile = gt_to_tile(gt);
struct guc_policies *policies;
- struct xe_bo *bo;
- int ret = 0;
+ struct xe_guc *guc = ads_to_guc(ads);
+ struct xe_device *xe = ads_to_xe(ads);
+ CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies));
- policies = kmalloc(sizeof(*policies), GFP_KERNEL);
- if (!policies)
- return -ENOMEM;
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ policies = xe_guc_buf_cpu_ptr(buf);
+ memset(policies, 0, sizeof(*policies));
policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
@@ -1017,16 +1024,5 @@ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
else
policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
- bo = xe_managed_bo_create_from_data(xe, tile, policies, sizeof(struct guc_policies),
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT);
- if (IS_ERR(bo)) {
- ret = PTR_ERR(bo);
- goto out;
- }
-
- ret = guc_ads_action_update_policies(ads, xe_bo_ggtt_addr(bo));
-out:
- kfree(policies);
- return ret;
+ return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
}
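
The scheduler-policy toggle swaps a kmalloc plus managed-BO dance for a
CLASS(xe_guc_buf, ...) guard, releasing the suballocated GuC buffer
automatically at scope exit. A generic sketch of the underlying
<linux/cleanup.h> idiom, with names invented for illustration (the driver's
DEFINE_CLASS for xe_guc_buf lives in xe_guc_buf.h):

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	/* Tie a heap allocation's lifetime to the enclosing scope. */
	DEFINE_CLASS(scratch, void *,
		     kfree(_T),			/* destructor, runs at scope exit */
		     kzalloc(size, GFP_KERNEL),	/* constructor expression */
		     size_t size);

	static int use_scratch(void)
	{
		CLASS(scratch, buf)(64);	/* kzalloc(64) now, kfree() at '}' */

		if (!buf)
			return -ENOMEM;
		/* ... fill buf, hand it to the consumer ... */
		return 0;
	}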
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 0193c94dd6a0..14a07dca48e7 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -37,10 +37,6 @@ int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
struct xe_gt *gt = cache_to_gt(cache);
struct xe_sa_manager *sam;
- /* XXX: currently it's useful only for the PF actions */
- if (!IS_SRIOV_PF(gt_to_xe(gt)))
- return 0;
-
sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
if (IS_ERR(sam))
return PTR_ERR(sam);
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index 9095618648bc..243dad3e2418 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -105,49 +105,49 @@ struct __guc_capture_parsed_output {
* 3. Incorrect order will trigger XE_WARN.
*/
#define COMMON_XELP_BASE_GLOBAL \
- { FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"}
+ { FORCEWAKE_GT, REG_32BIT, 0, 0, 0, "FORCEWAKE_GT"}
#define COMMON_BASE_ENGINE_INSTANCE \
- { RING_HWSTAM(0), REG_32BIT, 0, 0, "HWSTAM"}, \
- { RING_HWS_PGA(0), REG_32BIT, 0, 0, "RING_HWS_PGA"}, \
- { RING_HEAD(0), REG_32BIT, 0, 0, "RING_HEAD"}, \
- { RING_TAIL(0), REG_32BIT, 0, 0, "RING_TAIL"}, \
- { RING_CTL(0), REG_32BIT, 0, 0, "RING_CTL"}, \
- { RING_MI_MODE(0), REG_32BIT, 0, 0, "RING_MI_MODE"}, \
- { RING_MODE(0), REG_32BIT, 0, 0, "RING_MODE"}, \
- { RING_ESR(0), REG_32BIT, 0, 0, "RING_ESR"}, \
- { RING_EMR(0), REG_32BIT, 0, 0, "RING_EMR"}, \
- { RING_EIR(0), REG_32BIT, 0, 0, "RING_EIR"}, \
- { RING_IMR(0), REG_32BIT, 0, 0, "RING_IMR"}, \
- { RING_IPEHR(0), REG_32BIT, 0, 0, "IPEHR"}, \
- { RING_INSTDONE(0), REG_32BIT, 0, 0, "RING_INSTDONE"}, \
- { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, "INDIRECT_RING_STATE"}, \
- { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, "ACTHD"}, \
- { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_BBADDR"}, \
- { RING_START(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_START"}, \
- { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_DMA_FADD"}, \
- { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_STATUS"}, \
- { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
+ { RING_HWSTAM(0), REG_32BIT, 0, 0, 0, "HWSTAM"}, \
+ { RING_HWS_PGA(0), REG_32BIT, 0, 0, 0, "RING_HWS_PGA"}, \
+ { RING_HEAD(0), REG_32BIT, 0, 0, 0, "RING_HEAD"}, \
+ { RING_TAIL(0), REG_32BIT, 0, 0, 0, "RING_TAIL"}, \
+ { RING_CTL(0), REG_32BIT, 0, 0, 0, "RING_CTL"}, \
+ { RING_MI_MODE(0), REG_32BIT, 0, 0, 0, "RING_MI_MODE"}, \
+ { RING_MODE(0), REG_32BIT, 0, 0, 0, "RING_MODE"}, \
+ { RING_ESR(0), REG_32BIT, 0, 0, 0, "RING_ESR"}, \
+ { RING_EMR(0), REG_32BIT, 0, 0, 0, "RING_EMR"}, \
+ { RING_EIR(0), REG_32BIT, 0, 0, 0, "RING_EIR"}, \
+ { RING_IMR(0), REG_32BIT, 0, 0, 0, "RING_IMR"}, \
+ { RING_IPEHR(0), REG_32BIT, 0, 0, 0, "IPEHR"}, \
+ { RING_INSTDONE(0), REG_32BIT, 0, 0, 0, "RING_INSTDONE"}, \
+ { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, 0, "INDIRECT_RING_STATE"}, \
+ { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "ACTHD"}, \
+ { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_BBADDR"}, \
+ { RING_START(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_START"}, \
+ { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_DMA_FADD"}, \
+ { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_STATUS"}, \
+ { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
#define COMMON_XELP_RC_CLASS \
- { RCU_MODE, REG_32BIT, 0, 0, "RCU_MODE"}
+ { RCU_MODE, REG_32BIT, 0, 0, 0, "RCU_MODE"}
#define COMMON_XELP_RC_CLASS_INSTDONE \
- { SC_INSTDONE, REG_32BIT, 0, 0, "SC_INSTDONE"}, \
- { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA"}, \
- { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA2"}
+ { SC_INSTDONE, REG_32BIT, 0, 0, 0, "SC_INSTDONE"}, \
+ { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA"}, \
+ { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA2"}
#define XELP_VEC_CLASS_REGS \
- { SFC_DONE(0), 0, 0, 0, "SFC_DONE[0]"}, \
- { SFC_DONE(1), 0, 0, 0, "SFC_DONE[1]"}, \
- { SFC_DONE(2), 0, 0, 0, "SFC_DONE[2]"}, \
- { SFC_DONE(3), 0, 0, 0, "SFC_DONE[3]"}
+ { SFC_DONE(0), 0, 0, 0, 0, "SFC_DONE[0]"}, \
+ { SFC_DONE(1), 0, 0, 0, 0, "SFC_DONE[1]"}, \
+ { SFC_DONE(2), 0, 0, 0, 0, "SFC_DONE[2]"}, \
+ { SFC_DONE(3), 0, 0, 0, 0, "SFC_DONE[3]"}
/* XE_LP Global */
static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
@@ -352,7 +352,7 @@ static const struct __ext_steer_reg xehpg_extregs[] = {
static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
const struct __ext_steer_reg *extlist,
- int slice_id, int subslice_id)
+ u32 dss_id, u16 slice_id, u16 subslice_id)
{
if (!ext || !extlist)
return;
@@ -361,6 +361,7 @@ static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->dss_id = dss_id;
ext->regname = extlist->name;
}
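
The hunk above caches a dss_id in each steered-register descriptor at list-build time, so the devcoredump print path further down no longer needs the reverse lookup from steering flags back to a DSS id. A minimal userspace sketch of that caching, with invented field positions standing in for the real GUC_REGSET_* masks:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field positions; the real GUC_REGSET_* masks differ. */
#define STEERING_NEEDED        (1u << 0)
#define STEERING_GROUP(x)      (((uint32_t)(x) & 0xffu) << 8)
#define STEERING_INSTANCE(x)   (((uint32_t)(x) & 0xffu) << 16)

struct reg_descr {
	uint32_t flags;
	uint32_t dss_id;	/* cached at list-build time, as in the patch */
	const char *regname;
};

int main(void)
{
	uint16_t slice = 1, subslice = 3;
	uint32_t dss = 9;	/* value for_each_dss_steering() would yield */
	struct reg_descr d = {
		.flags = STEERING_NEEDED | STEERING_GROUP(slice) |
			 STEERING_INSTANCE(subslice),
		.dss_id = dss,	/* no reverse lookup needed when printing */
		.regname = "SC_INSTDONE",
	};

	/* Print side: no gt access required any more. */
	if (d.flags & STEERING_NEEDED)
		printf("\t%s[%u]: 0x%08x\n", d.regname,
		       (unsigned int)d.dss_id, 0xdeadbeefu);
	return 0;
}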
@@ -397,7 +398,7 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
u16 slice, subslice;
- int iter, i, total = 0;
+ int dss, i, total = 0;
const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists;
const struct __guc_mmio_reg_descr_group *list;
struct __guc_mmio_reg_descr_group *extlists;
@@ -454,15 +455,15 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
/* For steering registers, the list is generated at run-time */
extarray = (struct __guc_mmio_reg_descr *)extlists[0].list;
- for_each_dss_steering(iter, gt, slice, subslice) {
+ for_each_dss_steering(dss, gt, slice, subslice) {
for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
- __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ __fill_ext_reg(extarray, &xe_extregs[i], dss, slice, subslice);
++extarray;
}
if (has_xehpg_extregs)
for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
- __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
+ __fill_ext_reg(extarray, &xehpg_extregs[i], dss, slice, subslice);
++extarray;
}
}
@@ -1672,18 +1673,16 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
{
struct xe_gt *gt = snapshot->hwe->gt;
struct xe_device *xe = gt_to_xe(gt);
- struct xe_guc *guc = &gt->uc.guc;
struct xe_devcoredump *devcoredump = &xe->devcoredump;
struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot;
struct gcap_reg_list_info *reginfo = NULL;
u32 i, last_value = 0;
- bool is_ext, low32_ready = false;
+ bool low32_ready = false;
if (!list || !list->list || list->num_regs == 0)
return;
XE_WARN_ON(!devcore_snapshot->matched_node);
- is_ext = list == guc->capture->extlists;
reginfo = &devcore_snapshot->matched_node->reginfo[type];
/*
@@ -1749,17 +1748,12 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
*/
XE_WARN_ON(low32_ready);
- if (is_ext) {
- int dss, group, instance;
-
- group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
- instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
- dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
-
- drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
- } else {
+ if (FIELD_GET(GUC_REGSET_STEERING_NEEDED, reg_desc->flags))
+ drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname,
+ reg_desc->dss_id, value);
+ else
drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
- }
+
break;
}
}
@@ -1823,6 +1817,12 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
str_yes_no(snapshot->kernel_reserved));
for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+ /*
+ * FIXME: During devcoredump print we should avoid accessing the
+ * driver pointers for gt or engine. Printing should be done only
+ * using the snapshot captured. Here we are accessing the gt
+ * pointer. It should be fixed.
+ */
list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
capture_class, false);
snapshot_print_by_list_order(snapshot, p, type, list);
diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h
index ca2d390ccbee..6cb439115597 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h
@@ -39,6 +39,8 @@ struct __guc_mmio_reg_descr {
u32 flags;
/** @mask: The mask to apply */
u32 mask;
+ /** @dss_id: Cached index for steered registers */
+ u32 dss_id;
/** @regname: Name of the register */
const char *regname;
};
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 72ad576fc18e..3f4e6a46ff16 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -25,6 +25,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
+#include "xe_gt_sriov_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
@@ -34,6 +35,11 @@
#include "xe_pm.h"
#include "xe_trace_guc.h"
+static void receive_g2h(struct xe_guc_ct *ct);
+static void g2h_worker_func(struct work_struct *w);
+static void safe_mode_worker_func(struct work_struct *w);
+static void ct_exit_safe_mode(struct xe_guc_ct *ct);
+
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
enum {
/* Internal states, not error conditions */
@@ -79,22 +85,28 @@ struct g2h_fence {
u16 error;
u16 hint;
u16 reason;
+ bool cancel;
bool retry;
bool fail;
bool done;
};
+#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
+
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
+ memset(g2h_fence, 0, sizeof(*g2h_fence));
g2h_fence->response_buffer = response_buffer;
- g2h_fence->response_data = 0;
- g2h_fence->response_len = 0;
- g2h_fence->fail = false;
- g2h_fence->retry = false;
- g2h_fence->done = false;
g2h_fence->seqno = ~0x0;
}
+static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
+{
+ g2h_fence->cancel = true;
+ g2h_fence->fail = true;
+ g2h_fence->done = true;
+}
+
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
return g2h_fence->seqno == ~0x0;
@@ -186,14 +198,11 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_ct *ct = arg;
+ ct_exit_safe_mode(ct);
destroy_workqueue(ct->g2h_wq);
xa_destroy(&ct->fence_lookup);
}
-static void receive_g2h(struct xe_guc_ct *ct);
-static void g2h_worker_func(struct work_struct *w);
-static void safe_mode_worker_func(struct work_struct *w);
-
static void primelockdep(struct xe_guc_ct *ct)
{
if (!IS_ENABLED(CONFIG_LOCKDEP))
@@ -204,12 +213,10 @@ static void primelockdep(struct xe_guc_ct *ct)
fs_reclaim_release(GFP_KERNEL);
}
-int xe_guc_ct_init(struct xe_guc_ct *ct)
+int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_bo *bo;
int err;
xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
@@ -235,21 +242,32 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
primelockdep(ct);
+ err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
+ if (err)
+ return err;
+
+ xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
+ ct->state = XE_GUC_CT_STATE_DISABLED;
+ return 0;
+}
+ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
+
+int xe_guc_ct_init(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
ct->bo = bo;
-
- err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
- if (err)
- return err;
-
- xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
- ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
@@ -370,9 +388,13 @@ static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
return ret > 0 ? -EPROTO : ret;
}
-static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
+static void guc_ct_change_state(struct xe_guc_ct *ct,
enum xe_guc_ct_state state)
{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct g2h_fence *g2h_fence;
+ unsigned long idx;
+
mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
@@ -384,8 +406,20 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
ct->g2h_outstanding = 0;
ct->state = state;
+ xe_gt_dbg(gt, "GuC CT communication channel %s\n",
+ state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
+ str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));
+
spin_unlock_irq(&ct->fast_lock);
+ /* cancel all in-flight send-recv requests */
+ xa_for_each(&ct->fence_lookup, idx, g2h_fence)
+ g2h_fence_cancel(g2h_fence);
+
+ /* make sure guc_ct_send_recv() will see g2h_fence changes */
+ smp_mb();
+ wake_up_all(&ct->g2h_fence_wq);
+
/*
* Lockdep doesn't like this under the fast lock and the destroy only
* needs to be serialized with the send path which ct lock provides.
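
guc_ct_change_state() now fails every in-flight send-recv instead of leaving waiters hanging: each fence is marked canceled and done, a full barrier orders those stores against the wakeup, and guc_ct_send_recv() turns the flag into -ECANCELED. A standalone model of that pairing using C11 atomics (the wait queue itself is elided):

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fence {
	atomic_bool done;
	atomic_bool cancel;
};

/* State changer: mark every pending fence canceled+done, then a full
 * barrier before the (elided) wake_up_all() so a waiter re-checking
 * the condition is guaranteed to see the new values. */
static void cancel_all(struct fence *fences, int n)
{
	for (int i = 0; i < n; i++) {
		atomic_store_explicit(&fences[i].cancel, true, memory_order_relaxed);
		atomic_store_explicit(&fences[i].done, true, memory_order_relaxed);
	}
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
	/* wake_up_all(&ct->g2h_fence_wq) would go here */
}

/* Waiter side: wait for ->done, then map ->cancel to -ECANCELED
 * instead of reporting a GuC error. */
static int wait_result(struct fence *f)
{
	while (!atomic_load_explicit(&f->done, memory_order_acquire))
		;	/* stands in for wait_event_timeout() */
	return atomic_load_explicit(&f->cancel, memory_order_relaxed) ?
	       -ECANCELED : 0;
}

int main(void)
{
	struct fence f = { false, false };

	cancel_all(&f, 1);
	printf("wait_result() = %d\n", wait_result(&f));	/* -ECANCELED */
	return 0;
}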
@@ -439,7 +473,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
- xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
@@ -455,11 +489,10 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
if (err)
goto err_out;
- xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
smp_mb();
wake_up_all(&ct->wq);
- xe_gt_dbg(gt, "GuC CT communication channel enabled\n");
if (ct_needs_safe_mode(ct))
ct_enter_safe_mode(ct);
@@ -500,7 +533,7 @@ static void stop_g2h_handler(struct xe_guc_ct *ct)
*/
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
- xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
ct_exit_safe_mode(ct);
stop_g2h_handler(ct);
}
@@ -513,7 +546,10 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
*/
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
- xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
+ if (!xe_guc_ct_initialized(ct))
+ return;
+
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
stop_g2h_handler(ct);
}
@@ -624,6 +660,47 @@ static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
spin_unlock_irq(&ct->fast_lock);
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
+{
+ unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ unsigned long entries[SZ_32];
+ unsigned int n;
+
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+
+ /* May be called under spinlock, so avoid sleeping */
+ ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
+#endif
+ ct->fast_req[slot].fence = fence;
+ ct->fast_req[slot].action = action;
+}
+#else
+static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
+{
+}
+#endif
+
+/*
+ * The CT protocol accepts a 16-bit fence. This field is fully owned by the
+ * driver, the GuC will just copy it to the reply message. Since we need to
+ * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
+ * we use one bit of the seqno as an indicator for that and a rolling counter
+ * for the remaining 15 bits.
+ */
+#define CT_SEQNO_MASK GENMASK(14, 0)
+#define CT_SEQNO_UNTRACKED BIT(15)
+static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
+{
+ u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
+
+ if (!is_g2h_fence)
+ seqno |= CT_SEQNO_UNTRACKED;
+
+ return seqno;
+}
+
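A compilable sketch of the 15+1-bit fence encoding described in the comment just moved above, mirroring next_ct_seqno(): the low 15 bits are a rolling counter and bit 15 marks FAST_REQUEST messages whose replies are not tracked.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CT_SEQNO_MASK      0x7fffu	/* GENMASK(14, 0) */
#define CT_SEQNO_UNTRACKED 0x8000u	/* BIT(15) */

static uint16_t fence_seqno;	/* rolling counter, as in struct xe_guc_ct */

static uint16_t next_ct_seqno(bool is_g2h_fence)
{
	uint16_t seqno = fence_seqno++ & CT_SEQNO_MASK;

	if (!is_g2h_fence)
		seqno |= CT_SEQNO_UNTRACKED;	/* FAST_REQUEST: no reply expected */
	return seqno;
}

int main(void)
{
	uint16_t tracked = next_ct_seqno(true);
	uint16_t fast = next_ct_seqno(false);

	/* the G2H handler can tell the two apart from bit 15 alone */
	printf("tracked=%#x fast=%#x untracked?=%d\n",
	       tracked, fast, !!(fast & CT_SEQNO_UNTRACKED));
	return 0;
}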
#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
@@ -700,6 +777,9 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
} else {
+ fast_req_track(ct, ct_fence_value,
+ FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
+
cmd[1] =
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
@@ -732,25 +812,6 @@ corrupted:
return -EPIPE;
}
-/*
- * The CT protocol accepts a 16 bits fence. This field is fully owned by the
- * driver, the GuC will just copy it to the reply message. Since we need to
- * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
- * we use one bit of the seqno as an indicator for that and a rolling counter
- * for the remaining 15 bits.
- */
-#define CT_SEQNO_MASK GENMASK(14, 0)
-#define CT_SEQNO_UNTRACKED BIT(15)
-static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
-{
- u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
-
- if (!is_g2h_fence)
- seqno |= CT_SEQNO_UNTRACKED;
-
- return seqno;
-}
-
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
@@ -759,7 +820,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u16 seqno;
int ret;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
xe_gt_assert(gt, !g2h_len || !g2h_fence);
xe_gt_assert(gt, !num_g2h || !g2h_fence);
xe_gt_assert(gt, !g2h_len || num_g2h);
@@ -1051,6 +1112,11 @@ retry_same_fence:
goto retry;
}
if (g2h_fence.fail) {
+ if (g2h_fence.cancel) {
+ xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
+ ret = -ECANCELED;
+ goto unlock;
+ }
xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
action[0], g2h_fence.error, g2h_fence.hint);
ret = -EIO;
@@ -1059,6 +1125,7 @@ retry_same_fence:
if (ret > 0)
ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+unlock:
mutex_unlock(&ct->lock);
return ret;
@@ -1088,6 +1155,7 @@ int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
+ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 *response_buffer)
@@ -1141,6 +1209,55 @@ static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
+{
+ u16 fence_min = U16_MAX, fence_max = 0;
+ struct xe_gt *gt = ct_to_gt(ct);
+ bool found = false;
+ unsigned int n;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ char *buf;
+#endif
+
+ lockdep_assert_held(&ct->lock);
+
+ for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
+ if (ct->fast_req[n].fence < fence_min)
+ fence_min = ct->fast_req[n].fence;
+ if (ct->fast_req[n].fence > fence_max)
+ fence_max = ct->fast_req[n].fence;
+
+ if (ct->fast_req[n].fence != fence)
+ continue;
+ found = true;
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ buf = kmalloc(SZ_4K, GFP_NOWAIT);
+ if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0))
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s",
+ fence, ct->fast_req[n].action, buf);
+ else
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
+ fence, ct->fast_req[n].action);
+ kfree(buf);
+#else
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
+ fence, ct->fast_req[n].action);
+#endif
+ break;
+ }
+
+ if (!found)
+ xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
+ fence, fence_min, fence_max, ct->fence_seqno);
+}
+#else
+static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
+{
+}
+#endif
+
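fast_req_track() stores each FAST_REQ into a 32-slot ring indexed by fence % 32, so an entry is overwritten once the seqno advances a full ring past it — exactly the "tracking buffer wrapped?" case fast_req_report() warns about below. A toy model of the slot collision (action values are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define N_SLOTS 32	/* SZ_32, as in struct xe_guc_ct */

struct fast_req { uint16_t fence, action; };

static struct fast_req ring[N_SLOTS];

static void track(uint16_t fence, uint16_t action)
{
	ring[fence % N_SLOTS] = (struct fast_req){ fence, action };
}

int main(void)
{
	track(0x0005, 0x3002);
	track(0x0025, 0x4600);	/* 0x25 % 32 == 0x05 % 32: evicts the first entry */

	printf("slot 5 now holds fence %#x action %#x\n",
	       ring[5].fence, ring[5].action);
	return 0;
}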
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
@@ -1169,6 +1286,9 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
else
xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
type, fence);
+
+ fast_req_report(ct, fence);
+
CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
@@ -1342,7 +1462,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
u32 action;
u32 *hxg;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
lockdep_assert_held(&ct->fast_lock);
if (ct->state == XE_GUC_CT_STATE_DISABLED)
@@ -1622,6 +1742,186 @@ static void g2h_worker_func(struct work_struct *w)
receive_g2h(ct);
}
+static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds,
+ u32 size, u32 idx, s64 shift)
+{
+ u32 hi, lo;
+ u64 offset;
+
+ lo = xe_map_rd_ring_u32(xe, cmds, idx, size);
+ hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size);
+ offset = make_u64(hi, lo);
+ offset += shift;
+ lo = lower_32_bits(offset);
+ hi = upper_32_bits(offset);
+ xe_map_wr_ring_u32(xe, cmds, idx, size, lo);
+ xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi);
+}
+
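xe_fixup_u64_in_cmds() reassembles a GGTT address from its two dwords, applies the signed migration shift, and splits it back. The same arithmetic in standalone form:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define make_u64(hi, lo) ((uint64_t)(uint32_t)(hi) << 32 | (uint32_t)(lo))

static void fixup_u64(uint32_t *lo, uint32_t *hi, int64_t shift)
{
	uint64_t addr = make_u64(*hi, *lo) + shift;

	*lo = (uint32_t)addr;		/* lower_32_bits() */
	*hi = (uint32_t)(addr >> 32);	/* upper_32_bits() */
}

int main(void)
{
	uint32_t lo = 0xffff0000, hi = 0x0;

	fixup_u64(&lo, &hi, 0x20000);	/* shift crosses the dword boundary */
	printf("-> %#" PRIx64 "\n", make_u64(hi, lo));	/* 0x100010000 */
	return 0;
}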
+/*
+ * Shift any GGTT addresses within a single message still left in the CTB
+ * from before post-migration recovery.
+ * @ct: pointer to CT struct of the target GuC
+ * @cmds: iomap buffer containing CT messages
+ * @head: start of the target message within the buffer
+ * @len: length of the target message
+ * @size: size of the commands buffer
+ * @shift: the address shift to be added to each GGTT reference
+ * Return: true if the message was fixed or needed no fixups, false on failure
+ */
+static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct,
+ struct iosys_map *cmds, u32 head,
+ u32 len, u32 size, s64 shift)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 msg[GUC_HXG_MSG_MIN_LEN];
+ u32 action, i, n;
+
+ xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN);
+
+ msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size);
+ action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
+
+ xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action);
+
+ switch (action) {
+ case XE_GUC_ACTION_REGISTER_CONTEXT:
+ if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN)
+ goto err_len;
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
+ break;
+ case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
+ if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN)
+ goto err_len;
+ n = xe_map_rd_ring_u32(xe, cmds, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size);
+ if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n)
+ goto err_len;
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
+ shift);
+ for (i = 0; i < n; i++)
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR
+ + 2 * i, shift);
+ break;
+ default:
+ break;
+ }
+ return true;
+
+err_len:
+ xe_gt_err(gt, "Skipped G2G %#x message fixups, unexpected length (%u)\n", action, len);
+ return false;
+}
+
+/*
+ * Apply fixups to the next outgoing CT message within the given CTB
+ * @ct: the &xe_guc_ct struct instance representing the target GuC
+ * @h2g: the &guc_ctb struct instance of the target buffer
+ * @shift: shift to be added to all GGTT addresses within the CTB
+ * @mhead: pointer to an integer storing the message start position; the
+ * position is advanced to the next message before this function returns
+ * @avail: size of the area available for parsing, that is, the length
+ * of all remaining messages stored within the CTB
+ * Return: size of the area available for parsing after one message
+ * has been parsed, that is, the length remaining from the updated mhead
+ */
+static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g,
+ s64 shift, u32 *mhead, s32 avail)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 msg[GUC_HXG_MSG_MIN_LEN];
+ u32 size = h2g->info.size;
+ u32 head = *mhead;
+ u32 len;
+
+ xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN);
+
+ /* Read header */
+ msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size);
+ len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
+
+ if (unlikely(len > (u32)avail)) {
+ xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n",
+ avail, len);
+ return 0;
+ }
+
+ head = (head + GUC_CTB_MSG_MIN_LEN) % size;
+ if (!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift))
+ return 0;
+ *mhead = (head + msg_len_to_hxg_len(len)) % size;
+
+ return avail - len;
+}
+
+/**
+ * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages
+ * @ct: pointer to CT struct of the target GuC
+ * @ggtt_shift: shift to be added to all GGTT addresses within the CTB
+ *
+ * Messages in the GuC-to-Host CTB are owned by the GuC and any fixups in
+ * them are made by the GuC. But the content of the Host-to-GuC CTB is
+ * owned by the KMD, so fixups to GGTT references in any pending messages
+ * must be applied here.
+ * This function updates GGTT offsets in payloads of pending H2G CTB
+ * messages (messages which were not consumed by GuC before the VF got
+ * paused).
+ */
+void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift)
+{
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+ struct xe_guc *guc = ct_to_guc(ct);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 head, tail, size;
+ s32 avail;
+
+ if (unlikely(h2g->info.broken))
+ return;
+
+ h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+ head = h2g->info.head;
+ tail = READ_ONCE(h2g->info.tail);
+ size = h2g->info.size;
+
+ if (unlikely(head > size))
+ goto corrupted;
+
+ if (unlikely(tail >= size))
+ goto corrupted;
+
+ avail = tail - head;
+
+ /* beware of buffer wrap case */
+ if (unlikely(avail < 0))
+ avail += size;
+ xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size);
+ xe_gt_assert(gt, avail >= 0);
+
+ while (avail > 0)
+ avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail);
+
+ return;
+
+corrupted:
+ xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n",
+ head, tail, size);
+ h2g->info.broken = true;
+}
+
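The head/tail math above treats the H2G buffer as a ring: the pending area is tail - head dwords, corrected when the producer has wrapped past the end of the buffer. A minimal sketch:

#include <stdio.h>

static int ctb_avail(unsigned int head, unsigned int tail, unsigned int size)
{
	int avail = (int)tail - (int)head;

	if (avail < 0)	/* tail wrapped around the ring */
		avail += size;
	return avail;
}

int main(void)
{
	/* no wrap: 16 dwords pending */
	printf("%d\n", ctb_avail(4, 20, 256));
	/* wrapped: tail restarted at the ring base, still 16 pending */
	printf("%d\n", ctb_avail(250, 10, 256));
	return 0;
}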
static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
bool want_ctb)
{
@@ -1632,7 +1932,7 @@ static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bo
return NULL;
if (ct->bo && want_ctb) {
- snapshot->ctb_size = ct->bo->size;
+ snapshot->ctb_size = xe_bo_size(ct->bo);
snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
}
@@ -1768,6 +2068,24 @@ void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
}
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * This helper lets the driver detect whether a fault injection test is
+ * currently active, so it can suppress unnecessary debug output. It
+ * normally returns zero, but the fault injection framework can override
+ * that to return an error. Since faults are injected through this
+ * function, the compiler must not optimize it into an inline function -
+ * hence the 'noinline' attribute: a static function defined in a header
+ * file would otherwise be inlined.
+ */
+noinline int xe_is_injection_active(void) { return 0; }
+ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
+#else
+int xe_is_injection_active(void) { return 0; }
+#endif
+
static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
{
struct xe_guc_log_snapshot *snapshot_log;
@@ -1778,6 +2096,12 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso
if (ctb)
ctb->info.broken = true;
+ /*
+ * A huge dump is generated when injecting errors into the GuC CT/MMIO
+ * functions, so suppress the dump while fault injection is active.
+ */
+ if (xe_is_injection_active())
+ return;
/* Ignore further errors after the first dump until a reset */
if (ct->dead.reported)
@@ -1828,10 +2152,9 @@ static void ct_dead_print(struct xe_dead_ct *dead)
return;
}
- drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);
-
/* Can't generate a genuine core dump at this point, so just do the good bits */
drm_puts(&lp, "**** Xe Device Coredump ****\n");
+ drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
xe_device_snapshot_print(xe, &lp);
drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 82c4ae458dda..18d4225e6502 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -11,6 +11,7 @@
struct drm_printer;
struct xe_device;
+int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct);
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
@@ -22,6 +23,13 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
+void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift);
+
+static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
+{
+ return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
+}
+
static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
{
return ct->state == XE_GUC_CT_STATE_ENABLED;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index 8e1b9d981d61..8b03b50313d9 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/iosys-map.h>
#include <linux/spinlock_types.h>
+#include <linux/stackdepot.h>
#include <linux/wait.h>
#include <linux/xarray.h>
@@ -104,6 +105,18 @@ struct xe_dead_ct {
/** snapshot_log: copy of GuC log at point of error */
struct xe_guc_log_snapshot *snapshot_log;
};
+
+/** struct xe_fast_req_fence - Used to track FAST_REQ messages by fence to match error responses */
+struct xe_fast_req_fence {
+ /** @fence: sequence number sent in H2G and return in G2H error */
+ u16 fence;
+ /** @action: H2G action code */
+ u16 action;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ /** @stack: call stack from when the H2G was sent */
+ depot_stack_handle_t stack;
+#endif
+};
#endif
/**
@@ -152,6 +165,8 @@ struct xe_guc_ct {
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
/** @dead: information for debugging dead CTs */
struct xe_dead_ct dead;
+ /** @fast_req: history of FAST_REQ messages for matching with G2H error responses */
+ struct xe_fast_req_fence fast_req[SZ_32];
#endif
};
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index c569ff456e74..0b102ab46c4d 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -17,101 +17,130 @@
#include "xe_macros.h"
#include "xe_pm.h"
-static struct xe_guc *node_to_guc(struct drm_info_node *node)
-{
- return node->info_ent->data;
-}
-
-static int guc_info(struct seq_file *m, void *data)
+/*
+ * guc_debugfs_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_guc specific in similar way how we handle &xe_gt
+ * specific files using &xe_gt_debugfs_simple_show.
+ *
+ * It is assumed that those debugfs files will be created on a directory
+ * entry whose grandparent struct dentry has d_inode->i_private pointing
+ * to the &xe_gt.
+ *
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0 # dent->d_parent->d_parent (d_inode->i_private == gt)
+ * │   ├── uc # dent->d_parent
+ * │   │   ├── guc_info # dent
+ * │   │   ├── guc_...
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function code that will actually print a file content::
+ *
+ * int (*print)(struct xe_guc *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int foo(struct xe_guc *guc, struct drm_printer *p)
+ * {
+ * drm_printf(p, "enabled %d\n", guc->submission_state.enabled);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list bar[] = {
+ * { .name = "foo", .show = guc_debugfs_show, .data = foo },
+ * };
+ *
+ * parent = debugfs_create_dir("uc", gtdir);
+ * drm_debugfs_create_files(bar, ARRAY_SIZE(bar), parent, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int guc_debugfs_show(struct seq_file *m, void *data)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct dentry *grandparent = parent->d_parent;
+ struct xe_gt *gt = grandparent->d_inode->i_private;
+ struct xe_device *xe = gt_to_xe(gt);
+ int (*print)(struct xe_guc *, struct drm_printer *) = node->info_ent->data;
+ int ret;
xe_pm_runtime_get(xe);
- xe_guc_print_info(guc, &p);
+ ret = print(&gt->uc.guc, &p);
xe_pm_runtime_put(xe);
- return 0;
+ return ret;
}
-static int guc_log(struct seq_file *m, void *data)
+static int guc_log(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_log_print(&guc->log, &p);
- xe_pm_runtime_put(xe);
-
+ xe_guc_log_print(&guc->log, p);
return 0;
}
-static int guc_log_dmesg(struct seq_file *m, void *data)
+static int guc_log_dmesg(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
-
- xe_pm_runtime_get(xe);
xe_guc_log_print_dmesg(&guc->log);
- xe_pm_runtime_put(xe);
-
return 0;
}
-static int guc_ctb(struct seq_file *m, void *data)
+static int guc_ctb(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_ct_print(&guc->ct, &p, true);
- xe_pm_runtime_put(xe);
-
+ xe_guc_ct_print(&guc->ct, p, true);
return 0;
}
-static int guc_pc(struct seq_file *m, void *data)
+static int guc_pc(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_pc_print(&guc->pc, &p);
- xe_pm_runtime_put(xe);
-
+ xe_guc_pc_print(&guc->pc, p);
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"guc_info", guc_info, 0},
- {"guc_log", guc_log, 0},
- {"guc_log_dmesg", guc_log_dmesg, 0},
- {"guc_ctb", guc_ctb, 0},
- {"guc_pc", guc_pc, 0},
+/*
+ * only for GuC debugfs files which can be safely used on the VF as well:
+ * - without access to the GuC privileged registers
+ * - without access to the PF specific GuC objects
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
+ { "guc_info", .show = guc_debugfs_show, .data = xe_guc_print_info },
+ { "guc_ctb", .show = guc_debugfs_show, .data = guc_ctb },
+};
+
+/* For GuC debugfs files that require SLPC support */
+static const struct drm_info_list slpc_debugfs_list[] = {
+ { "guc_pc", .show = guc_debugfs_show, .data = guc_pc },
+};
+
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ { "guc_log", .show = guc_debugfs_show, .data = guc_log },
+ { "guc_log_dmesg", .show = guc_debugfs_show, .data = guc_log_dmesg },
};
void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent)
{
- struct drm_minor *minor = guc_to_xe(guc)->drm.primary;
- struct drm_info_list *local;
- int i;
-
-#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
- local = drmm_kmalloc(&guc_to_xe(guc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
- if (!local)
- return;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_minor *minor = xe->drm.primary;
- memcpy(local, debugfs_list, DEBUGFS_SIZE);
-#undef DEBUGFS_SIZE
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
+ parent, minor);
- for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
- local[i].data = guc;
+ if (!IS_SRIOV_VF(xe)) {
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ parent, minor);
- drm_debugfs_create_files(local,
- ARRAY_SIZE(debugfs_list),
- parent, minor);
+ if (!xe->info.skip_guc_pc)
+ drm_debugfs_create_files(slpc_debugfs_list,
+ ARRAY_SIZE(slpc_debugfs_list),
+ parent, minor);
+ }
}
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 2a457dcf31d5..92e1f9f41b8c 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -17,36 +17,61 @@
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_mmio.h"
+#include "xe_sriov_pf_helpers.h"
#include "xe_trace_guc.h"
#define TOTAL_QUANTA 0x8000
-static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ struct engine_activity_buffer *buffer;
u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
size_t offset;
- offset = offsetof(struct guc_engine_activity_data,
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_data) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ offset += offsetof(struct guc_engine_activity_data,
engine_activity[guc_class][hwe->logical_instance]);
return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
}
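
With per-function stats enabled, engine_activity_map() and engine_metadata_map() index an array of per-function blocks before applying the usual per-engine offset. A sketch of that layout math, with invented struct shapes standing in for guc_engine_activity_data (the real GUC_MAX_* dimensions differ):

#include <stddef.h>
#include <stdio.h>

#define N_CLASSES   5	/* stand-in for GUC_MAX_ENGINE_CLASSES */
#define N_INSTANCES 8	/* stand-in for GUC_MAX_INSTANCES_PER_CLASS */

struct activity { unsigned int ticks, change_num; };

struct activity_data {	/* stand-in for guc_engine_activity_data */
	struct activity engine[N_CLASSES][N_INSTANCES];
};

static size_t record_offset(unsigned int fn, unsigned int cls, unsigned int inst)
{
	/* per-function block, then the per-engine record inside it */
	return sizeof(struct activity_data) * fn +
	       offsetof(struct activity_data, engine) +
	       (cls * N_INSTANCES + inst) * sizeof(struct activity);
}

int main(void)
{
	/* VF1 (index 1; index 0 is the PF), class 2, instance 3 */
	printf("offset=%zu\n", record_offset(1, 2, 3));
	return 0;
}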
-static struct iosys_map engine_metadata_map(struct xe_guc *guc)
+static struct iosys_map engine_metadata_map(struct xe_guc *guc,
+ unsigned int index)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ struct engine_activity_buffer *buffer;
+ size_t offset;
- return buffer->metadata_bo->vmap;
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_metadata) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ return IOSYS_MAP_INIT_OFFSET(&buffer->metadata_bo->vmap, offset);
}
static int allocate_engine_activity_group(struct xe_guc *guc)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
struct xe_device *xe = guc_to_xe(guc);
- u32 num_activity_group = 1; /* Will be modified for VF */
+ u32 num_activity_group;
+
+ /*
+ * An additional activity group is allocated for the PF
+ */
+ num_activity_group = IS_SRIOV_PF(xe) ? xe_sriov_pf_get_totalvfs(xe) + 1 : 1;
engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group,
sizeof(struct engine_activity_group), GFP_KERNEL);
@@ -60,10 +85,11 @@ static int allocate_engine_activity_group(struct xe_guc *guc)
}
static int allocate_engine_activity_buffers(struct xe_guc *guc,
- struct engine_activity_buffer *buffer)
+ struct engine_activity_buffer *buffer,
+ int count)
{
- u32 metadata_size = sizeof(struct guc_engine_activity_metadata);
- u32 size = sizeof(struct guc_engine_activity_data);
+ u32 metadata_size = sizeof(struct guc_engine_activity_metadata) * count;
+ u32 size = sizeof(struct guc_engine_activity_data) * count;
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo, *metadata_bo;
@@ -98,7 +124,7 @@ static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
static bool is_engine_activity_supported(struct xe_guc *guc)
{
struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
- struct xe_uc_fw_version required = { 1, 14, 1 };
+ struct xe_uc_fw_version required = { .major = 1, .minor = 14, .patch = 1 };
struct xe_gt *gt = guc_to_gt(guc);
if (IS_SRIOV_VF(gt_to_xe(gt))) {
@@ -118,10 +144,11 @@ static bool is_engine_activity_supported(struct xe_guc *guc)
return true;
}
-static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe)
+static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe,
+ unsigned int index)
{
struct xe_guc *guc = &hwe->gt->uc.guc;
- struct engine_activity_group *eag = &guc->engine_activity.eag[0];
+ struct engine_activity_group *eag = &guc->engine_activity.eag[index];
u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
return &eag->engine[guc_class][hwe->logical_instance];
@@ -138,9 +165,10 @@ static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
#define read_metadata_record(xe_, map_, field_) \
xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)
-static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
{
- struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
struct guc_engine_activity *cached_activity = &ea->activity;
struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
@@ -151,8 +179,8 @@ static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
u64 active_ticks, gpm_ts;
u16 change_num;
- activity_map = engine_activity_map(guc, hwe);
- metadata_map = engine_metadata_map(guc);
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);
/* GuC has not initialized activity data yet, return 0 */
@@ -194,9 +222,9 @@ update:
return ea->total + ea->active;
}
-static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, unsigned int index)
{
- struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
struct guc_engine_activity *cached_activity = &ea->activity;
struct iosys_map activity_map, metadata_map;
@@ -205,8 +233,8 @@ static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
u64 numerator;
u16 quanta_ratio;
- activity_map = engine_activity_map(guc, hwe);
- metadata_map = engine_metadata_map(guc);
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
if (!cached_metadata->guc_tsc_frequency_hz)
cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
@@ -245,12 +273,39 @@ static int enable_engine_activity_stats(struct xe_guc *guc)
return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}
-static void engine_activity_set_cpu_ts(struct xe_guc *guc)
+static int enable_function_engine_activity_stats(struct xe_guc *guc, bool enable)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_group *eag = &engine_activity->eag[0];
+ u32 metadata_ggtt_addr = 0, ggtt_addr = 0, num_functions = 0;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ u32 action[6];
+ int len = 0;
+
+ if (enable) {
+ metadata_ggtt_addr = xe_bo_ggtt_addr(buffer->metadata_bo);
+ ggtt_addr = xe_bo_ggtt_addr(buffer->activity_bo);
+ num_functions = engine_activity->num_functions;
+ }
+
+ action[len++] = XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER;
+ action[len++] = num_functions;
+ action[len++] = metadata_ggtt_addr;
+ action[len++] = 0;
+ action[len++] = ggtt_addr;
+ action[len++] = 0;
+
+ /* Blocking here to ensure the buffers are ready before reading them */
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static void engine_activity_set_cpu_ts(struct xe_guc *guc, unsigned int index)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_group *eag = &engine_activity->eag[index];
int i, j;
+ xe_gt_assert(guc_to_gt(guc), index < engine_activity->num_activity_group);
+
for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
eag->engine[i][j].last_cpu_ts = ktime_get();
@@ -265,34 +320,107 @@ static u32 gpm_timestamp_shift(struct xe_gt *gt)
return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}
+static bool is_function_valid(struct xe_guc *guc, unsigned int fn_id)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+
+ if (!IS_SRIOV_PF(xe) && fn_id)
+ return false;
+
+ if (engine_activity->num_functions && fn_id >= engine_activity->num_functions)
+ return false;
+
+ return true;
+}
+
+static int engine_activity_disable_function_stats(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret;
+
+ if (!engine_activity->num_functions)
+ return 0;
+
+ ret = enable_function_engine_activity_stats(guc, false);
+ if (ret)
+ return ret;
+
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+
+ return 0;
+}
+
+static int engine_activity_enable_function_stats(struct xe_guc *guc, int num_vfs)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret, i;
+
+ if (!num_vfs)
+ return 0;
+
+ /* This includes 1 PF and num_vfs */
+ engine_activity->num_functions = num_vfs + 1;
+
+ ret = allocate_engine_activity_buffers(guc, buffer, engine_activity->num_functions);
+ if (ret)
+ return ret;
+
+ ret = enable_function_engine_activity_stats(guc, true);
+ if (ret) {
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+ return ret;
+ }
+
+ /* skip the PF as it was already set up */
+ for (i = 1; i < engine_activity->num_functions; i++)
+ engine_activity_set_cpu_ts(guc, i);
+
+ return 0;
+}
+
/**
* xe_guc_engine_activity_active_ticks - Get engine active ticks
* @guc: The GuC object
* @hwe: The hw_engine object
+ * @fn_id: function id to report on
*
* Return: accumulated ticks @hwe was active since engine activity stats were enabled.
*/
-u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
{
if (!xe_guc_engine_activity_supported(guc))
return 0;
- return get_engine_active_ticks(guc, hwe);
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_active_ticks(guc, hwe, fn_id);
}
/**
* xe_guc_engine_activity_total_ticks - Get engine total ticks
* @guc: The GuC object
* @hwe: The hw_engine object
+ * @fn_id: function id to report on
*
* Return: accumulated quanta of ticks allocated for the engine
*/
-u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
{
if (!xe_guc_engine_activity_supported(guc))
return 0;
- return get_engine_total_ticks(guc, hwe);
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_total_ticks(guc, hwe, fn_id);
}
/**
@@ -311,6 +439,25 @@ bool xe_guc_engine_activity_supported(struct xe_guc *guc)
}
/**
+ * xe_guc_engine_activity_function_stats - Enable/Disable per-function engine activity stats
+ * @guc: The GuC object
+ * @num_vfs: number of vfs
+ * @enable: true to enable, false otherwise
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ if (enable)
+ return engine_activity_enable_function_stats(guc, num_vfs);
+
+ return engine_activity_disable_function_stats(guc);
+}
+
+/**
* xe_guc_engine_activity_enable_stats - Enable engine activity stats
* @guc: The GuC object
*
@@ -327,7 +474,7 @@ void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
if (ret)
xe_gt_err(guc_to_gt(guc), "failed to enable activity stats %d\n", ret);
else
- engine_activity_set_cpu_ts(guc);
+ engine_activity_set_cpu_ts(guc, 0);
}
static void engine_activity_fini(void *arg)
@@ -360,7 +507,7 @@ int xe_guc_engine_activity_init(struct xe_guc *guc)
return ret;
}
- ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer);
+ ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer, 1);
if (ret) {
xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
return ret;
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.h b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
index a042d4cb404c..b32926c2d208 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.h
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
@@ -14,6 +14,9 @@ struct xe_guc;
int xe_guc_engine_activity_init(struct xe_guc *guc);
bool xe_guc_engine_activity_supported(struct xe_guc *guc);
void xe_guc_engine_activity_enable_stats(struct xe_guc *guc);
-u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
-u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable);
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
index 5cdd034b6b70..48f69ddefa36 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
@@ -79,14 +79,24 @@ struct xe_guc_engine_activity {
/** @num_activity_group: number of activity groups */
u32 num_activity_group;
+ /** @num_functions: number of functions */
+ u32 num_functions;
+
/** @supported: indicates support for engine activity stats */
bool supported;
- /** @eag: holds the device level engine activity data */
+ /**
+ * @eag: holds the device-level engine activity data in native mode.
+ * In SRIOV mode, points to an array whose entries hold the engine
+ * activity data for the PF and VFs
+ */
struct engine_activity_group *eag;
/** @device_buffer: buffer object for global engine activity */
struct engine_activity_buffer device_buffer;
+
+ /** @function_buffer: buffer object for per-function engine activity */
+ struct engine_activity_buffer function_buffer;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index 4c39f01e4f52..a3f421e2adc0 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -20,6 +20,8 @@ struct xe_exec_queue;
struct xe_guc_exec_queue {
/** @q: Backpointer to parent xe_exec_queue */
struct xe_exec_queue *q;
+ /** @rcu: For safe freeing of exported dma fences */
+ struct rcu_head rcu;
/** @sched: GPU scheduler for this xe_exec_queue */
struct xe_gpu_scheduler sched;
/** @entity: Scheduler entity for this xe_exec_queue */
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 80514a446ba2..c01ccb35dc75 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -79,7 +79,7 @@ static struct xe_guc_log_snapshot *xe_guc_log_snapshot_alloc(struct xe_guc_log *
* Also, can't use vmalloc as might be called from atomic context. So need
* to break the buffer up into smaller chunks that can be allocated.
*/
- snapshot->size = log->bo->size;
+ snapshot->size = xe_bo_size(log->bo);
snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE);
snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy),
@@ -260,7 +260,8 @@ int xe_guc_log_init(struct xe_guc_log *log)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
index 5b896f5fafaf..f1e2b0be90a9 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.h
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -12,7 +12,7 @@
struct drm_printer;
struct xe_device;
-#if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
#define CRASH_BUFFER_SIZE SZ_1M
#define DEBUG_BUFFER_SIZE SZ_8M
#define CAPTURE_BUFFER_SIZE SZ_2M
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 43b1192ba61c..68b192fe3b32 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -5,8 +5,11 @@
#include "xe_guc_pc.h"
+#include <linux/cleanup.h>
#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/ktime.h>
+#include <linux/wait_bit.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
@@ -51,9 +54,12 @@
#define LNL_MERT_FREQ_CAP 800
#define BMG_MERT_FREQ_CAP 2133
+#define BMG_MIN_FREQ 1200
+#define BMG_MERT_FLUSH_FREQ_CAP 2600
#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
+#define SLPC_ACT_FREQ_TIMEOUT_MS 100
/**
* DOC: GuC Power Conservation (PC)
@@ -141,6 +147,36 @@ static int wait_for_pc_state(struct xe_guc_pc *pc,
return -ETIMEDOUT;
}
+static int wait_for_flush_complete(struct xe_guc_pc *pc)
+{
+ const unsigned long timeout = msecs_to_jiffies(30);
+
+ if (!wait_var_event_timeout(&pc->flush_freq_limit,
+ !atomic_read(&pc->flush_freq_limit),
+ timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
+{
+ int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
+ int slept, wait = 10;
+
+ for (slept = 0; slept < timeout_us;) {
+ if (xe_guc_pc_get_act_freq(pc) <= freq)
+ return 0;
+
+ usleep_range(wait, wait << 1);
+ slept += wait;
+ wait <<= 1;
+ if (slept + wait > timeout_us)
+ wait = timeout_us - slept;
+ }
+
+ return -ETIMEDOUT;
+}
static int pc_action_reset(struct xe_guc_pc *pc)
{
struct xe_guc_ct *ct = pc_to_ct(pc);
@@ -153,7 +189,7 @@ static int pc_action_reset(struct xe_guc_pc *pc)
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
ERR_PTR(ret));
@@ -177,7 +213,7 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
/* Blocking here to ensure the results are ready before reading them */
ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
ERR_PTR(ret));
@@ -200,7 +236,7 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
id, value, ERR_PTR(ret));
@@ -222,7 +258,7 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe",
ERR_PTR(ret));
@@ -239,7 +275,7 @@ static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
mode, ERR_PTR(ret));
return ret;
@@ -462,6 +498,21 @@ static u32 get_cur_freq(struct xe_gt *gt)
}
/**
+ * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
+ * @pc: The GuC PC
+ *
+ * Returns: the requested frequency for that GT instance
+ */
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ return get_cur_freq(gt);
+}
+
+/**
* xe_guc_pc_get_cur_freq - Get Current requested frequency
* @pc: The GuC PC
* @freq: A pointer to a u32 where the freq value will be returned
@@ -538,6 +589,25 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
return pc->rpn_freq;
}
+static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
+{
+ int ret;
+
+ lockdep_assert_held(&pc->freq_lock);
+
+ /* Might be in the middle of a gt reset */
+ if (!pc->freq_ready)
+ return -EAGAIN;
+
+ ret = pc_action_query_task_state(pc);
+ if (ret)
+ return ret;
+
+ *freq = pc_get_min_freq(pc);
+
+ return 0;
+}
+
/**
* xe_guc_pc_get_min_freq - Get the min operational frequency
* @pc: The GuC PC
@@ -548,26 +618,28 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
*/
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
+ guard(mutex)(&pc->freq_lock);
+
+ return xe_guc_pc_get_min_freq_locked(pc, freq);
+}
+
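The conversions above use guard(mutex) from linux/cleanup.h, which unlocks automatically when the scope exits, so every return path is covered without an out: label. A userspace equivalent built on the same compiler cleanup attribute (GCC/Clang only; the guard_mutex() helper is a hypothetical stand-in):

#include <pthread.h>
#include <stdio.h>

static void unlocker(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Declares a pointer with a cleanup handler, then takes the lock; the
 * handler unlocks when the enclosing scope exits, whatever the path. */
#define guard_mutex(m) \
	pthread_mutex_t *guard_ __attribute__((cleanup(unlocker))) = (m); \
	pthread_mutex_lock(guard_)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int value = 42;

static int get_value(void)
{
	guard_mutex(&lock);
	return value;	/* unlocked automatically on return */
}

int main(void)
{
	printf("%d\n", get_value());	/* build with: cc -pthread demo.c */
	return 0;
}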
+static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
+{
int ret;
- xe_device_assert_mem_access(pc_to_xe(pc));
+ lockdep_assert_held(&pc->freq_lock);
- mutex_lock(&pc->freq_lock);
- if (!pc->freq_ready) {
- /* Might be in the middle of a gt reset */
- ret = -EAGAIN;
- goto out;
- }
+ /* Might be in the middle of a gt reset */
+ if (!pc->freq_ready)
+ return -EAGAIN;
- ret = pc_action_query_task_state(pc);
+ ret = pc_set_min_freq(pc, freq);
if (ret)
- goto out;
+ return ret;
- *freq = pc_get_min_freq(pc);
+ pc->user_requested_min = freq;
-out:
- mutex_unlock(&pc->freq_lock);
- return ret;
+ return 0;
}
/**
@@ -581,24 +653,28 @@ out:
*/
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
+ guard(mutex)(&pc->freq_lock);
+
+ return xe_guc_pc_set_min_freq_locked(pc, freq);
+}
+
+static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
+{
int ret;
- mutex_lock(&pc->freq_lock);
- if (!pc->freq_ready) {
- /* Might be in the middle of a gt reset */
- ret = -EAGAIN;
- goto out;
- }
+ lockdep_assert_held(&pc->freq_lock);
- ret = pc_set_min_freq(pc, freq);
+ /* Might be in the middle of a gt reset */
+ if (!pc->freq_ready)
+ return -EAGAIN;
+
+ ret = pc_action_query_task_state(pc);
if (ret)
- goto out;
+ return ret;
- pc->user_requested_min = freq;
+ *freq = pc_get_max_freq(pc);
-out:
- mutex_unlock(&pc->freq_lock);
- return ret;
+ return 0;
}
/**
@@ -611,24 +687,28 @@ out:
*/
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
+ guard(mutex)(&pc->freq_lock);
+
+ return xe_guc_pc_get_max_freq_locked(pc, freq);
+}
+
+static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
+{
int ret;
- mutex_lock(&pc->freq_lock);
- if (!pc->freq_ready) {
- /* Might be in the middle of a gt reset */
- ret = -EAGAIN;
- goto out;
- }
+ lockdep_assert_held(&pc->freq_lock);
- ret = pc_action_query_task_state(pc);
+ /* Might be in the middle of a gt reset */
+ if (!pc->freq_ready)
+ return -EAGAIN;
+
+ ret = pc_set_max_freq(pc, freq);
if (ret)
- goto out;
+ return ret;
- *freq = pc_get_max_freq(pc);
+ pc->user_requested_max = freq;
-out:
- mutex_unlock(&pc->freq_lock);
- return ret;
+ return 0;
}
/**
@@ -642,24 +722,14 @@ out:
*/
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
- int ret;
-
- mutex_lock(&pc->freq_lock);
- if (!pc->freq_ready) {
- /* Might be in the middle of a gt reset */
- ret = -EAGAIN;
- goto out;
+ if (XE_WA(pc_to_gt(pc), 22019338487)) {
+ if (wait_for_flush_complete(pc) != 0)
+ return -EAGAIN;
}
- ret = pc_set_max_freq(pc, freq);
- if (ret)
- goto out;
-
- pc->user_requested_max = freq;
+ guard(mutex)(&pc->freq_lock);
-out:
- mutex_unlock(&pc->freq_lock);
- return ret;
+ return xe_guc_pc_set_max_freq_locked(pc, freq);
}
/**
@@ -802,6 +872,7 @@ void xe_guc_pc_init_early(struct xe_guc_pc *pc)
static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
+ struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
int ret;
lockdep_assert_held(&pc->freq_lock);
@@ -828,6 +899,9 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
if (pc_get_min_freq(pc) > pc->rp0_freq)
ret = pc_set_min_freq(pc, pc->rp0_freq);
+ if (XE_WA(tile->primary_gt, 14022085890))
+ ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
+
out:
return ret;
}
@@ -853,30 +927,117 @@ static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
return ret;
}
-static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
+static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
{
- int ret = 0;
+ struct xe_gt *gt = pc_to_gt(pc);
- if (XE_WA(pc_to_gt(pc), 22019338487)) {
- /*
- * Get updated min/max and stash them.
- */
- ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq);
- if (!ret)
- ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq);
- if (ret)
- return ret;
+ return XE_WA(gt, 22019338487) &&
+ pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
+}
+
+/**
+ * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
+ * @pc: the xe_guc_pc object
+ *
+ * As per the WA, reduce max GT frequency during L2 cache flush
+ */
+void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 max_freq;
+ int ret;
+
+ if (!needs_flush_freq_limit(pc))
+ return;
+
+ guard(mutex)(&pc->freq_lock);
+
+ ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
+ if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
+ ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
+ if (ret) {
+ xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
+ BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
+ return;
+ }
+
+ atomic_set(&pc->flush_freq_limit, 1);
/*
- * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
+ * If user has previously changed max freq, stash that value to
+ * restore later, otherwise use the current max. New user
+ * requests wait on flush.
*/
- mutex_lock(&pc->freq_lock);
- ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
- if (!ret)
- ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
- mutex_unlock(&pc->freq_lock);
+ if (pc->user_requested_max != 0)
+ pc->stashed_max_freq = pc->user_requested_max;
+ else
+ pc->stashed_max_freq = max_freq;
}
+ /*
+ * Wait for actual freq to go below the flush cap: even if the previous
+ * max was below cap, the current one might still be above it
+ */
+ ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
+ if (ret)
+ xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
+ BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
+}
+
+/**
+ * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
+ * @pc: the xe_guc_pc object
+ *
+ * Restore the previous GT max frequency value.
+ */
+void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ int ret = 0;
+
+ if (!needs_flush_freq_limit(pc))
+ return;
+
+ if (!atomic_read(&pc->flush_freq_limit))
+ return;
+
+ mutex_lock(&pc->freq_lock);
+
+ ret = pc_set_max_freq(&gt->uc.guc.pc, pc->stashed_max_freq);
+ if (ret)
+ xe_gt_err_once(gt, "Failed to restore max freq %u:%d",
+ pc->stashed_max_freq, ret);
+
+ atomic_set(&pc->flush_freq_limit, 0);
+ mutex_unlock(&pc->freq_lock);
+ wake_up_var(&pc->flush_freq_limit);
+}
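The wake_up_var() above presumably pairs with the waiter behind wait_for_flush_complete() used in xe_guc_pc_set_max_freq(); a hedged sketch of such a waiter, assuming the generic wait_var_event_timeout() API:

	/* Hypothetical waiter matching the wake_up_var() above. */
	static int wait_for_flush_complete_example(struct xe_guc_pc *pc)
	{
		long remaining;

		remaining = wait_var_event_timeout(&pc->flush_freq_limit,
						   !atomic_read(&pc->flush_freq_limit),
						   HZ);
		return remaining ? 0 : -ETIMEDOUT;
	}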
+
+static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
+{
+ int ret;
+
+ if (!XE_WA(pc_to_gt(pc), 22019338487))
+ return 0;
+
+ guard(mutex)(&pc->freq_lock);
+
+ /*
+ * Get updated min/max and stash them.
+ */
+ ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
+ if (!ret)
+ ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
+ if (ret)
+ return ret;
+
+ /*
+ * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
+ */
+ ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
+ if (!ret)
+ ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
+
return ret;
}
@@ -1053,7 +1214,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
goto out;
}
- memset(pc->bo->vmap.vaddr, 0, size);
+ xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
slpc_shared_data_write(pc, header.size, size);
earlier = ktime_get();
@@ -1170,7 +1331,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index 39102b79602f..52ecdd5ddbff 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -22,6 +22,7 @@ void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
@@ -37,5 +38,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc);
void xe_guc_pc_init_early(struct xe_guc_pc *pc);
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc);
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc);
+void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc);
+void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc);
#endif /* _XE_GUC_PC_H_ */
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
index 2978ac9a249b..c02053948a57 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
@@ -15,6 +15,8 @@
struct xe_guc_pc {
/** @bo: GGTT buffer object that is shared with GuC PC */
struct xe_bo *bo;
+ /** @flush_freq_limit: 1 when max freq changes are limited by driver */
+ atomic_t flush_freq_limit;
/** @rp0_freq: HW RP0 frequency - The Maximum one */
u32 rp0_freq;
/** @rpa_freq: HW RPa frequency - The Achievable one */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 31bc2022bfc2..cafb47711e9b 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -229,6 +229,17 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
static void guc_submit_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ ret = wait_event_timeout(guc->submission_state.fini_wq,
+ xa_empty(&guc->submission_state.exec_queue_lookup),
+ HZ * 5);
+
+ drain_workqueue(xe->destroy_wq);
+
+ xe_gt_assert(gt, ret);
xa_destroy(&guc->submission_state.exec_queue_lookup);
}
@@ -300,6 +311,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
primelockdep(guc);
+ guc->submission_state.initialized = true;
+
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
@@ -485,6 +498,15 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
}
+	/* Explicitly check some fields that we might fix up later. */
+ xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), q->width ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS]);
+ xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR]);
xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE
@@ -509,6 +531,14 @@ static void __register_exec_queue(struct xe_guc *guc,
info->hwlrca_hi,
};
+	/* Explicitly check some fields that we might fix up later. */
+ xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR]);
+
xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
@@ -834,6 +864,13 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
+ /*
+ * If device is being wedged even before submission_state is
+ * initialized, there's nothing to do here.
+ */
+ if (!guc->submission_state.initialized)
+ return;
+
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
@@ -871,12 +908,13 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_gpu_scheduler *sched = &ge->sched;
- bool wedged;
+ bool wedged = false;
xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
trace_xe_exec_queue_lr_cleanup(q);
- wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
+ if (!exec_queue_killed(q))
+ wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
/* Kill the run_job / process_msg entry points */
xe_sched_submission_stop(sched);
@@ -941,7 +979,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
return xe_sched_invalidate_job(job, 2);
}
- ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
+ ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0]));
ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
/*
@@ -950,10 +988,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
*/
xe_gt_assert(gt, timeout_ms < 100 * MSEC_PER_SEC);
- if (ctx_timestamp < ctx_job_timestamp)
- diff = ctx_timestamp + U32_MAX - ctx_job_timestamp;
- else
- diff = ctx_timestamp - ctx_job_timestamp;
+ diff = ctx_timestamp - ctx_job_timestamp;
/*
 * Ensure timeout is within 5% to account for a GuC scheduling latency
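The deleted branch compensated for 32-bit timestamp wraparound by hand; with both operands kept in u32, modular arithmetic already yields the correct delta, so the plain subtraction suffices. Illustration:

	/* u32 subtraction is modulo 2^32 by definition. */
	static u32 elapsed_ticks(u32 now, u32 then)
	{
		return now - then;	/* correct even if the counter wrapped */
	}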
@@ -1050,7 +1085,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
int err = -ETIME;
pid_t pid = -1;
int i = 0;
- bool wedged, skip_timeout_check;
+ bool wedged = false, skip_timeout_check;
/*
* TDR has fired before free job worker. Common if exec queue
@@ -1058,12 +1093,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* list so job can be freed and kick scheduler ensuring free job is not
* lost.
*/
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
- xe_sched_add_pending_job(sched, job);
- xe_sched_submission_start(sched);
-
- return DRM_GPU_SCHED_STAT_NOMINAL;
- }
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/* Kill the run_job entry point */
xe_sched_submission_stop(sched);
@@ -1096,7 +1127,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* doesn't work for SRIOV. For now assuming timeouts in wedged mode are
* genuine timeouts.
*/
- wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
+ if (!exec_queue_killed(q))
+ wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
/* Engine state now stable, disable scheduling to check timestamp */
if (!wedged && exec_queue_registered(q)) {
@@ -1170,9 +1202,12 @@ trigger_reset:
process_name = q->vm->xef->process_name;
pid = q->vm->xef->pid;
}
- xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
- xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
- q->guc->id, q->flags, process_name, pid);
+
+ if (!exec_queue_killed(q))
+ xe_gt_notice(guc_to_gt(guc),
+ "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags, process_name, pid);
trace_xe_sched_job_timedout(job);
@@ -1228,7 +1263,7 @@ trigger_reset:
/* Start fence signaling */
xe_hw_fence_irq_start(q->fence_irq);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
sched_enable:
enable_scheduling(q);
@@ -1238,10 +1273,8 @@ rearm:
 * but there is not currently an easy way to do so in the DRM scheduler. With
* some thought, do this in a follow up.
*/
- xe_sched_add_pending_job(sched, job);
xe_sched_submission_start(sched);
-
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
static void __guc_exec_queue_fini_async(struct work_struct *w)
@@ -1262,7 +1295,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
- kfree(ge);
+ /*
+ * RCU free due sched being exported via DRM scheduler fences
+	 * RCU free because the sched is exported via DRM scheduler fences
+ */
+ kfree_rcu(ge, rcu);
xe_exec_queue_fini(q);
xe_pm_runtime_put(guc_to_xe(guc));
}
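kfree_rcu() defers the kfree() until after an RCU grace period, so readers that dereference the exported timeline name under rcu_read_lock() never see freed memory; the only requirement is the embedded struct rcu_head initialized below. Generic shape of the pattern (hypothetical struct):

	struct example_obj {
		struct rcu_head rcu;	/* consumed by kfree_rcu() */
		char name[32];
	};

	static void example_free(struct example_obj *obj)
	{
		kfree_rcu(obj, rcu);	/* freed after a grace period */
	}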
@@ -1445,6 +1482,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
q->guc = ge;
ge->q = q;
+ init_rcu_head(&ge->rcu);
init_waitqueue_head(&ge->suspend_wait);
for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
@@ -1739,6 +1777,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
+ if (!guc->submission_state.initialized)
+ return 0;
+
/*
* Using an atomic here rather than submission_state.lock as this
* function can be called while holding the CT lock (engine reset
@@ -2045,12 +2086,16 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
u32 guc_id;
+ u32 type = XE_GUC_CAT_ERR_TYPE_INVALID;
- if (unlikely(len < 1))
+ if (unlikely(!len || len > 2))
return -EPROTO;
guc_id = msg[0];
+ if (len == 2)
+ type = msg[1];
+
if (guc_id == GUC_ID_UNKNOWN) {
/*
 * GuC uses GUC_ID_UNKNOWN if it cannot map the CAT fault to any PF/VF
@@ -2064,8 +2109,19 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
if (unlikely(!q))
return -EPROTO;
- xe_gt_dbg(gt, "Engine memory cat error: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
- xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
+ /*
+ * The type is HW-defined and changes based on platform, so we don't
+ * decode it in the kernel and only check if it is valid.
+ * See bspec 54047 and 72187 for details.
+ */
+ if (type != XE_GUC_CAT_ERR_TYPE_INVALID)
+ xe_gt_dbg(gt,
+ "Engine memory CAT error [%u]: class=%s, logical_mask: 0x%x, guc_id=%d",
+ type, xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
+ else
+ xe_gt_dbg(gt,
+ "Engine memory CAT error: class=%s, logical_mask: 0x%x, guc_id=%d",
+ xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
trace_xe_exec_queue_memory_cat_error(q);
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 63bac64429a5..1fde7614fcc5 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -89,6 +89,11 @@ struct xe_guc {
struct mutex lock;
/** @submission_state.enabled: submission is enabled */
bool enabled;
+ /**
+	 * @submission_state.initialized: marks whether the submission state
+	 * has been initialized - before that, not even the lock is valid
+ */
+ bool initialized;
/** @submission_state.fini_wq: submit fini wait queue */
wait_queue_head_t fini_wq;
} submission_state;
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 27d11e06a82b..6d7b62724126 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -11,15 +11,12 @@
#include "xe_device_types.h"
#include "xe_drv.h"
#include "xe_heci_gsc.h"
+#include "regs/xe_gsc_regs.h"
#include "xe_platform_types.h"
#include "xe_survivability_mode.h"
#define GSC_BAR_LENGTH 0x00000FFC
-#define DG1_GSC_HECI2_BASE 0x259000
-#define PVC_GSC_HECI2_BASE 0x285000
-#define DG2_GSC_HECI2_BASE 0x374000
-
static void heci_gsc_irq_mask(struct irq_data *d)
{
/* generic irq handling */
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index 6a846e4cb221..7e43b2dd6a32 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -171,7 +171,7 @@ static int huc_auth_via_gsccs(struct xe_huc *huc)
sizeof(struct pxp43_new_huc_auth_in));
wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
xe_bo_ggtt_addr(huc->fw.bo),
- huc->fw.bo->size);
+ xe_bo_size(huc->fw.bo));
do {
err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 93241fd0a4ba..796ba8c34a16 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -17,6 +17,7 @@
#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
@@ -693,7 +694,7 @@ static void read_media_fuses(struct xe_gt *gt)
if (!(BIT(j) & vdbox_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "vcs%u fused off\n", j);
+ xe_gt_info(gt, "vcs%u fused off\n", j);
}
}
@@ -703,7 +704,7 @@ static void read_media_fuses(struct xe_gt *gt)
if (!(BIT(j) & vebox_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "vecs%u fused off\n", j);
+ xe_gt_info(gt, "vecs%u fused off\n", j);
}
}
}
@@ -728,15 +729,13 @@ static void read_copy_fuses(struct xe_gt *gt)
if (!(BIT(j / 2) & bcs_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "bcs%u fused off\n", j);
+ xe_gt_info(gt, "bcs%u fused off\n", j);
}
}
}
static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
-
/*
* CCS fusing based on DSS masks only applies to platforms that can
* have more than one CCS.
@@ -755,14 +754,13 @@ static void read_compute_fuses_from_dss(struct xe_gt *gt)
if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "ccs%u fused off\n", j);
+ xe_gt_info(gt, "ccs%u fused off\n", j);
}
}
}
static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
u32 ccs_mask;
ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
@@ -774,7 +772,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt)
if ((ccs_mask & BIT(j)) == 0) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "ccs%u fused off\n", j);
+ xe_gt_info(gt, "ccs%u fused off\n", j);
}
}
}
@@ -789,8 +787,6 @@ static void read_compute_fuses(struct xe_gt *gt)
static void check_gsc_availability(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
-
if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
return;
@@ -806,7 +802,25 @@ static void check_gsc_availability(struct xe_gt *gt)
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
- drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
+ xe_gt_dbg(gt, "GSC FW not used, disabling gsccs\n");
+ }
+}
+
+static void check_sw_disable(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u64 sw_allowed = xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev));
+ enum xe_hw_engine_id id;
+
+ for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
+ if (!(gt->info.engine_mask & BIT(id)))
+ continue;
+
+ if (!(sw_allowed & BIT(id))) {
+ gt->info.engine_mask &= ~BIT(id);
+ xe_gt_info(gt, "%s disabled via configfs\n",
+ engine_infos[id].name);
+ }
}
}
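Ignoring the per-engine log message, the loop above reduces to a single allowlist intersection; shown only as an illustration of the semantics:

	/* Equivalent bulk form of check_sw_disable(), minus the logging. */
	gt->info.engine_mask &= xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev));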
@@ -818,6 +832,7 @@ int xe_hw_engines_init_early(struct xe_gt *gt)
read_copy_fuses(gt);
read_compute_fuses(gt);
check_gsc_availability(gt);
+ check_sw_disable(gt);
BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);
@@ -1044,12 +1059,13 @@ struct xe_hw_engine *
xe_hw_engine_lookup(struct xe_device *xe,
struct drm_xe_engine_class_instance eci)
{
+ struct xe_gt *gt = xe_device_get_gt(xe, eci.gt_id);
unsigned int idx;
if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
return NULL;
- if (eci.gt_id >= xe->info.gt_count)
+ if (!gt)
return NULL;
idx = array_index_nospec(eci.engine_class,
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index a440442b4d72..640950172088 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -605,6 +605,7 @@ err_object:
kobject_put(kobj);
return err;
}
+ALLOW_ERROR_INJECTION(xe_add_hw_engine_class_defaults, ERRNO); /* See xe_pci_probe() */
static void hw_engine_class_sysfs_fini(void *arg)
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 2d68c5b5262a..c926f840c87b 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -13,15 +13,6 @@
#include "xe_vm.h"
static void
-hw_engine_group_free(struct drm_device *drm, void *arg)
-{
- struct xe_hw_engine_group *group = arg;
-
- destroy_workqueue(group->resume_wq);
- kfree(group);
-}
-
-static void
hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
{
struct xe_exec_queue *q;
@@ -53,7 +44,7 @@ hw_engine_group_alloc(struct xe_device *xe)
struct xe_hw_engine_group *group;
int err;
- group = kzalloc(sizeof(*group), GFP_KERNEL);
+ group = drmm_kzalloc(&xe->drm, sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -61,14 +52,14 @@ hw_engine_group_alloc(struct xe_device *xe)
if (!group->resume_wq)
return ERR_PTR(-ENOMEM);
+ err = drmm_add_action_or_reset(&xe->drm, __drmm_workqueue_release, group->resume_wq);
+ if (err)
+ return ERR_PTR(err);
+
init_rwsem(&group->mode_sem);
INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func);
INIT_LIST_HEAD(&group->exec_queue_list);
- err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group);
- if (err)
- return ERR_PTR(err);
-
return group;
}
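Tying both the allocation and the workqueue release to the drm_device lifetime is what allows the goto-based unwinding in xe_hw_engine_setup_groups() to be deleted below. A condensed sketch of the drmm pattern (struct and names hypothetical):

	struct example_group {
		struct workqueue_struct *wq;
	};

	static struct example_group *example_group_alloc(struct drm_device *drm)
	{
		struct example_group *group;
		int err;

		group = drmm_kzalloc(drm, sizeof(*group), GFP_KERNEL);
		if (!group)
			return ERR_PTR(-ENOMEM);

		group->wq = alloc_workqueue("example-wq", 0, 0);
		if (!group->wq)
			return ERR_PTR(-ENOMEM);

		/* destroy_workqueue() now runs automatically at drm teardown */
		err = drmm_add_action_or_reset(drm, __drmm_workqueue_release, group->wq);
		if (err)
			return ERR_PTR(err);

		return group;
	}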
@@ -84,25 +75,18 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt)
enum xe_hw_engine_id id;
struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs;
struct xe_device *xe = gt_to_xe(gt);
- int err;
group_rcs_ccs = hw_engine_group_alloc(xe);
- if (IS_ERR(group_rcs_ccs)) {
- err = PTR_ERR(group_rcs_ccs);
- goto err_group_rcs_ccs;
- }
+ if (IS_ERR(group_rcs_ccs))
+ return PTR_ERR(group_rcs_ccs);
group_bcs = hw_engine_group_alloc(xe);
- if (IS_ERR(group_bcs)) {
- err = PTR_ERR(group_bcs);
- goto err_group_bcs;
- }
+ if (IS_ERR(group_bcs))
+ return PTR_ERR(group_bcs);
group_vcs_vecs = hw_engine_group_alloc(xe);
- if (IS_ERR(group_vcs_vecs)) {
- err = PTR_ERR(group_vcs_vecs);
- goto err_group_vcs_vecs;
- }
+ if (IS_ERR(group_vcs_vecs))
+ return PTR_ERR(group_vcs_vecs);
for_each_hw_engine(hwe, gt, id) {
switch (hwe->class) {
@@ -125,15 +109,6 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt)
}
return 0;
-
-err_group_vcs_vecs:
- kfree(group_vcs_vecs);
-err_group_bcs:
- kfree(group_bcs);
-err_group_rcs_ccs:
- kfree(group_rcs_ccs);
-
- return err;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index 0b4f12be3692..b2a0c46dfcd4 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -100,6 +100,9 @@ void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq)
spin_unlock_irqrestore(&irq->lock, flags);
dma_fence_end_signalling(tmp);
}
+
+ /* Safe release of the irq->lock used in dma_fence_init. */
+ synchronize_rcu();
}
void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq)
@@ -165,7 +168,7 @@ static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);
return dma_fence->error ||
- !__dma_fence_is_later(dma_fence->seqno, seqno, dma_fence->ops);
+ !__dma_fence_is_later(dma_fence, dma_fence->seqno, seqno);
}
static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 48d80ffdf7bb..c17ed1ae8649 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -5,6 +5,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/units.h>
@@ -19,6 +20,8 @@
#include "xe_pcode_api.h"
#include "xe_sriov.h"
#include "xe_pm.h"
+#include "xe_vsec.h"
+#include "regs/xe_pmt.h"
enum xe_hwmon_reg {
REG_TEMP,
@@ -27,6 +30,7 @@ enum xe_hwmon_reg {
REG_PKG_POWER_SKU_UNIT,
REG_GT_PERF_STATUS,
REG_PKG_ENERGY_STATUS,
+ REG_FAN_SPEED,
};
enum xe_hwmon_reg_operation {
@@ -42,6 +46,29 @@ enum xe_hwmon_channel {
CHANNEL_MAX,
};
+enum xe_fan_channel {
+ FAN_1,
+ FAN_2,
+ FAN_3,
+ FAN_MAX,
+};
+
+/* Attribute index for powerX_xxx_interval sysfs entries */
+enum sensor_attr_power {
+ SENSOR_INDEX_PSYS_PL1,
+ SENSOR_INDEX_PKG_PL1,
+ SENSOR_INDEX_PSYS_PL2,
+ SENSOR_INDEX_PKG_PL2,
+};
+
+/*
+ * For platforms that support mailbox commands for power limits, REG_PKG_POWER_SKU_UNIT is
+ * not supported; the SKU units below are used instead.
+ */
+#define PWR_UNIT 0x3
+#define ENERGY_UNIT 0xe
+#define TIME_UNIT 0xa
+
/*
* SF_* - scale factors for particular quantities according to hwmon spec.
*/
@@ -51,6 +78,19 @@ enum xe_hwmon_channel {
#define SF_ENERGY 1000000 /* microjoules */
#define SF_TIME 1000 /* milliseconds */
+/*
+ * PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute.
+ */
+#define PL1_HWMON_ATTR hwmon_power_max
+#define PL2_HWMON_ATTR hwmon_power_cap
+
+#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "PL2")
+
+/*
+ * Timeout for power limit write mailbox command.
+ */
+#define PL_WRITE_MBX_TIMEOUT_MS (1)
+
/**
* struct xe_hwmon_energy_info - to accumulate energy
*/
@@ -62,6 +102,16 @@ struct xe_hwmon_energy_info {
};
/**
+ * struct xe_hwmon_fan_info - to cache previous fan reading
+ */
+struct xe_hwmon_fan_info {
+ /** @reg_val_prev: previous fan reg val */
+ u32 reg_val_prev;
+ /** @time_prev: previous timestamp */
+ u64 time_prev;
+};
+
+/**
* struct xe_hwmon - xe hwmon data structure
*/
struct xe_hwmon {
@@ -79,8 +129,89 @@ struct xe_hwmon {
int scl_shift_time;
/** @ei: Energy info for energyN_input */
struct xe_hwmon_energy_info ei[CHANNEL_MAX];
+ /** @fi: Fan info for fanN_input */
+ struct xe_hwmon_fan_info fi[FAN_MAX];
+	/** @boot_power_limit_read: whether the boot power limits have been read */
+ bool boot_power_limit_read;
+ /** @pl1_on_boot: power limit PL1 on boot */
+ u32 pl1_on_boot[CHANNEL_MAX];
+ /** @pl2_on_boot: power limit PL2 on boot */
+ u32 pl2_on_boot[CHANNEL_MAX];
+
};
+static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel,
+ u32 *uval)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+ u32 val0 = 0, val1 = 0;
+ int ret = 0;
+
+ ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
+ (channel == CHANNEL_CARD) ?
+ READ_PSYSGPU_POWER_LIMIT :
+ READ_PACKAGE_POWER_LIMIT,
+ hwmon->boot_power_limit_read ?
+ READ_PL_FROM_PCODE : READ_PL_FROM_FW),
+ &val0, &val1);
+
+ if (ret) {
+ drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
+ channel, val0, val1, ret);
+ *uval = 0;
+ return ret;
+ }
+
+ /* return the value only if limit is enabled */
+ if (attr == PL1_HWMON_ATTR)
+ *uval = (val0 & PWR_LIM_EN) ? val0 : 0;
+ else if (attr == PL2_HWMON_ATTR)
+ *uval = (val1 & PWR_LIM_EN) ? val1 : 0;
+ else if (attr == hwmon_power_label)
+ *uval = (val0 & PWR_LIM_EN) ? 1 : (val1 & PWR_LIM_EN) ? 1 : 0;
+ else
+ *uval = 0;
+
+ return ret;
+}
+
+static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
+ u32 clr, u32 set)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+ u32 val0, val1;
+ int ret = 0;
+
+ ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
+ (channel == CHANNEL_CARD) ?
+ READ_PSYSGPU_POWER_LIMIT :
+ READ_PACKAGE_POWER_LIMIT,
+ hwmon->boot_power_limit_read ?
+ READ_PL_FROM_PCODE : READ_PL_FROM_FW),
+ &val0, &val1);
+
+ if (ret)
+ drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
+ channel, val0, val1, ret);
+
+ if (attr == PL1_HWMON_ATTR)
+ val0 = (val0 & ~clr) | set;
+ else if (attr == PL2_HWMON_ATTR)
+ val1 = (val1 & ~clr) | set;
+ else
+ return -EIO;
+
+ ret = xe_pcode_write64_timeout(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
+ (channel == CHANNEL_CARD) ?
+ WRITE_PSYSGPU_POWER_LIMIT :
+ WRITE_PACKAGE_POWER_LIMIT, 0),
+ val0, val1, PL_WRITE_MBX_TIMEOUT_MS);
+ if (ret)
+ drm_dbg(&hwmon->xe->drm, "write failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
+ channel, val0, val1, ret);
+ return ret;
+}
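A typical caller of this read-modify-write helper, mirroring the disable path later in this patch: clear only the enable bit and leave the rest of the limit encoding intact.

	/* Disable PL1 on the package channel, preserving the stored limit. */
	xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG,
				       PWR_LIM_EN, 0);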
+
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
int channel)
{
@@ -101,29 +232,19 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
}
break;
case REG_PKG_RAPL_LIMIT:
- if (xe->info.platform == XE_BATTLEMAGE) {
- if (channel == CHANNEL_PKG)
- return BMG_PACKAGE_RAPL_LIMIT;
- else
- return BMG_PLATFORM_POWER_LIMIT;
- } else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
return PVC_GT0_PACKAGE_RAPL_LIMIT;
- } else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
+ else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
return PCU_CR_PACKAGE_RAPL_LIMIT;
- }
break;
case REG_PKG_POWER_SKU:
- if (xe->info.platform == XE_BATTLEMAGE)
- return BMG_PACKAGE_POWER_SKU;
- else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
return PVC_GT0_PACKAGE_POWER_SKU;
else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
return PCU_CR_PACKAGE_POWER_SKU;
break;
case REG_PKG_POWER_SKU_UNIT:
- if (xe->info.platform == XE_BATTLEMAGE)
- return BMG_PACKAGE_POWER_SKU_UNIT;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
else if (xe->info.platform == XE_DG2)
return PCU_CR_PACKAGE_POWER_SKU_UNIT;
@@ -133,17 +254,20 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return GT_PERF_STATUS;
break;
case REG_PKG_ENERGY_STATUS:
- if (xe->info.platform == XE_BATTLEMAGE) {
- if (channel == CHANNEL_PKG)
- return BMG_PACKAGE_ENERGY_STATUS;
- else
- return BMG_PLATFORM_ENERGY_STATUS;
- } else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
return PVC_GT0_PLATFORM_ENERGY_STATUS;
} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
return PCU_CR_PACKAGE_ENERGY_STATUS;
}
break;
+ case REG_FAN_SPEED:
+ if (channel == FAN_1)
+ return BMG_FAN_1_SPEED;
+ else if (channel == FAN_2)
+ return BMG_FAN_2_SPEED;
+ else if (channel == FAN_3)
+ return BMG_FAN_3_SPEED;
+ break;
default:
drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
break;
@@ -152,7 +276,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return XE_REG(0);
}
-#define PL1_DISABLE 0
+#define PL_DISABLE 0
/*
* HW allows arbitrary PL1 limits to be set but silently clamps these values to
@@ -160,94 +284,143 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
* same pattern for sysfs, allow arbitrary PL1 limits to be set but display
* clamped values when read.
*/
-static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
+static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
- u64 reg_val, min, max;
+ u64 reg_val = 0, min, max;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
- pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ mutex_lock(&hwmon->hwmon_lock);
- /*
- * Valid check of REG_PKG_RAPL_LIMIT is already done in xe_hwmon_power_is_visible.
- * So not checking it again here.
- */
- if (!xe_reg_is_valid(pkg_power_sku)) {
- drm_warn(&xe->drm, "pkg_power_sku invalid\n");
- *value = 0;
- return;
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)&reg_val);
+ } else {
+ rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
}
- mutex_lock(&hwmon->hwmon_lock);
-
- reg_val = xe_mmio_read32(mmio, rapl_limit);
- /* Check if PL1 limit is disabled */
- if (!(reg_val & PKG_PWR_LIM_1_EN)) {
- *value = PL1_DISABLE;
+ /* Check if PL limits are disabled. */
+ if (!(reg_val & PWR_LIM_EN)) {
+ *value = PL_DISABLE;
+ drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n",
+ PWR_ATTR_TO_STR(attr), channel, reg_val);
goto unlock;
}
- reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
+ reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
- reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
- min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
- min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
- max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
- max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
-
- if (min && max)
- *value = clamp_t(u64, *value, min, max);
+	/* For platforms with mailbox power limit support, clamping is done by pcode. */
+ if (!hwmon->xe->info.has_mbx_power_limits) {
+ reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
+ min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
+ max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
+ min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
+ max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
+ if (min && max)
+ *value = clamp_t(u64, *value, min, max);
+ }
unlock:
mutex_unlock(&hwmon->hwmon_lock);
}
-static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
+static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channel, long value)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
int ret = 0;
- u64 reg_val;
+ u32 reg_val, max;
struct xe_reg rapl_limit;
+ u64 max_supp_power_limit = 0;
+
+ mutex_lock(&hwmon->hwmon_lock);
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
- mutex_lock(&hwmon->hwmon_lock);
+ /* Disable Power Limit and verify, as limit cannot be disabled on all platforms. */
+ if (value == PL_DISABLE) {
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ drm_dbg(&hwmon->xe->drm, "disabling %s on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM_EN, 0);
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
+ } else {
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN, 0);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
+ }
- /* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
- if (value == PL1_DISABLE) {
- reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
- reg_val = xe_mmio_read32(mmio, rapl_limit);
- if (reg_val & PKG_PWR_LIM_1_EN) {
- drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
+ if (reg_val & PWR_LIM_EN) {
+ drm_warn(&hwmon->xe->drm, "Power limit disable is not supported!\n");
ret = -EOPNOTSUPP;
}
goto unlock;
}
+ /*
+ * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to
+ * the supported maximum (U12.3 format).
+	 * This avoids truncation during the reg_val calculation below and ensures a valid
+	 * power limit is sent to pcode, which clamps it to the card-supported value.
+ */
+ max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;
+ if (value > max_supp_power_limit) {
+ value = max_supp_power_limit;
+ drm_info(&hwmon->xe->drm,
+ "Power limit clamped as selected %s exceeds channel %d limit\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ }
+
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
- reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
- reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
+ /*
+	 * Clamp the power limit to the GPU firmware default as a maximum, as additional
+	 * protection on top of the pcode clamp.
+ */
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ max = (attr == PL1_HWMON_ATTR) ?
+ hwmon->pl1_on_boot[channel] : hwmon->pl2_on_boot[channel];
+ max = REG_FIELD_PREP(PWR_LIM_VAL, max);
+ if (reg_val > max) {
+ reg_val = max;
+ drm_dbg(&hwmon->xe->drm,
+ "Clamping power limit to GPU firmware default 0x%x\n",
+ reg_val);
+ }
+ }
+
+ reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
+
+ if (hwmon->xe->info.has_mbx_power_limits)
+ ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, reg_val);
+ else
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM, reg_val);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
return ret;
}
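Both directions of the power conversion are fixed-point shifts scaled by SF_POWER (microwatts), rounding to nearest on the write path; isolated here as a sketch:

	/* Illustrative conversion between microwatts and HW fixed-point units. */
	static u32 uw_to_hw_units(u64 uw, int scl_shift_power)
	{
		return DIV_ROUND_CLOSEST_ULL(uw << scl_shift_power, SF_POWER);
	}

	static u64 hw_units_to_uw(u32 hw, int scl_shift_power)
	{
		return mul_u64_u32_shr(hw, SF_POWER, scl_shift_power);
	}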
-static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
+static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, u32 attr, int channel,
+ long *value)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
- u64 reg_val;
+ u32 reg_val;
+
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ /* PL1 is rated max if supported. */
+ xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, channel, &reg_val);
+ } else {
+ /*
+ * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so valid check
+ * for this register can be skipped.
+ * See xe_hwmon_power_is_visible.
+ */
+ struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+
+ reg_val = xe_mmio_read32(mmio, reg);
+ }
- /*
- * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so valid check
- * for this register can be skipped.
- * See xe_hwmon_power_is_visible.
- */
- reg_val = xe_mmio_read32(mmio, reg);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}
@@ -277,16 +450,37 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
- u64 reg_val;
+ u32 reg_val;
+ int ret = 0;
+
+ /* Energy is supported only for card and pkg */
+ if (channel > CHANNEL_PKG) {
+ *energy = 0;
+ return;
+ }
- reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel));
+ if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
+ u64 pmt_val;
- if (reg_val >= ei->reg_val_prev)
- ei->accum_energy += reg_val - ei->reg_val_prev;
- else
- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+ ret = xe_pmt_telem_read(to_pci_dev(hwmon->xe->drm.dev),
+ xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
+ &pmt_val, BMG_ENERGY_STATUS_PMT_OFFSET, sizeof(pmt_val));
+ if (ret != sizeof(pmt_val)) {
+ drm_warn(&hwmon->xe->drm, "energy read from pmt failed, ret %d\n", ret);
+ *energy = 0;
+ return;
+ }
+ if (channel == CHANNEL_PKG)
+ reg_val = REG_FIELD_GET64(ENERGY_PKG, pmt_val);
+ else
+ reg_val = REG_FIELD_GET64(ENERGY_CARD, pmt_val);
+ } else {
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel));
+ }
+
+ ei->accum_energy += reg_val - ei->reg_val_prev;
ei->reg_val_prev = reg_val;
*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
@@ -301,23 +495,36 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
- int sensor_index = to_sensor_dev_attr(attr)->index;
+ int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
+
+ int ret = 0;
xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
+ if (ret) {
+ drm_err(&hwmon->xe->drm,
+ "power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
+ channel, power_attr, r, ret);
+ r = 0;
+ }
+ } else {
+ r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
+ }
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(hwmon->xe);
- x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
- y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
+ x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
+ y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);
/*
- * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
+ * tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
* = (4 | x) << (y - 2)
*
 * Here (y - 2) ensures a 1.x fixed point representation
@@ -343,15 +550,16 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
+ int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
int ret;
- int sensor_index = to_sensor_dev_attr(attr)->index;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
/*
- * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
+ * Max HW supported tau in '(1 + (x / 4)) * power(2,y)' format, x = 0, y = 0x12.
* The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
*
* The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
@@ -375,7 +583,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
return -EINVAL;
/* val in hw units */
- val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
+ val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1;
/*
* Convert val to 1.x * power(2,y)
@@ -390,14 +598,18 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
x = (val - (1ul << y)) << x_w >> y;
}
- rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
+ rxy = REG_FIELD_PREP(PWR_LIM_TIME_X, x) |
+ REG_FIELD_PREP(PWR_LIM_TIME_Y, y);
xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
- PKG_PWR_LIM_1_TIME, rxy);
+ if (hwmon->xe->info.has_mbx_power_limits)
+ xe_hwmon_pcode_rmw_power_limit(hwmon, power_attr, channel, PWR_LIM_TIME, rxy);
+ else
+ r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel),
+ PWR_LIM_TIME, rxy);
mutex_unlock(&hwmon->hwmon_lock);
@@ -406,17 +618,28 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
return count;
}
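A worked instance of the '(1 + (x / 4)) * power(2,y)' encoding: x = 2 and y = 5 give tau = (4 | 2) << (5 - 2) = 48 time units, i.e. 1.5 * 2^5. Decoder sketch (valid for y >= 2):

	static u64 tau_from_xy(u32 x, u32 y)
	{
		return (u64)(4 | x) << (y - 2);
	}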
+/* PSYS PL1 */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
xe_hwmon_power_max_interval_show,
- xe_hwmon_power_max_interval_store, CHANNEL_CARD);
-
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL1);
+/* PKG PL1 */
static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
xe_hwmon_power_max_interval_show,
- xe_hwmon_power_max_interval_store, CHANNEL_PKG);
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL1);
+/* PSYS PL2 */
+static SENSOR_DEVICE_ATTR(power1_cap_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL2);
+/* PKG PL2 */
+static SENSOR_DEVICE_ATTR(power2_cap_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL2);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_max_interval.dev_attr.attr,
&sensor_dev_attr_power2_max_interval.dev_attr.attr,
+ &sensor_dev_attr_power1_cap_interval.dev_attr.attr,
+ &sensor_dev_attr_power2_cap_interval.dev_attr.attr,
NULL
};
@@ -426,10 +649,22 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
+ int channel = (index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
+ u32 uval = 0;
+ struct xe_reg rapl_limit;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
xe_pm_runtime_get(hwmon->xe);
- ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
+ } else if (power_attr != PL2_HWMON_ATTR) {
+ rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(rapl_limit))
+ uval = xe_mmio_read32(mmio, rapl_limit);
+ }
+ ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
xe_pm_runtime_put(hwmon->xe);
@@ -449,11 +684,13 @@ static const struct attribute_group *hwmon_groups[] = {
static const struct hwmon_channel_info * const hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
- HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
- HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT |
+ HWMON_P_CAP,
+ HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CAP),
HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT),
NULL
};
@@ -480,6 +717,19 @@ static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
(uval & POWER_SETUP_I1_DATA_MASK));
}
+static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
+ /* Platforms that don't return correct value */
+ if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) {
+ *uval = 2;
+ return 0;
+ }
+
+ return xe_pcode_read(root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), uval, NULL);
+}
+
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
long *value, u32 scale_factor)
{
@@ -504,9 +754,23 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
{
int ret;
u32 uval;
+ u64 max_crit_power_curr = 0;
mutex_lock(&hwmon->hwmon_lock);
+ /*
+ * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1
+ * max supported value, clamp it to the command's max (U10.6 format).
+	 * This avoids truncation during the uval calculation below and ensures a valid power
+	 * limit is sent to pcode, which clamps it to the card-supported value.
+ */
+ max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;
+ if (value > max_crit_power_curr) {
+ value = max_crit_power_curr;
+ drm_info(&hwmon->xe->drm,
+ "Power limit clamped as selected exceeds channel %d limit\n",
+ channel);
+ }
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
ret = xe_hwmon_pcode_write_i1(hwmon, uval);
@@ -557,23 +821,62 @@ xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
+ struct xe_reg reg;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
switch (attr) {
case hwmon_power_max:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
- channel)) ? 0664 : 0;
+ case hwmon_power_cap:
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
+ } else if (attr != PL2_HWMON_ATTR) {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ }
+ if (uval & PWR_LIM_EN) {
+ drm_info(&hwmon->xe->drm, "%s is supported on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ return 0664;
+ }
+ drm_dbg(&hwmon->xe->drm, "%s is unsupported on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ return 0;
case hwmon_power_rated_max:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
- channel)) ? 0444 : 0;
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ return 0;
+ } else {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ return uval ? 0444 : 0;
+ }
case hwmon_power_crit:
- if (channel == CHANNEL_PKG)
- return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
- !(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ if (channel == CHANNEL_CARD) {
+ xe_hwmon_pcode_read_i1(hwmon, &uval);
+ return (uval & POWER_SETUP_I1_WATTS) ? 0644 : 0;
+ }
break;
case hwmon_power_label:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
- channel)) ? 0444 : 0;
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
+ } else {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+
+ if (!uval) {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ }
+ }
+ if ((!(uval & PWR_LIM_EN)) && channel == CHANNEL_CARD) {
+ xe_hwmon_pcode_read_i1(hwmon, &uval);
+ return (uval & POWER_SETUP_I1_WATTS) ? 0444 : 0;
+ }
+ return (uval) ? 0444 : 0;
default:
return 0;
}
@@ -585,10 +888,11 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_power_max:
- xe_hwmon_power_max_read(hwmon, channel, val);
+ case hwmon_power_cap:
+ xe_hwmon_power_max_read(hwmon, attr, channel, val);
return 0;
case hwmon_power_rated_max:
- xe_hwmon_power_rated_max_read(hwmon, channel, val);
+ xe_hwmon_power_rated_max_read(hwmon, attr, channel, val);
return 0;
case hwmon_power_crit:
return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
@@ -601,8 +905,9 @@ static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
+ case hwmon_power_cap:
case hwmon_power_max:
- return xe_hwmon_power_max_write(hwmon, channel, val);
+ return xe_hwmon_power_max_write(hwmon, attr, channel, val);
case hwmon_power_crit:
return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
default:
@@ -683,11 +988,18 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
+ long energy = 0;
+
switch (attr) {
case hwmon_energy_input:
case hwmon_energy_label:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel)) ? 0444 : 0;
+ if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
+ xe_hwmon_energy_get(hwmon, channel, &energy);
+ return energy ? 0444 : 0;
+ } else {
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel)) ? 0444 : 0;
+ }
default:
return 0;
}
@@ -706,6 +1018,75 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
}
static umode_t
+xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
+{
+ u32 uval;
+
+ if (!hwmon->xe->info.has_fan_control)
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, &uval))
+ return 0;
+
+ return channel < uval ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
+ struct xe_hwmon_fan_info *fi = &hwmon->fi[channel];
+ u64 rotations, time_now, time;
+ u32 reg_val;
+ int ret = 0;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel));
+ time_now = get_jiffies_64();
+
+ /*
+	 * The HW register value is an accumulated count of pulses from the PWM
+	 * fan, at a scale of 2 pulses per rotation.
+ */
+ rotations = (reg_val - fi->reg_val_prev) / 2;
+
+ time = jiffies_delta_to_msecs(time_now - fi->time_prev);
+ if (unlikely(!time)) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ /*
+	 * Calculate fan speed in RPM by time-averaging two subsequent pulse-count readings:
+ * RPM = number of rotations * msecs per minute / time in msecs
+ */
+ *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);
+
+ fi->reg_val_prev = reg_val;
+ fi->time_prev = time_now;
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
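Worked example with hypothetical numbers: 150 new pulses over 3000 ms is 75 rotations, so RPM = 75 * 60000 / 3000 = 1500. The arithmetic, isolated:

	/* Same computation as above, reduced to its inputs. */
	static long example_rpm(u32 pulses, u64 msecs)
	{
		u64 rotations = pulses / 2;	/* 2 pulses per rotation */

		return DIV_ROUND_UP_ULL(rotations * MSEC_PER_SEC * 60, msecs);
	}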
+
+static int
+xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ return xe_hwmon_fan_input_read(hwmon, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -730,6 +1111,9 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
case hwmon_energy:
ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_is_visible(hwmon, attr, channel);
+ break;
default:
ret = 0;
break;
@@ -765,6 +1149,9 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case hwmon_energy:
ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_read(hwmon, attr, channel, val);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -842,23 +1229,57 @@ static void
xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- long energy;
+ long energy, fan_speed;
u64 val_sku_unit = 0;
int channel;
struct xe_reg pkg_power_sku_unit;
- /*
- * The contents of register PKG_POWER_SKU_UNIT do not change,
- * so read it once and store the shift values.
- */
- pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
- if (xe_reg_is_valid(pkg_power_sku_unit)) {
- val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
- hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
- hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
- hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
+ if (hwmon->xe->info.has_mbx_power_limits) {
+		/* Check if the GPU firmware supports mailbox power limit commands. */
+ if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD,
+ &hwmon->pl1_on_boot[CHANNEL_CARD]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG,
+ &hwmon->pl1_on_boot[CHANNEL_PKG]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_CARD,
+ &hwmon->pl2_on_boot[CHANNEL_CARD]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_PKG,
+ &hwmon->pl2_on_boot[CHANNEL_PKG])) {
+ drm_warn(&hwmon->xe->drm,
+				 "Failed to read power limits, check GPU firmware!\n");
+ } else {
+ drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n");
+			/* Write the default limits back so subsequent reads come from pcode. */
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl1_on_boot[CHANNEL_CARD]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl1_on_boot[CHANNEL_PKG]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
+ CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl2_on_boot[CHANNEL_CARD]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
+ CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl2_on_boot[CHANNEL_PKG]);
+ hwmon->scl_shift_power = PWR_UNIT;
+ hwmon->scl_shift_energy = ENERGY_UNIT;
+ hwmon->scl_shift_time = TIME_UNIT;
+ hwmon->boot_power_limit_read = true;
+ }
+ } else {
+ drm_info(&hwmon->xe->drm, "Using register for power limits\n");
+ /*
+ * The contents of register PKG_POWER_SKU_UNIT do not change,
+ * so read it once and store the shift values.
+ */
+ pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
+ if (xe_reg_is_valid(pkg_power_sku_unit)) {
+ val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
+ hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
+ hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
+ hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
+ }
}
-
/*
* Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
* first value of the energy register read
@@ -866,6 +1287,11 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
for (channel = 0; channel < CHANNEL_MAX; channel++)
if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
xe_hwmon_energy_get(hwmon, channel, &energy);
+
+ /* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading. */
+ for (channel = 0; channel < FAN_MAX; channel++)
+ if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel))
+ xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}
static void xe_hwmon_mutex_destroy(void *arg)
@@ -918,4 +1344,4 @@ int xe_hwmon_register(struct xe_device *xe)
return 0;
}
-
+MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");
diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c
new file mode 100644
index 000000000000..bc7dc2099470
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_i2c.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Intel Xe I2C attached Microcontroller Units (MCU)
+ *
+ * Copyright (C) 2025 Intel Corporation.
+ */
+
+#include <linux/array_size.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "regs/xe_i2c_regs.h"
+#include "regs/xe_irq_regs.h"
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_i2c.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+
+/**
+ * DOC: Xe I2C devices
+ *
+ * Register a platform device for the I2C host controller (Synopsys DesignWare
+ * I2C) if the registers of that controller are mapped in MMIO, and also the
+ * I2C client device for the Add-In Management Controller (the MCU) attached to
+ * the host controller.
+ *
+ * See drivers/i2c/busses/i2c-designware-* for more information on the I2C host
+ * controller.
+ */
+
+static const char adapter_name[] = "i2c_designware";
+
+static const struct property_entry xe_i2c_adapter_properties[] = {
+ PROPERTY_ENTRY_STRING("compatible", "intel,xe-i2c"),
+ PROPERTY_ENTRY_U32("clock-frequency", I2C_MAX_FAST_MODE_PLUS_FREQ),
+ { }
+};
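
The software-node properties above are how xe hands configuration to the generic DesignWare platform driver. A minimal sketch of how any platform driver could consume them through the device-property API (illustrative only; this is not the real i2c-designware probe path):

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/property.h>

    static int example_probe(struct platform_device *pdev)
    {
            u32 freq;

            /* Reads the "clock-frequency" entry registered above. */
            if (!device_property_read_u32(&pdev->dev, "clock-frequency", &freq))
                    dev_info(&pdev->dev, "bus clock: %u Hz\n", freq);

            return 0;
    }
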
+
+static inline void xe_i2c_read_endpoint(struct xe_mmio *mmio, void *ep)
+{
+ u32 *val = ep;
+
+ val[0] = xe_mmio_read32(mmio, REG_SG_REMAP_ADDR_PREFIX);
+ val[1] = xe_mmio_read32(mmio, REG_SG_REMAP_ADDR_POSTFIX);
+}
+
+static void xe_i2c_client_work(struct work_struct *work)
+{
+ struct xe_i2c *i2c = container_of(work, struct xe_i2c, work);
+ struct i2c_board_info info = {
+ .type = "amc",
+ .flags = I2C_CLIENT_HOST_NOTIFY,
+ .addr = i2c->ep.addr[1],
+ };
+
+ i2c->client[0] = i2c_new_client_device(i2c->adapter, &info);
+}
+
+static int xe_i2c_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct xe_i2c *i2c = container_of(nb, struct xe_i2c, bus_notifier);
+ struct i2c_adapter *adapter = i2c_verify_adapter(data);
+ struct device *dev = data;
+
+ if (action == BUS_NOTIFY_ADD_DEVICE &&
+ adapter && dev->parent == &i2c->pdev->dev) {
+ i2c->adapter = adapter;
+ schedule_work(&i2c->work);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int xe_i2c_register_adapter(struct xe_i2c *i2c)
+{
+ struct pci_dev *pci = to_pci_dev(i2c->drm_dev);
+ struct platform_device *pdev;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ fwnode = fwnode_create_software_node(xe_i2c_adapter_properties, NULL);
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
+ /*
+ * Not using platform_device_register_full() here because we don't have
+ * a handle to the platform_device before it returns. xe_i2c_notifier()
+ * uses that handle, but it may be called before
+ * platform_device_register_full() is done.
+ */
+ pdev = platform_device_alloc(adapter_name, pci_dev_id(pci));
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto err_fwnode_remove;
+ }
+
+ if (i2c->adapter_irq) {
+ struct resource res;
+
+ res = DEFINE_RES_IRQ_NAMED(i2c->adapter_irq, "xe_i2c");
+
+ ret = platform_device_add_resources(pdev, &res, 1);
+ if (ret)
+ goto err_pdev_put;
+ }
+
+ pdev->dev.parent = i2c->drm_dev;
+ pdev->dev.fwnode = fwnode;
+ i2c->adapter_node = fwnode;
+ i2c->pdev = pdev;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err_pdev_put;
+
+ return 0;
+
+err_pdev_put:
+ platform_device_put(pdev);
+err_fwnode_remove:
+ fwnode_remove_software_node(fwnode);
+
+ return ret;
+}
+
+static void xe_i2c_unregister_adapter(struct xe_i2c *i2c)
+{
+ platform_device_unregister(i2c->pdev);
+ fwnode_remove_software_node(i2c->adapter_node);
+}
+
+/**
+ * xe_i2c_irq_handler - Handler for I2C interrupts
+ * @xe: xe device instance
+ * @master_ctl: interrupt register
+ *
+ * Forward interrupts generated by the I2C host adapter to the I2C host adapter
+ * driver.
+ */
+void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl)
+{
+ if (!xe->i2c || !xe->i2c->adapter_irq)
+ return;
+
+ if (master_ctl & I2C_IRQ)
+ generic_handle_irq_safe(xe->i2c->adapter_irq);
+}
+
+static int xe_i2c_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops xe_i2c_irq_ops = {
+ .map = xe_i2c_irq_map,
+};
+
+static int xe_i2c_create_irq(struct xe_i2c *i2c)
+{
+ struct irq_domain *domain;
+
+ if (!(i2c->ep.capabilities & XE_I2C_EP_CAP_IRQ))
+ return 0;
+
+ domain = irq_domain_create_linear(dev_fwnode(i2c->drm_dev), 1, &xe_i2c_irq_ops, NULL);
+ if (!domain)
+ return -ENOMEM;
+
+ i2c->adapter_irq = irq_create_mapping(domain, 0);
+ i2c->irqdomain = domain;
+
+ return 0;
+}
+
+static void xe_i2c_remove_irq(struct xe_i2c *i2c)
+{
+ if (!i2c->irqdomain)
+ return;
+
+ irq_dispose_mapping(i2c->adapter_irq);
+ irq_domain_remove(i2c->irqdomain);
+}
+
+static int xe_i2c_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct xe_i2c *i2c = context;
+
+ *val = xe_mmio_read32(i2c->mmio, XE_REG(reg + I2C_MEM_SPACE_OFFSET));
+
+ return 0;
+}
+
+static int xe_i2c_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct xe_i2c *i2c = context;
+
+ xe_mmio_write32(i2c->mmio, XE_REG(reg + I2C_MEM_SPACE_OFFSET), val);
+
+ return 0;
+}
+
+static const struct regmap_config i2c_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = xe_i2c_read,
+ .reg_write = xe_i2c_write,
+ .fast_io = true,
+};
+
+void xe_i2c_pm_suspend(struct xe_device *xe)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ return;
+
+ xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D3hot);
+ drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR));
+}
+
+void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ return;
+
+ if (d3cold)
+ xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY);
+
+ xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D0);
+ drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR));
+}
+
+static void xe_i2c_remove(void *data)
+{
+ struct xe_i2c *i2c = data;
+ unsigned int i;
+
+ for (i = 0; i < XE_I2C_MAX_CLIENTS; i++)
+ i2c_unregister_device(i2c->client[i]);
+
+ bus_unregister_notifier(&i2c_bus_type, &i2c->bus_notifier);
+ xe_i2c_unregister_adapter(i2c);
+ xe_i2c_remove_irq(i2c);
+}
+
+/**
+ * xe_i2c_probe - Probe the I2C host adapter and the I2C clients attached to it
+ * @xe: xe device instance
+ *
+ * Register all the I2C devices described in the I2C Endpoint data structure.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int xe_i2c_probe(struct xe_device *xe)
+{
+ struct device *drm_dev = xe->drm.dev;
+ struct xe_i2c_endpoint ep;
+ struct regmap *regmap;
+ struct xe_i2c *i2c;
+ int ret;
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return 0;
+
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
+ xe_i2c_read_endpoint(xe_root_tile_mmio(xe), &ep);
+ if (ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ return 0;
+
+ i2c = devm_kzalloc(drm_dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ INIT_WORK(&i2c->work, xe_i2c_client_work);
+ i2c->mmio = xe_root_tile_mmio(xe);
+ i2c->drm_dev = drm_dev;
+ i2c->ep = ep;
+ xe->i2c = i2c;
+
+ /* PCI PM isn't aware of this device, bring it up and match it with SGUnit state. */
+ xe_i2c_pm_resume(xe, true);
+
+ regmap = devm_regmap_init(drm_dev, NULL, i2c, &i2c_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ i2c->bus_notifier.notifier_call = xe_i2c_notifier;
+ ret = bus_register_notifier(&i2c_bus_type, &i2c->bus_notifier);
+ if (ret)
+ return ret;
+
+ ret = xe_i2c_create_irq(i2c);
+ if (ret)
+ goto err_unregister_notifier;
+
+ ret = xe_i2c_register_adapter(i2c);
+ if (ret)
+ goto err_remove_irq;
+
+ return devm_add_action_or_reset(drm_dev, xe_i2c_remove, i2c);
+
+err_remove_irq:
+ xe_i2c_remove_irq(i2c);
+
+err_unregister_notifier:
+ bus_unregister_notifier(&i2c_bus_type, &i2c->bus_notifier);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_i2c.h b/drivers/gpu/drm/xe/xe_i2c.h
new file mode 100644
index 000000000000..b767ed8ce52b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_i2c.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _XE_I2C_H_
+#define _XE_I2C_H_
+
+#include <linux/bits.h>
+#include <linux/notifier.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct device;
+struct fwnode_handle;
+struct i2c_adapter;
+struct i2c_client;
+struct irq_domain;
+struct platform_device;
+struct xe_device;
+struct xe_mmio;
+
+#define XE_I2C_MAX_CLIENTS 3
+
+#define XE_I2C_EP_COOKIE_DEVICE 0xde
+
+/* Endpoint Capabilities */
+#define XE_I2C_EP_CAP_IRQ BIT(0)
+
+struct xe_i2c_endpoint {
+ u8 cookie;
+ u8 capabilities;
+ u16 addr[XE_I2C_MAX_CLIENTS];
+};
+
+struct xe_i2c {
+ struct fwnode_handle *adapter_node;
+ struct platform_device *pdev;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client[XE_I2C_MAX_CLIENTS];
+
+ struct notifier_block bus_notifier;
+ struct work_struct work;
+
+ struct irq_domain *irqdomain;
+ int adapter_irq;
+
+ struct xe_i2c_endpoint ep;
+ struct device *drm_dev;
+
+ struct xe_mmio *mmio;
+};
+
+#if IS_ENABLED(CONFIG_I2C)
+int xe_i2c_probe(struct xe_device *xe);
+void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl);
+void xe_i2c_pm_suspend(struct xe_device *xe);
+void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold);
+#else
+static inline int xe_i2c_probe(struct xe_device *xe) { return 0; }
+static inline void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { }
+static inline void xe_i2c_pm_suspend(struct xe_device *xe) { }
+static inline void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) { }
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 5362d3174b06..5df5b8c2a3e4 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -18,10 +18,12 @@
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
+#include "xe_i2c.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_pxp.h"
#include "xe_sriov.h"
+#include "xe_tile.h"
/*
* Interrupt registers for a unit are always consecutive and ordered
@@ -160,7 +162,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
dmask = irqs << 16 | irqs;
smask = irqs << 16;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
/* Enable interrupts for each engine class */
xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
if (ccs_mask)
@@ -260,7 +262,7 @@ gt_engine_identity(struct xe_device *xe,
static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
- if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
+ if (instance == OTHER_GUC_INSTANCE && xe_gt_is_main_type(gt))
return xe_guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
return xe_guc_irq_handler(&gt->uc.guc, iir);
@@ -476,6 +478,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
if (xe->info.has_heci_cscfi)
xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
+ xe_i2c_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
}
}
@@ -550,7 +553,7 @@ static void xelp_irq_reset(struct xe_tile *tile)
static void dg1_irq_reset(struct xe_tile *tile)
{
- if (tile->id == 0)
+ if (xe_tile_is_root(tile))
dg1_intr_disable(tile_to_xe(tile));
gt_irq_reset(tile);
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 89393dcb53d9..a2000307d5bf 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -11,6 +11,7 @@
#include "xe_assert.h"
#include "xe_bo.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -71,13 +72,16 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_NEEDS_64K);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
}
lmtt_assert(lmtt, xe_bo_is_vram(bo));
+ lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE));
+
+ xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, xe_bo_size(bo));
pt->level = level;
pt->bo = bo;
@@ -91,6 +95,9 @@ out:
static void lmtt_pt_free(struct xe_lmtt_pt *pt)
{
+ lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n",
+ pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE));
+
xe_bo_unpin_map_no_vm(pt->bo);
kfree(pt);
}
@@ -216,6 +223,58 @@ void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
lmtt_setup_dir_ptr(lmtt);
}
+static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
+{
+ struct xe_gt_tlb_invalidation_fence fences[XE_MAX_GT_PER_TILE];
+ struct xe_gt_tlb_invalidation_fence *fence = fences;
+ struct xe_tile *tile = lmtt_to_tile(lmtt);
+ struct xe_gt *gt;
+ int result = 0;
+ int err;
+ u8 id;
+
+ for_each_gt_on_tile(gt, tile, id) {
+ xe_gt_tlb_invalidation_fence_init(gt, fence, true);
+ err = xe_gt_tlb_invalidation_all(gt, fence);
+ result = result ?: err;
+ fence++;
+ }
+
+ lmtt_debug(lmtt, "num_fences=%d err=%d\n", (int)(fence - fences), result);
+
+ /*
+ * It is fine to wait for all fences, even for those which cover an
+ * invalidation request that failed, as such fences should already be
+ * marked as signaled.
+ */
+ fence = fences;
+ for_each_gt_on_tile(gt, tile, id)
+ xe_gt_tlb_invalidation_fence_wait(fence++);
+
+ return result;
+}
+
+/**
+ * xe_lmtt_invalidate_hw - Invalidate LMTT hardware.
+ * @lmtt: the &xe_lmtt to invalidate
+ *
+ * Send requests to all GuCs on this tile to invalidate all TLBs.
+ *
+ * This function should be called only when running as a PF driver.
+ */
+void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt)
+{
+ struct xe_device *xe = lmtt_to_xe(lmtt);
+ int err;
+
+ lmtt_assert(lmtt, IS_SRIOV_PF(xe));
+
+ err = lmtt_invalidate_hw(lmtt);
+ if (err)
+ xe_sriov_warn(xe, "LMTT%u invalidation failed (%pe)",
+ lmtt_to_tile(lmtt)->id, ERR_PTR(err));
+}
+
static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
u64 pte, unsigned int idx)
{
@@ -226,9 +285,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
switch (lmtt->ops->lmtt_pte_size(level)) {
case sizeof(u32):
+ lmtt_assert(lmtt, !overflows_type(pte, u32));
+ lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32));
+
xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);
break;
case sizeof(u64):
+ lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));
+
xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
break;
default:
@@ -265,6 +329,7 @@ static void lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
return;
lmtt_write_pte(lmtt, pd, LMTT_PTE_INVALID, vfid);
+ lmtt_invalidate_hw(lmtt);
lmtt_assert(lmtt, pd->level > 0);
lmtt_assert(lmtt, pt->level == pd->level - 1);
@@ -386,11 +451,11 @@ static void lmtt_insert_bo(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo
u64 addr, vram_offset;
lmtt_assert(lmtt, IS_ALIGNED(start, page_size));
- lmtt_assert(lmtt, IS_ALIGNED(bo->size, page_size));
+ lmtt_assert(lmtt, IS_ALIGNED(xe_bo_size(bo), page_size));
lmtt_assert(lmtt, xe_bo_is_vram(bo));
vram_offset = vram_region_gpu_offset(bo->ttm.resource);
- xe_res_first(bo->ttm.resource, 0, bo->size, &cur);
+ xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
while (cur.remaining) {
addr = xe_res_dma(&cur);
addr += vram_offset; /* XXX */
diff --git a/drivers/gpu/drm/xe/xe_lmtt.h b/drivers/gpu/drm/xe/xe_lmtt.h
index cb10ef994db6..75a234fbf367 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.h
+++ b/drivers/gpu/drm/xe/xe_lmtt.h
@@ -15,6 +15,7 @@ struct xe_lmtt_ops;
#ifdef CONFIG_PCI_IOV
int xe_lmtt_init(struct xe_lmtt *lmtt);
void xe_lmtt_init_hw(struct xe_lmtt *lmtt);
+void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt);
int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range);
int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset);
void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid);
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index df3ceddede07..6d38411bdeba 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -24,6 +24,7 @@
#include "xe_hw_fence.h"
#include "xe_map.h"
#include "xe_memirq.h"
+#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_trace_lrc.h"
#include "xe_vm.h"
@@ -37,7 +38,34 @@
#define LRC_ENGINE_CLASS GENMASK_ULL(63, 61)
#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
+#define LRC_PPHWSP_SIZE SZ_4K
+#define LRC_INDIRECT_CTX_BO_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
+#define LRC_WA_BB_SIZE SZ_4K
+
+/*
+ * Layout of the LRC and associated data allocated as
+ * lrc->bo:
+ *
+ * Region Size
+ * +============================+=================================+ <- __xe_lrc_ring_offset()
+ * | Ring | ring_size, see |
+ * | | xe_lrc_init() |
+ * +============================+=================================+ <- __xe_lrc_pphwsp_offset()
+ * | PPHWSP (includes SW state) | 4K |
+ * +----------------------------+---------------------------------+ <- __xe_lrc_regs_offset()
+ * | Engine Context Image | n * 4K, see |
+ * | | xe_gt_lrc_size() |
+ * +----------------------------+---------------------------------+ <- __xe_lrc_indirect_ring_offset()
+ * | Indirect Ring State Page | 0 or 4K, see |
+ * | | XE_LRC_FLAG_INDIRECT_RING_STATE |
+ * +============================+=================================+ <- __xe_lrc_indirect_ctx_offset()
+ * | Indirect Context Page | 0 or 4K, see |
+ * | | XE_LRC_FLAG_INDIRECT_CTX |
+ * +============================+=================================+ <- __xe_lrc_wa_bb_offset()
+ * | WA BB Per Ctx | 4K |
+ * +============================+=================================+ <- xe_bo_size(lrc->bo)
+ */
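
To make the layout concrete, one hypothetical sizing (illustrative numbers only, assuming a Xe2 render engine with indirect ring state enabled and no indirect context page):

    ring                            = 16K                    (ring_size from the caller)
    lrc_size                        = 4K PPHWSP + 3 * 4K image
                                      + 4K indirect ring state = 20K (xe_gt_lrc_size())
    bo_size                         = 16K + 20K + 4K WA BB   = 40K
    __xe_lrc_wa_bb_offset()         = 40K - 4K               = 36K
    __xe_lrc_indirect_ring_offset() = 40K - 4K - 4K          = 32K
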
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
@@ -45,24 +73,33 @@ lrc_to_xe(struct xe_lrc *lrc)
return gt_to_xe(lrc->fence_ctx.gt);
}
+static bool
+gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class)
+{
+ return false;
+}
+
size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
{
struct xe_device *xe = gt_to_xe(gt);
size_t size;
+ /* Per-process HW status page (PPHWSP) */
+ size = LRC_PPHWSP_SIZE;
+
+ /* Engine context image */
switch (class) {
case XE_ENGINE_CLASS_RENDER:
if (GRAPHICS_VER(xe) >= 20)
- size = 4 * SZ_4K;
+ size += 3 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
case XE_ENGINE_CLASS_COMPUTE:
- /* 14 pages since graphics_ver == 11 */
if (GRAPHICS_VER(xe) >= 20)
- size = 3 * SZ_4K;
+ size += 2 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
default:
WARN(1, "Unknown engine class: %d", class);
@@ -71,7 +108,7 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
case XE_ENGINE_CLASS_VIDEO_DECODE:
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
case XE_ENGINE_CLASS_OTHER:
- size = 2 * SZ_4K;
+ size += 1 * SZ_4K;
}
/* Add indirect ring state page */
@@ -576,8 +613,6 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
if (xe_gt_has_indirect_ring_state(hwe->gt))
regs[CTX_CONTEXT_CONTROL] |=
_MASKED_BIT_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE);
-
- /* TODO: Timestamp */
}
static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
@@ -649,8 +684,8 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_SEQNO_PPHWSP_OFFSET 512
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
+#define LRC_ENGINE_ID_PPHWSP_OFFSET 1024
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
-#define LRC_PPHWSP_SIZE SZ_4K
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
@@ -684,7 +719,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
{
- /* The start seqno is stored in the driver-defined portion of PPHWSP */
+ /* This is stored in the driver-defined portion of PPHWSP */
return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET;
}
@@ -694,15 +729,40 @@ static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc)
return xe_lrc_pphwsp_offset(lrc) + LRC_PARALLEL_PPHWSP_OFFSET;
}
+static inline u32 __xe_lrc_engine_id_offset(struct xe_lrc *lrc)
+{
+ return xe_lrc_pphwsp_offset(lrc) + LRC_ENGINE_ID_PPHWSP_OFFSET;
+}
+
static u32 __xe_lrc_ctx_timestamp_offset(struct xe_lrc *lrc)
{
return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP * sizeof(u32);
}
+static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc)
+{
+ return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP_UDW * sizeof(u32);
+}
+
static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc)
{
- /* Indirect ring state page is at the very end of LRC */
- return lrc->size - LRC_INDIRECT_RING_STATE_SIZE;
+ u32 offset = xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE -
+ LRC_INDIRECT_RING_STATE_SIZE;
+
+ if (lrc->flags & XE_LRC_FLAG_INDIRECT_CTX)
+ offset -= LRC_INDIRECT_CTX_BO_SIZE;
+
+ return offset;
+}
+
+static inline u32 __xe_lrc_indirect_ctx_offset(struct xe_lrc *lrc)
+{
+ return xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE - LRC_INDIRECT_CTX_BO_SIZE;
+}
+
+static inline u32 __xe_lrc_wa_bb_offset(struct xe_lrc *lrc)
+{
+ return xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE;
}
#define DECL_MAP_ADDR_HELPERS(elem) \
@@ -726,8 +786,10 @@ DECL_MAP_ADDR_HELPERS(regs)
DECL_MAP_ADDR_HELPERS(start_seqno)
DECL_MAP_ADDR_HELPERS(ctx_job_timestamp)
DECL_MAP_ADDR_HELPERS(ctx_timestamp)
+DECL_MAP_ADDR_HELPERS(ctx_timestamp_udw)
DECL_MAP_ADDR_HELPERS(parallel)
DECL_MAP_ADDR_HELPERS(indirect_ring)
+DECL_MAP_ADDR_HELPERS(engine_id)
#undef DECL_MAP_ADDR_HELPERS
@@ -743,18 +805,37 @@ u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc)
}
/**
+ * xe_lrc_ctx_timestamp_udw_ggtt_addr() - Get ctx timestamp udw GGTT address
+ * @lrc: Pointer to the lrc.
+ *
+ * Returns: ctx timestamp udw GGTT address
+ */
+u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc)
+{
+ return __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
+}
+
+/**
* xe_lrc_ctx_timestamp() - Read ctx timestamp value
* @lrc: Pointer to the lrc.
*
* Returns: ctx timestamp value
*/
-u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
+u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
{
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map map;
+ u32 ldw, udw = 0;
map = __xe_lrc_ctx_timestamp_map(lrc);
- return xe_map_read32(xe, &map);
+ ldw = xe_map_read32(xe, &map);
+
+ if (xe->info.has_64bit_timestamp) {
+ map = __xe_lrc_ctx_timestamp_udw_map(lrc);
+ udw = xe_map_read32(xe, &map);
+ }
+
+ return (u64)udw << 32 | ldw;
}
/**
@@ -864,7 +945,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
- u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
+ u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));
@@ -873,76 +954,285 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
static void xe_lrc_finish(struct xe_lrc *lrc)
{
xe_hw_fence_ctx_finish(&lrc->fence_ctx);
- xe_bo_lock(lrc->bo, false);
- xe_bo_unpin(lrc->bo);
- xe_bo_unlock(lrc->bo);
- xe_bo_put(lrc->bo);
+ xe_bo_unpin_map_no_vm(lrc->bo);
+}
+
+/*
+ * wa_bb_setup_utilization() - Write commands to wa bb to assist
+ * in calculating active context run ticks.
+ *
+ * Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the
+ * context, but only gets updated when the context switches out. In order to
+ * check how long a context has been active before it switches out, two things
+ * are required:
+ *
+ * (1) Determine if the context is running:
+ * To do so, we program the WA BB to set an initial value for CTX_TIMESTAMP in
+ * the LRC. The value chosen is 1 since 0 is the initial value when the LRC is
+ * initialized. During a query, we just check for this value to determine if the
+ * context is active. If the context switched out, it would overwrite this
+ * location with the actual CTX_TIMESTAMP MMIO value. Note that WA BB runs as
+ * the last part of context restore, so reusing this LRC location will not
+ * clobber anything.
+ *
+ * (2) Calculate the time that the context has been active for:
+ * The CTX_TIMESTAMP ticks only when the context is active. If a context is
+ * active, we just use the CTX_TIMESTAMP MMIO as the new value of utilization.
+ * While doing so, we need to read the CTX_TIMESTAMP MMIO for the specific
+ * engine instance. Since we do not know which instance the context is running
+ * on until it is scheduled, we also read the ENGINE_ID MMIO in the WA BB and
+ * store it in the PPHWSP.
+ */
+#define CONTEXT_ACTIVE 1ULL
+static ssize_t setup_utilization_wa(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch,
+ size_t max_len)
+{
+ u32 *cmd = batch;
+
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
+ return -ENOSPC;
+
+ *cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
+ *cmd++ = ENGINE_ID(0).addr;
+ *cmd++ = __xe_lrc_engine_id_ggtt_addr(lrc);
+ *cmd++ = 0;
+
+ *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+ *cmd++ = __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
+ *cmd++ = 0;
+ *cmd++ = lower_32_bits(CONTEXT_ACTIVE);
+
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp) {
+ *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+ *cmd++ = __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
+ *cmd++ = 0;
+ *cmd++ = upper_32_bits(CONTEXT_ACTIVE);
+ }
+
+ return cmd - batch;
+}
+
+struct bo_setup {
+ ssize_t (*setup)(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_size);
+};
+
+struct bo_setup_state {
+ /* Input: */
+ struct xe_lrc *lrc;
+ struct xe_hw_engine *hwe;
+ size_t max_size;
+ size_t reserve_dw;
+ unsigned int offset;
+ const struct bo_setup *funcs;
+ unsigned int num_funcs;
+
+ /* State: */
+ u32 *buffer;
+ u32 *ptr;
+ unsigned int written;
+};
+
+static int setup_bo(struct bo_setup_state *state)
+{
+ ssize_t remain;
+
+ if (state->lrc->bo->vmap.is_iomem) {
+ state->buffer = kmalloc(state->max_size, GFP_KERNEL);
+ if (!state->buffer)
+ return -ENOMEM;
+ state->ptr = state->buffer;
+ } else {
+ state->ptr = state->lrc->bo->vmap.vaddr + state->offset;
+ state->buffer = NULL;
+ }
+
+ remain = state->max_size / sizeof(u32);
+
+ for (size_t i = 0; i < state->num_funcs; i++) {
+ ssize_t len = state->funcs[i].setup(state->lrc, state->hwe,
+ state->ptr, remain);
+
+ remain -= len;
+
+ /*
+ * Caller has asked for at least reserve_dw to remain unused.
+ */
+ if (len < 0 ||
+ xe_gt_WARN_ON(state->lrc->gt, remain < state->reserve_dw))
+ goto fail;
+
+ state->ptr += len;
+ state->written += len;
+ }
+
+ return 0;
+
+fail:
+ kfree(state->buffer);
+ return -ENOSPC;
+}
+
+static void finish_bo(struct bo_setup_state *state)
+{
+ if (!state->buffer)
+ return;
+
+ xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap,
+ state->offset, state->buffer,
+ state->written * sizeof(u32));
+ kfree(state->buffer);
}
-#define PVC_CTX_ASID (0x2e + 1)
-#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
+static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ static const struct bo_setup funcs[] = {
+ { .setup = setup_utilization_wa },
+ };
+ struct bo_setup_state state = {
+ .lrc = lrc,
+ .hwe = hwe,
+ .max_size = LRC_WA_BB_SIZE,
+ .reserve_dw = 1,
+ .offset = __xe_lrc_wa_bb_offset(lrc),
+ .funcs = funcs,
+ .num_funcs = ARRAY_SIZE(funcs),
+ };
+ int ret;
+
+ ret = setup_bo(&state);
+ if (ret)
+ return ret;
+
+ *state.ptr++ = MI_BATCH_BUFFER_END;
+ state.written++;
+
+ finish_bo(&state);
+
+ xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
+ xe_bo_ggtt_addr(lrc->bo) + state.offset + 1);
+
+ return 0;
+}
+
+static int
+setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ static struct bo_setup rcs_funcs[] = {
+ };
+ struct bo_setup_state state = {
+ .lrc = lrc,
+ .hwe = hwe,
+ .max_size = (63 * 64) /* max 63 cachelines */,
+ .offset = __xe_lrc_indirect_ctx_offset(lrc),
+ };
+ int ret;
+
+ if (!(lrc->flags & XE_LRC_FLAG_INDIRECT_CTX))
+ return 0;
+
+ if (hwe->class == XE_ENGINE_CLASS_RENDER ||
+ hwe->class == XE_ENGINE_CLASS_COMPUTE) {
+ state.funcs = rcs_funcs;
+ state.num_funcs = ARRAY_SIZE(rcs_funcs);
+ }
+
+ if (xe_gt_WARN_ON(lrc->gt, !state.funcs))
+ return 0;
+
+ ret = setup_bo(&state);
+ if (ret)
+ return ret;
+
+ /*
+ * Align to a 64B cacheline so there is no garbage at the end for the CS
+ * to execute: the indirect ctx size must be a multiple of 64 bytes.
+ */
+ while (state.written & 0xf) {
+ *state.ptr++ = MI_NOOP;
+ state.written++;
+ }
+
+ finish_bo(&state);
+
+ xe_lrc_write_ctx_reg(lrc,
+ CTX_CS_INDIRECT_CTX,
+ (xe_bo_ggtt_addr(lrc->bo) + state.offset) |
+ /* Size in CLs. */
+ (state.written * sizeof(u32) / 64));
+ xe_lrc_write_ctx_reg(lrc,
+ CTX_CS_INDIRECT_CTX_OFFSET,
+ CTX_INDIRECT_CTX_OFFSET_DEFAULT);
+
+ return 0;
+}
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_vm *vm, u32 ring_size, u16 msix_vec,
u32 init_flags)
{
struct xe_gt *gt = hwe->gt;
+ const u32 lrc_size = xe_gt_lrc_size(gt, hwe->class);
+ u32 bo_size = ring_size + lrc_size + LRC_WA_BB_SIZE;
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
struct iosys_map map;
- void *init_data = NULL;
u32 arb_enable;
- u32 lrc_size;
+ u32 bo_flags;
int err;
kref_init(&lrc->refcount);
+ lrc->gt = gt;
+ lrc->size = lrc_size;
lrc->flags = 0;
- lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
+ lrc->ring.size = ring_size;
+ lrc->ring.tail = 0;
+
+ if (gt_engine_needs_indirect_ctx(gt, hwe->class)) {
+ lrc->flags |= XE_LRC_FLAG_INDIRECT_CTX;
+ bo_size += LRC_INDIRECT_CTX_BO_SIZE;
+ }
+
if (xe_gt_has_indirect_ring_state(gt))
lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
- /*
- * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
- * via VM bind calls.
- */
- lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size,
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE;
+ if (vm && vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
+ lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo_flags);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
- lrc->size = lrc_size;
- lrc->tile = gt_to_tile(hwe->gt);
- lrc->ring.size = ring_size;
- lrc->ring.tail = 0;
- lrc->ctx_timestamp = 0;
-
xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt,
hwe->fence_irq, hwe->name);
- if (!gt->default_lrc[hwe->class]) {
- init_data = empty_lrc_data(hwe);
- if (!init_data) {
- err = -ENOMEM;
- goto err_lrc_finish;
- }
- }
-
/*
* Init Per-Process of HW status Page, LRC / context state to known
- * values
+ * values. If a primed default_lrc already exists, just copy it; otherwise
+ * this is the early submission used to record the default LRC, so build a
+ * new empty image from scratch.
*/
map = __xe_lrc_pphwsp_map(lrc);
- if (!init_data) {
+ if (gt->default_lrc[hwe->class]) {
xe_map_memset(xe, &map, 0, 0, LRC_PPHWSP_SIZE); /* PPHWSP */
xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE,
- xe_gt_lrc_size(gt, hwe->class) - LRC_PPHWSP_SIZE);
+ lrc_size - LRC_PPHWSP_SIZE);
} else {
- xe_map_memcpy_to(xe, &map, 0, init_data,
- xe_gt_lrc_size(gt, hwe->class));
+ void *init_data = empty_lrc_data(hwe);
+
+ if (!init_data) {
+ err = -ENOMEM;
+ goto err_lrc_finish;
+ }
+
+ xe_map_memcpy_to(xe, &map, 0, init_data, lrc_size);
kfree(init_data);
}
@@ -990,10 +1280,13 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
_MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
+ lrc->ctx_timestamp = 0;
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
+ xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0);
if (xe->info.has_asid && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
+ xe_lrc_write_ctx_reg(lrc, CTX_ASID, vm->usm.asid);
lrc->desc = LRC_VALID;
lrc->desc |= FIELD_PREP(LRC_ADDRESSING_MODE, LRC_LEGACY_64B_CONTEXT);
@@ -1019,6 +1312,14 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
map = __xe_lrc_start_seqno_map(lrc);
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
+ err = setup_wa_bb(lrc, hwe);
+ if (err)
+ goto err_lrc_finish;
+
+ err = setup_indirect_ctx(lrc, hwe);
+ if (err)
+ goto err_lrc_finish;
+
return 0;
err_lrc_finish:
@@ -1238,6 +1539,21 @@ struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc)
return __xe_lrc_parallel_map(lrc);
}
+/**
+ * xe_lrc_engine_id() - Read engine id value
+ * @lrc: Pointer to the lrc.
+ *
+ * Returns: engine id value
+ */
+static u32 xe_lrc_engine_id(struct xe_lrc *lrc)
+{
+ struct xe_device *xe = lrc_to_xe(lrc);
+ struct iosys_map map;
+
+ map = __xe_lrc_engine_id_map(lrc);
+ return xe_map_read32(xe, &map);
+}
+
static int instr_dw(u32 cmd_header)
{
/* GFXPIPE "SINGLE_DW" opcodes are a single dword */
@@ -1445,6 +1761,7 @@ static int dump_gfxpipe_command(struct drm_printer *p,
MATCH3D(3DSTATE_CLIP_MESH);
MATCH3D(3DSTATE_SBE_MESH);
MATCH3D(3DSTATE_CPSIZE_CONTROL_BUFFER);
+ MATCH3D(3DSTATE_COARSE_PIXEL);
MATCH3D(3DSTATE_DRAWING_RECTANGLE);
MATCH3D(3DSTATE_CHROMA_KEY);
@@ -1595,7 +1912,7 @@ static const struct instr_state xe_hpg_svg_state[] = {
{ .instr = CMD_3DSTATE_DRAWING_RECTANGLE, .num_dw = 4 },
};
-void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb)
+u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs)
{
struct xe_gt *gt = q->hwe->gt;
struct xe_device *xe = gt_to_xe(gt);
@@ -1630,7 +1947,7 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
if (!state_table) {
xe_gt_dbg(gt, "No non-register state to emit on graphics ver %d.%02d\n",
GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
- return;
+ return cs;
}
for (int i = 0; i < state_table_size; i++) {
@@ -1653,12 +1970,14 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
instr == CMD_3DSTATE_DRAWING_RECTANGLE)
instr = CMD_3DSTATE_DRAWING_RECTANGLE_FAST;
- bb->cs[bb->len] = instr;
+ *cs = instr;
if (!is_single_dw)
- bb->cs[bb->len] |= (num_dw - 2);
+ *cs |= (num_dw - 2);
- bb->len += num_dw;
+ cs += num_dw;
}
+
+ return cs;
}
struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
@@ -1668,9 +1987,6 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
if (!snapshot)
return NULL;
- if (lrc->bo->vm)
- xe_vm_get(lrc->bo->vm);
-
snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
snapshot->ring_addr = __xe_lrc_ring_ggtt_addr(lrc);
snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc);
@@ -1682,9 +1998,9 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
snapshot->seqno = xe_lrc_seqno(lrc);
snapshot->lrc_bo = xe_bo_get(lrc->bo);
snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
- snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
+ snapshot->lrc_size = lrc->size;
snapshot->lrc_snapshot = NULL;
- snapshot->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);
return snapshot;
}
@@ -1692,14 +2008,12 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
{
struct xe_bo *bo;
- struct xe_vm *vm;
struct iosys_map src;
if (!snapshot)
return;
bo = snapshot->lrc_bo;
- vm = bo->vm;
snapshot->lrc_bo = NULL;
snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL);
@@ -1719,8 +2033,6 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
xe_bo_unlock(bo);
put_bo:
xe_bo_put(bo);
- if (vm)
- xe_vm_put(vm);
}
void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
@@ -1773,33 +2085,80 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
return;
kvfree(snapshot->lrc_snapshot);
- if (snapshot->lrc_bo) {
- struct xe_vm *vm;
-
- vm = snapshot->lrc_bo->vm;
+ if (snapshot->lrc_bo)
xe_bo_put(snapshot->lrc_bo);
- if (vm)
- xe_vm_put(vm);
- }
+
kfree(snapshot);
}
+static int get_ctx_timestamp(struct xe_lrc *lrc, u32 engine_id, u64 *reg_ctx_ts)
+{
+ u16 class = REG_FIELD_GET(ENGINE_CLASS_ID, engine_id);
+ u16 instance = REG_FIELD_GET(ENGINE_INSTANCE_ID, engine_id);
+ struct xe_hw_engine *hwe;
+ u64 val;
+
+ hwe = xe_gt_hw_engine(lrc->gt, class, instance, false);
+ if (xe_gt_WARN_ONCE(lrc->gt, !hwe || xe_hw_engine_is_reserved(hwe),
+ "Unexpected engine class:instance %d:%d for context utilization\n",
+ class, instance))
+ return -1;
+
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
+ val = xe_mmio_read64_2x32(&hwe->gt->mmio,
+ RING_CTX_TIMESTAMP(hwe->mmio_base));
+ else
+ val = xe_mmio_read32(&hwe->gt->mmio,
+ RING_CTX_TIMESTAMP(hwe->mmio_base));
+
+ *reg_ctx_ts = val;
+
+ return 0;
+}
+
/**
* xe_lrc_update_timestamp() - Update ctx timestamp
* @lrc: Pointer to the lrc.
* @old_ts: Old timestamp value
*
 * Populate @old_ts with the current saved ctx timestamp, read the new ctx timestamp and
- * update saved value.
+ * update the saved value. With support for active contexts, the calculation
+ * may be slightly racy, so we follow read-again logic to ensure that the
+ * context is still active before returning the right timestamp.
*
* Returns: New ctx timestamp value
*/
-u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
+u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts)
{
+ u64 lrc_ts, reg_ts;
+ u32 engine_id;
+
*old_ts = lrc->ctx_timestamp;
- lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ lrc_ts = xe_lrc_ctx_timestamp(lrc);
+ /* CTX_TIMESTAMP mmio read is invalid on VF, so return the LRC value */
+ if (IS_SRIOV_VF(lrc_to_xe(lrc))) {
+ lrc->ctx_timestamp = lrc_ts;
+ goto done;
+ }
+
+ if (lrc_ts == CONTEXT_ACTIVE) {
+ engine_id = xe_lrc_engine_id(lrc);
+ if (!get_ctx_timestamp(lrc, engine_id, &reg_ts))
+ lrc->ctx_timestamp = reg_ts;
+
+ /* read lrc again to ensure context is still active */
+ lrc_ts = xe_lrc_ctx_timestamp(lrc);
+ }
+
+ /*
+ * If context switched out, just use the lrc_ts. Note that this needs to
+ * be a separate if condition.
+ */
+ if (lrc_ts != CONTEXT_ACTIVE)
+ lrc->ctx_timestamp = lrc_ts;
+done:
trace_xe_lrc_update_timestamp(lrc, *old_ts);
return lrc->ctx_timestamp;
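
A minimal sketch of how a caller might consume this interface to account run time; the helper name and surrounding bookkeeping are invented for illustration:

    /* Ticks the context ran since the previous sample (illustrative only). */
    static u64 sample_run_ticks(struct xe_lrc *lrc)
    {
            u64 old_ts, new_ts;

            new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
            return new_ts - old_ts;
    }
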
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 0b40f349ab95..b6c8053c581b 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -112,7 +112,7 @@ void xe_lrc_dump_default(struct drm_printer *p,
struct xe_gt *gt,
enum xe_engine_class);
-void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb);
+u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs);
struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc);
void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot);
@@ -120,7 +120,8 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot);
u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc);
-u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
+u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc);
+u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
@@ -136,6 +137,6 @@ u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
*
* Returns the current LRC timestamp
*/
-u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts);
+u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts);
#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 71ecb453f811..e9883706e004 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -22,14 +22,15 @@ struct xe_lrc {
*/
struct xe_bo *bo;
- /** @size: size of lrc including any indirect ring state page */
+ /** @size: size of the lrc and optional indirect ring state */
u32 size;
- /** @tile: tile which this LRC belongs to */
- struct xe_tile *tile;
+ /** @gt: gt which this LRC belongs to */
+ struct xe_gt *gt;
/** @flags: LRC flags */
-#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1
+#define XE_LRC_FLAG_INDIRECT_CTX 0x1
+#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x2
u32 flags;
/** @refcount: ref count of this lrc */
@@ -52,7 +53,7 @@ struct xe_lrc {
struct xe_hw_fence_ctx fence_ctx;
/** @ctx_timestamp: readout value of CTX_TIMESTAMP on last update */
- u32 ctx_timestamp;
+ u64 ctx_timestamp;
};
struct xe_lrc_snapshot;
diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
index f62e0c8b67ab..8d67f6ba2d95 100644
--- a/drivers/gpu/drm/xe/xe_map.h
+++ b/drivers/gpu/drm/xe/xe_map.h
@@ -78,6 +78,24 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
iosys_map_wr(map__, offset__, type__, val__); \
})
+#define xe_map_rd_array(xe__, map__, index__, type__) \
+ xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__)
+
+#define xe_map_wr_array(xe__, map__, index__, type__, val__) \
+ xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__)
+
+#define xe_map_rd_array_u32(xe__, map__, index__) \
+ xe_map_rd_array(xe__, map__, index__, u32)
+
+#define xe_map_wr_array_u32(xe__, map__, index__, val__) \
+ xe_map_wr_array(xe__, map__, index__, u32, val__)
+
+#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \
+ xe_map_rd_array_u32(xe__, map__, (index__) % (size__))
+
+#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \
+ xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__)
+
#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \
struct xe_device *__xe = xe__; \
xe_device_assert_mem_access(__xe); \
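
The ring variants above fold the wrap-around modulo into the accessor. A minimal usage sketch (the map, index, and ring size are placeholders, not taken from a real call site):

    /* Round-trip one dword through a ring backed by an iosys_map. */
    static void ring_roundtrip(struct xe_device *xe, struct iosys_map *map,
                               u32 head)
    {
            /* ring size in u32 elements; SZ_4K from <linux/sizes.h> */
            const u32 ring_dwords = SZ_4K / sizeof(u32);

            xe_map_wr_ring_u32(xe, map, head, ring_dwords, 0xdeadbeef);
            WARN_ON(xe_map_rd_ring_u32(xe, map, head, ring_dwords) != 0xdeadbeef);
    }
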
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 404fa2a456d5..49c45ec3e83c 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -86,7 +86,7 @@ static const char *guc_name(struct xe_guc *guc)
* This object needs to be 4KiB aligned.
*
* - _`Interrupt Source Report Page`: this is the equivalent of the
- * GEN11_GT_INTR_DWx registers, with each bit in those registers being
+ * GT_INTR_DWx registers, with each bit in those registers being
* mapped to a byte here. The offsets are the same, just bytes instead
* of bits. This object needs to be cacheline aligned.
*
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 5a3e89022c38..7d20ac4bb633 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -82,7 +82,7 @@ struct xe_migrate {
* of the instruction. Subtracting the instruction header (1 dword) and
* address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
*/
-#define MAX_PTE_PER_SDI 0x1FE
+#define MAX_PTE_PER_SDI 0x1FEU
/**
* xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
@@ -97,7 +97,7 @@ struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
return tile->migrate->q;
}
-static void xe_migrate_fini(struct drm_device *dev, void *arg)
+static void xe_migrate_fini(void *arg)
{
struct xe_migrate *m = arg;
@@ -203,19 +203,18 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
/* Need to be sure everything fits in the first PT, or create more */
- xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
+ xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED |
XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo))
return PTR_ERR(bo);
/* PT30 & PT31 reserved for 2M identity map */
- pt29_ofs = bo->size - 3 * XE_PAGE_SIZE;
+ pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
@@ -237,7 +236,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (!IS_DGFX(xe)) {
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
- for (i = 0; i < batch->size;
+ for (i = 0; i < xe_bo_size(batch);
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
entry = vm->pt_ops->pte_encode_bo(batch, i,
@@ -248,13 +247,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
level++;
}
if (xe->info.has_usm) {
- xe_tile_assert(tile, batch->size == SZ_1M);
+ xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);
batch = tile->primary_gt->usm.bb_pool->bo;
m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
- xe_tile_assert(tile, batch->size == SZ_512K);
+ xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);
- for (i = 0; i < batch->size;
+ for (i = 0; i < xe_bo_size(batch);
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
entry = vm->pt_ops->pte_encode_bo(batch, i,
@@ -307,7 +306,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
- u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
+ u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
pat_index, pt30_ofs);
@@ -322,7 +321,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
u64 vram_offset = IDENTITY_OFFSET +
DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
- u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
+ u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
@@ -401,7 +400,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
struct xe_vm *vm;
int err;
- m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
+ m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL);
if (!m)
return ERR_PTR(-ENOMEM);
@@ -455,7 +454,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
might_lock(&m->job_mutex);
fs_reclaim_release(GFP_KERNEL);
- err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
+ err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
if (err)
return ERR_PTR(err);
@@ -670,6 +669,7 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
u32 mocs = 0;
u32 tile_y = 0;
+ xe_gt_assert(gt, !(pitch & 3));
xe_gt_assert(gt, size / pitch <= S16_MAX);
xe_gt_assert(gt, pitch / 4 <= S16_MAX);
xe_gt_assert(gt, pitch <= U16_MAX);
@@ -768,7 +768,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
struct dma_fence *fence = NULL;
- u64 size = src_bo->size;
+ u64 size = xe_bo_size(src_bo);
struct xe_res_cursor src_it, dst_it, ccs_it;
u64 src_L0_ofs, dst_L0_ofs;
u32 src_L0_pt, dst_L0_pt;
@@ -779,17 +779,19 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool dst_is_pltt = dst->mem_type == XE_PL_TT;
bool src_is_vram = mem_type_is_vram(src->mem_type);
bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+ bool type_device = src_bo->ttm.type == ttm_bo_type_device;
+ bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
- bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/* Copying CCS between two different BOs is not supported yet. */
if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
return ERR_PTR(-EINVAL);
- if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
+ if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
return ERR_PTR(-EINVAL);
if (!src_is_vram)
@@ -839,6 +841,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
avail_pts, avail_pts);
if (copy_system_ccs) {
+ xe_assert(xe, type_device);
ccs_size = xe_device_ccs_bytes(xe, src_L0);
batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
&ccs_ofs, &ccs_pt, 0,
@@ -849,7 +852,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
/* Add copy commands size here */
batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
- ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
+ ((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb)) {
@@ -860,7 +863,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
xe_res_next(&src_it, src_L0);
else
- emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
+ emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
&src_it, src_L0, src);
if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
@@ -878,7 +881,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (!copy_only_ccs)
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
- if (xe_migrate_needs_ccs_emit(xe))
+ if (needs_ccs_emit)
flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
IS_DGFX(xe) ? src_is_vram : src_is_pltt,
dst_L0_ofs,
@@ -1061,7 +1064,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_device *xe = gt_to_xe(gt);
bool clear_only_system_ccs = false;
struct dma_fence *fence = NULL;
- u64 size = bo->size;
+ u64 size = xe_bo_size(bo);
struct xe_res_cursor src_it;
struct ttm_resource *src = dst;
int err;
@@ -1073,9 +1076,9 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
clear_only_system_ccs = true;
if (!clear_vram)
- xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
+ xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
else
- xe_res_first(src, 0, bo->size, &src_it);
+ xe_res_first(src, 0, xe_bo_size(bo), &src_it);
while (size) {
u64 clear_L0_ofs;
@@ -1404,7 +1407,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
if (idx == chunk)
goto next_cmd;
- xe_tile_assert(tile, pt_bo->size == SZ_4K);
+ xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);
/* Map a PT at most once */
if (pt_bo->update_index < 0)
@@ -1550,15 +1553,17 @@ static u32 pte_update_cmd_size(u64 size)
u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
+
/*
* MI_STORE_DATA_IMM command is used to update page table. Each
- * instruction can update maximumly 0x1ff pte entries. To update
- * n (n <= 0x1ff) pte entries, we need:
- * 1 dword for the MI_STORE_DATA_IMM command header (opcode etc)
- * 2 dword for the page table's physical location
- * 2*n dword for value of pte to fill (each pte entry is 2 dwords)
+ * instruction can update at most MAX_PTE_PER_SDI pte entries. To
+ * update n (n <= MAX_PTE_PER_SDI) pte entries, we need:
+ *
+ * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc)
+ * - 2 dword for the page table's physical location
+ * - 2*n dword for value of pte to fill (each pte entry is 2 dwords)
*/
- num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, 0x1ff);
+ num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI);
num_dword += entries * 2;
return num_dword;
@@ -1574,7 +1579,7 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
while (ptes) {
- u32 chunk = min(0x1ffU, ptes);
+ u32 chunk = min(MAX_PTE_PER_SDI, ptes);
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = pt_offset;
@@ -1601,55 +1606,63 @@ enum xe_migrate_copy_dir {
XE_MIGRATE_COPY_TO_SRAM,
};
+#define XE_CACHELINE_BYTES 64ull
+#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1)
+
static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
- unsigned long npages,
+ unsigned long len,
+ unsigned long sram_offset,
dma_addr_t *sram_addr, u64 vram_addr,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
+ bool use_usm_batch = xe->info.has_usm;
struct dma_fence *fence = NULL;
u32 batch_size = 2;
u64 src_L0_ofs, dst_L0_ofs;
- u64 round_update_size;
struct xe_sched_job *job;
struct xe_bb *bb;
u32 update_idx, pt_slot = 0;
+ unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
+ unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
+ PAGE_SIZE : 4;
int err;
- if (npages * PAGE_SIZE > MAX_PREEMPTDISABLE_TRANSFER)
- return ERR_PTR(-EINVAL);
+ if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
+ (sram_offset | vram_addr) & XE_CACHELINE_MASK))
+ return ERR_PTR(-EOPNOTSUPP);
- round_update_size = npages * PAGE_SIZE;
- batch_size += pte_update_cmd_size(round_update_size);
+ xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
+
+ batch_size += pte_update_cmd_size(len);
batch_size += EMIT_COPY_DW;
- bb = xe_bb_new(gt, batch_size, true);
+ bb = xe_bb_new(gt, batch_size, use_usm_batch);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
return ERR_PTR(err);
}
build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
- sram_addr, round_update_size);
+ sram_addr, len + sram_offset);
if (dir == XE_MIGRATE_COPY_TO_VRAM) {
- src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
} else {
src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
- dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, round_update_size,
- XE_PAGE_SIZE);
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
job = xe_bb_create_migration_job(m->q, bb,
- xe_migrate_batch_base(m, true),
+ xe_migrate_batch_base(m, use_usm_batch),
update_idx);
if (IS_ERR(job)) {
err = PTR_ERR(job);
@@ -1694,7 +1707,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
dma_addr_t *src_addr,
u64 dst_addr)
{
- return xe_migrate_vram(m, npages, src_addr, dst_addr,
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
XE_MIGRATE_COPY_TO_VRAM);
}
@@ -1715,10 +1728,205 @@ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
u64 src_addr,
dma_addr_t *dst_addr)
{
- return xe_migrate_vram(m, npages, dst_addr, src_addr,
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
XE_MIGRATE_COPY_TO_SRAM);
}
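
The new -EOPNOTSUPP check above restricts xe_migrate_vram() to cacheline-granular transfers. A minimal standalone sketch of the same predicate, reusing the XE_CACHELINE_* values defined in this hunk (the sample arguments are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XE_CACHELINE_BYTES 64ull
#define XE_CACHELINE_MASK  (XE_CACHELINE_BYTES - 1)

/* Mirrors the drm_WARN_ON() condition in xe_migrate_vram() */
static bool vram_copy_args_ok(unsigned long len, unsigned long sram_offset,
			      uint64_t vram_addr)
{
	return !(len & XE_CACHELINE_MASK) &&
	       !((sram_offset | vram_addr) & XE_CACHELINE_MASK);
}

int main(void)
{
	printf("%d\n", vram_copy_args_ok(4096, 0, 0x100000));  /* 1: aligned */
	printf("%d\n", vram_copy_args_ok(100, 0, 0x100000));   /* 0: bad len */
	printf("%d\n", vram_copy_args_ok(4096, 32, 0x100000)); /* 0: bad offset */
	return 0;
}
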
+static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
+ int len, int write)
+{
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i])
+ break;
+
+ dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+ kfree(dma_addr);
+}
+
+static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
+ void *buf, int len, int write)
+{
+ dma_addr_t *dma_addr;
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
+ if (!dma_addr)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < npages; ++i) {
+ dma_addr_t addr;
+ struct page *page;
+
+ if (is_vmalloc_addr(buf))
+ page = vmalloc_to_page(buf);
+ else
+ page = virt_to_page(buf);
+
+ addr = dma_map_page(xe->drm.dev,
+ page, 0, PAGE_SIZE,
+ write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(xe->drm.dev, addr))
+ goto err_fault;
+
+ dma_addr[i] = addr;
+ buf += PAGE_SIZE;
+ }
+
+ return dma_addr;
+
+err_fault:
+ xe_migrate_dma_unmap(xe, dma_addr, len, write);
+ return ERR_PTR(-EFAULT);
+}
+
+/**
+ * xe_migrate_access_memory - Access memory of a BO via GPU
+ *
+ * @m: The migration context.
+ * @bo: buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Access memory of a BO via the GPU, either reading into or writing from a
+ * passed-in pointer. The pointer is DMA-mapped for GPU access, and GPU
+ * commands are issued to read into or write from the pointer.
+ *
+ * Returns:
+ * 0 if successful, negative error code on failure.
+ */
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct xe_tile *tile = m->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_res_cursor cursor;
+ struct dma_fence *fence = NULL;
+ dma_addr_t *dma_addr;
+ unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
+ int bytes_left = len, current_page = 0;
+ void *orig_buf = buf;
+
+ xe_bo_assert_held(bo);
+
+ /* Use a bounce buffer for small and unaligned accesses */
+ if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
+ !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
+ int buf_offset = 0;
+ void *bounce;
+ int err;
+
+ BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
+ bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
+ if (!bounce)
+ return -ENOMEM;
+
+ /*
+ * Less than ideal for large unaligned accesses, but those should be
+ * fairly rare; this can be fixed up if they become common.
+ */
+ do {
+ int copy_bytes = min_t(int, bytes_left,
+ XE_CACHELINE_BYTES -
+ (offset & XE_CACHELINE_MASK));
+ int ptr_offset = offset & XE_CACHELINE_MASK;
+
+ err = xe_migrate_access_memory(m, bo,
+ offset &
+ ~XE_CACHELINE_MASK,
+ bounce,
+ XE_CACHELINE_BYTES, 0);
+ if (err)
+ break;
+
+ if (write) {
+ memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
+
+ err = xe_migrate_access_memory(m, bo,
+ offset & ~XE_CACHELINE_MASK,
+ bounce,
+ XE_CACHELINE_BYTES, write);
+ if (err)
+ break;
+ } else {
+ memcpy(buf + buf_offset, bounce + ptr_offset,
+ copy_bytes);
+ }
+
+ bytes_left -= copy_bytes;
+ buf_offset += copy_bytes;
+ offset += copy_bytes;
+ } while (bytes_left);
+
+ kfree(bounce);
+ return err;
+ }
+
+ dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
+ if (IS_ERR(dma_addr))
+ return PTR_ERR(dma_addr);
+
+ xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
+
+ do {
+ struct dma_fence *__fence;
+ u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
+ cursor.start;
+ int current_bytes;
+
+ if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
+ current_bytes = min_t(int, bytes_left,
+ MAX_PREEMPTDISABLE_TRANSFER);
+ else
+ current_bytes = min_t(int, bytes_left, cursor.size);
+
+ if (current_bytes & ~PAGE_MASK) {
+ int pitch = 4;
+
+ current_bytes = min_t(int, current_bytes, S16_MAX * pitch);
+ }
+
+ __fence = xe_migrate_vram(m, current_bytes,
+ (unsigned long)buf & ~PAGE_MASK,
+ dma_addr + current_page,
+ vram_addr, write ?
+ XE_MIGRATE_COPY_TO_VRAM :
+ XE_MIGRATE_COPY_TO_SRAM);
+ if (IS_ERR(__fence)) {
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+ fence = __fence;
+ goto out_err;
+ }
+
+ dma_fence_put(fence);
+ fence = __fence;
+
+ buf += current_bytes;
+ offset += current_bytes;
+ current_page = (int)(buf - orig_buf) / PAGE_SIZE;
+ bytes_left -= current_bytes;
+ if (bytes_left)
+ xe_res_next(&cursor, current_bytes);
+ } while (bytes_left);
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+out_err:
+ xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
+ return IS_ERR(fence) ? PTR_ERR(fence) : 0;
+}
+
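To see how the bounce path in xe_migrate_access_memory() walks an unaligned request, here is a userspace model of its copy_bytes arithmetic; the offset and length are made-up values:

#include <stdio.h>

#define XE_CACHELINE_BYTES 64
#define XE_CACHELINE_MASK  (XE_CACHELINE_BYTES - 1)

int main(void)
{
	/* offset 40, length 100 -> three cacheline-sized passes */
	int offset = 40, bytes_left = 100;

	while (bytes_left) {
		int copy = XE_CACHELINE_BYTES - (offset & XE_CACHELINE_MASK);

		if (copy > bytes_left)
			copy = bytes_left;
		printf("line base %d, intra-line %d, copy %d\n",
		       offset & ~XE_CACHELINE_MASK,
		       offset & XE_CACHELINE_MASK, copy);
		offset += copy;
		bytes_left -= copy;
	}
	return 0;
}
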
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 6ff9a963425c..fb9839c1bae0 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -112,6 +112,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write);
+
#define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0)
#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1)
#define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 70a36e777546..e4db8d58ea2d 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -22,6 +22,9 @@
#include "xe_macros.h"
#include "xe_sriov.h"
#include "xe_trace.h"
+#include "xe_wa.h"
+
+#include "generated/xe_device_wa_oob.h"
static void tiles_fini(void *arg)
{
@@ -55,6 +58,7 @@ static void tiles_fini(void *arg)
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
struct xe_tile *tile;
+ struct xe_gt *gt;
u8 id;
/*
@@ -67,7 +71,7 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
/* Possibly override number of tile based on configuration register */
if (!xe->info.skip_mtcfg) {
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- u8 tile_count;
+ u8 tile_count, gt_count;
u32 mtcfg;
/*
@@ -75,21 +79,24 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
* is fine as it's going to the root tile's mmio, that's
* guaranteed to be initialized earlier in xe_mmio_probe_early()
*/
- mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
+ mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
if (tile_count < xe->info.tile_count) {
drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
- xe->info.tile_count, tile_count);
+ xe->info.tile_count, tile_count);
xe->info.tile_count = tile_count;
/*
- * FIXME: Needs some work for standalone media, but
- * should be impossible with multi-tile for now:
- * multi-tile platform with standalone media doesn't
- * exist
+ * We've already set up gt_count according to the full
+ * tile count. Re-calculate it to only include the GTs
+ * that belong to the remaining tile(s).
*/
- xe->info.gt_count = xe->info.tile_count;
+ gt_count = 0;
+ for_each_gt(gt, xe, id)
+ if (gt->info.id < tile_count * xe->info.max_gt_per_tile)
+ gt_count++;
+ xe->info.gt_count = gt_count;
}
}
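
A worked example of the gt_count recalculation above, assuming the fixed GT numbering introduced later in this series (gt id = tile_id * max_gt_per_tile, media GT at +1) and a hypothetical part fused from two tiles down to one:

#include <stdio.h>

int main(void)
{
	int gt_ids[] = { 0, 1, 2, 3 };	/* tile0: 0,1  tile1: 2,3 */
	int tile_count = 1, max_gt_per_tile = 2, gt_count = 0;

	for (unsigned int i = 0; i < sizeof(gt_ids) / sizeof(gt_ids[0]); i++)
		if (gt_ids[i] < tile_count * max_gt_per_tile)
			gt_count++;
	printf("gt_count = %d\n", gt_count);	/* 2: tile1's GTs dropped */
	return 0;
}
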
@@ -128,7 +135,7 @@ int xe_mmio_probe_early(struct xe_device *xe)
*/
xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
- if (xe->mmio.regs == NULL) {
+ if (!xe->mmio.regs) {
drm_err(&xe->drm, "failed to map registers\n");
return -EIO;
}
@@ -138,6 +145,7 @@ int xe_mmio_probe_early(struct xe_device *xe)
return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
+ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */
/**
* xe_mmio_init() - Initialize an MMIO instance
@@ -162,7 +170,7 @@ static void mmio_flush_pending_writes(struct xe_mmio *mmio)
#define DUMMY_REG_OFFSET 0x130030
int i;
- if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
+ if (!XE_DEVICE_WA(mmio->tile->xe, 15015404425))
return;
/* 4 dummy writes */
@@ -175,7 +183,6 @@ u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u8 val;
- /* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
val = readb(mmio->regs + addr);
@@ -189,7 +196,6 @@ u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u16 val;
- /* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
val = readw(mmio->regs + addr);
@@ -204,8 +210,9 @@ void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
- if (!reg.vf && mmio->sriov_vf_gt)
- xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg, val);
else
writel(val, mmio->regs + addr);
}
@@ -215,11 +222,11 @@ u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u32 val;
- /* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
- if (!reg.vf && mmio->sriov_vf_gt)
- val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg);
else
val = readl(mmio->regs + addr);
@@ -309,8 +316,8 @@ u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
return (u64)udw << 32 | ldw;
}
-static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic, bool expect_match)
+static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+ u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_us(cur, timeout_us);
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 31dade91a089..0c737413fcb6 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -775,22 +775,23 @@ void xe_mocs_init(struct xe_gt *gt)
void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
+ enum xe_force_wake_domains domain;
struct xe_mocs_info table;
unsigned int fw_ref, flags;
flags = get_mocs_settings(xe, &table);
+ domain = flags & HAS_LNCF_MOCS ? XE_FORCEWAKE_ALL : XE_FW_GT;
xe_pm_runtime_get_noresume(xe);
- fw_ref = xe_force_wake_get(gt_to_fw(gt),
- flags & HAS_LNCF_MOCS ?
- XE_FORCEWAKE_ALL : XE_FW_GT);
- if (!fw_ref)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), domain);
+
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain))
goto err_fw;
table.ops->dump(&table, flags, gt, p);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
err_fw:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
}
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 9f4632e39a1a..d9391bd08194 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -11,38 +11,57 @@
#include <drm/drm_module.h>
#include "xe_drv.h"
+#include "xe_configfs.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
#include "xe_pm.h"
#include "xe_observation.h"
#include "xe_sched_job.h"
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define DEFAULT_GUC_LOG_LEVEL 3
+#else
+#define DEFAULT_GUC_LOG_LEVEL 1
+#endif
+
+#define DEFAULT_PROBE_DISPLAY true
+#define DEFAULT_VRAM_BAR_SIZE 0
+#define DEFAULT_FORCE_PROBE CONFIG_DRM_XE_FORCE_PROBE
+#define DEFAULT_MAX_VFS ~0
+#define DEFAULT_MAX_VFS_STR "unlimited"
+#define DEFAULT_WEDGED_MODE 1
+#define DEFAULT_SVM_NOTIFIER_SIZE 512
+
struct xe_modparam xe_modparam = {
- .probe_display = true,
- .guc_log_level = 3,
- .force_probe = CONFIG_DRM_XE_FORCE_PROBE,
- .wedged_mode = 1,
- .svm_notifier_size = 512,
+ .probe_display = DEFAULT_PROBE_DISPLAY,
+ .guc_log_level = DEFAULT_GUC_LOG_LEVEL,
+ .force_probe = DEFAULT_FORCE_PROBE,
+#ifdef CONFIG_PCI_IOV
+ .max_vfs = DEFAULT_MAX_VFS,
+#endif
+ .wedged_mode = DEFAULT_WEDGED_MODE,
+ .svm_notifier_size = DEFAULT_SVM_NOTIFIER_SIZE,
/* the rest are 0 by default */
};
module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
-MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be power of 2");
-
-module_param_named(always_migrate_to_vram, xe_modparam.always_migrate_to_vram, bool, 0444);
-MODULE_PARM_DESC(always_migrate_to_vram, "Always migrate to VRAM on GPU fault");
+MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size in MiB, must be power of 2 "
+ "[default=" __stringify(DEFAULT_SVM_NOTIFIER_SIZE) "]");
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
-MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");
+MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched "
+ "[default=" __stringify(DEFAULT_PROBE_DISPLAY) "])");
-module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600);
-MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
+module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600);
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size in MiB (<0=disable-resize, 0=max-needed-size, >0=force-size "
+ "[default=" __stringify(DEFAULT_VRAM_BAR_SIZE) "])");
module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
-MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");
+MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1=normal, 2..5=verbose-levels "
+ "[default=" __stringify(DEFAULT_GUC_LOG_LEVEL) "])");
module_param_named_unsafe(guc_firmware_path, xe_modparam.guc_firmware_path, charp, 0400);
MODULE_PARM_DESC(guc_firmware_path,
@@ -58,18 +77,21 @@ MODULE_PARM_DESC(gsc_firmware_path,
module_param_named_unsafe(force_probe, xe_modparam.force_probe, charp, 0400);
MODULE_PARM_DESC(force_probe,
- "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details.");
+ "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details "
+ "[default=" DEFAULT_FORCE_PROBE "])");
#ifdef CONFIG_PCI_IOV
module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400);
MODULE_PARM_DESC(max_vfs,
"Limit number of Virtual Functions (VFs) that could be managed. "
- "(0 = no VFs [default]; N = allow up to N VFs)");
+ "(0=no VFs; N=allow up to N VFs "
+ "[default=" DEFAULT_MAX_VFS_STR "])");
#endif
module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
MODULE_PARM_DESC(wedged_mode,
- "Module's default policy for the wedged mode - 0=never, 1=upon-critical-errors[default], 2=upon-any-hang");
+ "Module's default policy for the wedged mode (0=never, 1=upon-critical-errors, 2=upon-any-hang "
+ "[default=" __stringify(DEFAULT_WEDGED_MODE) "])");
static int xe_check_nomodeset(void)
{
@@ -89,6 +111,10 @@ static const struct init_funcs init_funcs[] = {
.init = xe_check_nomodeset,
},
{
+ .init = xe_configfs_init,
+ .exit = xe_configfs_exit,
+ },
+ {
.init = xe_hw_fence_module_init,
.exit = xe_hw_fence_module_exit,
},
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 84339e509c80..5a3bfea8b7b4 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -12,7 +12,6 @@
struct xe_modparam {
bool force_execlist;
bool probe_display;
- bool always_migrate_to_vram;
u32 force_vram_bar_size;
int guc_log_level;
char *guc_firmware_path;
diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
new file mode 100644
index 000000000000..61b0a1531a53
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_nvm.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/intel_dg_nvm_aux.h>
+#include <linux/pci.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_mmio.h"
+#include "xe_nvm.h"
+#include "regs/xe_gsc_regs.h"
+#include "xe_sriov.h"
+
+#define GEN12_GUNIT_NVM_BASE 0x00102040
+#define GEN12_DEBUG_NVM_BASE 0x00101018
+
+#define GEN12_CNTL_PROTECTED_NVM_REG 0x0010100C
+
+#define GEN12_GUNIT_NVM_SIZE 0x80
+#define GEN12_DEBUG_NVM_SIZE 0x4
+
+#define NVM_NON_POSTED_ERASE_CHICKEN_BIT BIT(13)
+
+#define HECI_FW_STATUS_2_NVM_ACCESS_MODE BIT(3)
+
+static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
+ [0] = { .name = "DESCRIPTOR", },
+ [2] = { .name = "GSC", },
+ [9] = { .name = "PADDING", },
+ [11] = { .name = "OptionROM", },
+ [12] = { .name = "DAM", },
+};
+
+static void xe_nvm_release_dev(struct device *dev)
+{
+}
+
+static bool xe_nvm_non_posted_erase(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return false;
+ return !(xe_mmio_read32(&gt->mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
+ NVM_NON_POSTED_ERASE_CHICKEN_BIT);
+}
+
+static bool xe_nvm_writable_override(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ bool writable_override;
+ resource_size_t base;
+
+ switch (xe->info.platform) {
+ case XE_BATTLEMAGE:
+ base = DG2_GSC_HECI2_BASE;
+ break;
+ case XE_PVC:
+ base = PVC_GSC_HECI2_BASE;
+ break;
+ case XE_DG2:
+ base = DG2_GSC_HECI2_BASE;
+ break;
+ case XE_DG1:
+ base = DG1_GSC_HECI2_BASE;
+ break;
+ default:
+ drm_err(&xe->drm, "Unknown platform\n");
+ return true;
+ }
+
+ writable_override =
+ !(xe_mmio_read32(&gt->mmio, HECI_FWSTS2(base)) &
+ HECI_FW_STATUS_2_NVM_ACCESS_MODE);
+ if (writable_override)
+ drm_info(&xe->drm, "NVM access overridden by jumper\n");
+ return writable_override;
+}
+
+int xe_nvm_init(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct auxiliary_device *aux_dev;
+ struct intel_dg_nvm_dev *nvm;
+ int ret;
+
+ if (!xe->info.has_gsc_nvm)
+ return 0;
+
+ /* No access to internal NVM from VFs */
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
+ /* NVM pointer should be NULL here */
+ if (WARN_ON(xe->nvm))
+ return -EFAULT;
+
+ xe->nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+ if (!xe->nvm)
+ return -ENOMEM;
+
+ nvm = xe->nvm;
+
+ nvm->writable_override = xe_nvm_writable_override(xe);
+ nvm->non_posted_erase = xe_nvm_non_posted_erase(xe);
+ nvm->bar.parent = &pdev->resource[0];
+ nvm->bar.start = GEN12_GUNIT_NVM_BASE + pdev->resource[0].start;
+ nvm->bar.end = nvm->bar.start + GEN12_GUNIT_NVM_SIZE - 1;
+ nvm->bar.flags = IORESOURCE_MEM;
+ nvm->bar.desc = IORES_DESC_NONE;
+ nvm->regions = regions;
+
+ nvm->bar2.parent = &pdev->resource[0];
+ nvm->bar2.start = GEN12_DEBUG_NVM_BASE + pdev->resource[0].start;
+ nvm->bar2.end = nvm->bar2.start + GEN12_DEBUG_NVM_SIZE - 1;
+ nvm->bar2.flags = IORESOURCE_MEM;
+ nvm->bar2.desc = IORES_DESC_NONE;
+
+ aux_dev = &nvm->aux_dev;
+
+ aux_dev->name = "nvm";
+ aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = xe_nvm_release_dev;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret) {
+ drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret);
+ goto err;
+ }
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret) {
+ drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret);
+ auxiliary_device_uninit(aux_dev);
+ goto err;
+ }
+ return 0;
+
+err:
+ kfree(nvm);
+ xe->nvm = NULL;
+ return ret;
+}
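
The auxiliary device id above packs the PCI domain above the device id. A standalone model, with pci_dev_id() expanded to its bus << 8 | devfn definition and a hypothetical card at 0000:03:00.0:

#include <stdio.h>
#include <stdint.h>

static uint32_t pci_devid(uint8_t bus, uint8_t devfn)
{
	return ((uint32_t)bus << 8) | devfn;	/* as PCI_DEVID() */
}

int main(void)
{
	uint32_t domain = 0;
	uint32_t id = (domain << 16) | pci_devid(0x03, 0x00);

	printf("xe-nvm aux id: 0x%x\n", id);	/* 0x300 */
	return 0;
}
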
+
+void xe_nvm_fini(struct xe_device *xe)
+{
+ struct intel_dg_nvm_dev *nvm = xe->nvm;
+
+ if (!xe->info.has_gsc_nvm)
+ return;
+
+ /* No access to internal NVM from VFs */
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ /* NVM pointer should not be NULL here */
+ if (WARN_ON(!nvm))
+ return;
+
+ auxiliary_device_delete(&nvm->aux_dev);
+ auxiliary_device_uninit(&nvm->aux_dev);
+ kfree(nvm);
+ xe->nvm = NULL;
+}
diff --git a/drivers/gpu/drm/xe/xe_nvm.h b/drivers/gpu/drm/xe/xe_nvm.h
new file mode 100644
index 000000000000..7f3d5f57bed0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_nvm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2025 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_NVM_H__
+#define __XE_NVM_H__
+
+struct xe_device;
+
+int xe_nvm_init(struct xe_device *xe);
+
+void xe_nvm_fini(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 7ffc98f67e69..5729e7d3e335 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -43,6 +43,12 @@
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
#define XE_OA_UNIT_INVALID U32_MAX
+enum xe_oam_unit_type {
+ XE_OAM_UNIT_SAG,
+ XE_OAM_UNIT_SCMI_0,
+ XE_OAM_UNIT_SCMI_1,
+};
+
enum xe_oa_submit_deps {
XE_OA_SUBMIT_NO_DEPS,
XE_OA_SUBMIT_ADD_DEPS,
@@ -77,7 +83,7 @@ struct xe_oa_config {
struct xe_oa_open_param {
struct xe_file *xef;
- u32 oa_unit_id;
+ struct xe_oa_unit *oa_unit;
bool sample;
u32 metric_set;
enum xe_oa_format_name oa_format;
@@ -194,7 +200,7 @@ static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *l
static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream)
{
- return &stream->hwe->oa_unit->regs;
+ return &stream->oa_unit->regs;
}
static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream)
@@ -397,7 +403,7 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
{
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
- int size_exponent = __ffs(stream->oa_buffer.bo->size);
+ int size_exponent = __ffs(xe_bo_size(stream->oa_buffer.bo));
u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT;
struct xe_mmio *mmio = &stream->gt->mmio;
unsigned long flags;
@@ -429,7 +435,7 @@ static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* Zero out the OA buffer since we rely on zero report id and timestamp fields */
- memset(stream->oa_buffer.vaddr, 0, stream->oa_buffer.bo->size);
+ memset(stream->oa_buffer.vaddr, 0, xe_bo_size(stream->oa_buffer.bo));
}
static u32 __format_to_oactrl(const struct xe_oa_format *format, int counter_sel_mask)
@@ -454,7 +460,7 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
{
- return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ return stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
}
@@ -475,7 +481,7 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
__oa_ccs_select(stream) | OAG_OACONTROL_OA_COUNTER_ENABLE;
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
- stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
@@ -838,11 +844,16 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
/* Reset PMON Enable to save power. */
xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0);
+
+ if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM ||
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) &&
+ GRAPHICS_VER(stream->oa->xe) >= 30)
+ xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, OAM_LAT_MEASURE_ENABLE, 0);
}
static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
{
- struct xe_oa_unit *u = stream->hwe->oa_unit;
+ struct xe_oa_unit *u = stream->oa_unit;
struct xe_gt *gt = stream->hwe->gt;
if (WARN_ON(stream != u->exclusive_stream))
@@ -1054,7 +1065,7 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
{
return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
- stream->oa_buffer.bo->size > SZ_16M ?
+ xe_bo_size(stream->oa_buffer.bo) > SZ_16M ?
OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
}
@@ -1105,9 +1116,13 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
*/
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
-
xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1);
+ if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM ||
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) &&
+ GRAPHICS_VER(stream->oa->xe) >= 30)
+ xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, 0, OAM_LAT_MEASURE_ENABLE);
+
/* Configure OAR/OAC */
if (stream->exec_q) {
ret = xe_oa_configure_oa_context(stream, true);
@@ -1139,14 +1154,31 @@ static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *n
return -EINVAL;
}
+static struct xe_oa_unit *xe_oa_lookup_oa_unit(struct xe_oa *oa, u32 oa_unit_id)
+{
+ struct xe_gt *gt;
+ int gt_id, i;
+
+ for_each_gt(gt, oa->xe, gt_id) {
+ for (i = 0; i < gt->oa.num_oa_units; i++) {
+ struct xe_oa_unit *u = &gt->oa.oa_unit[i];
+
+ if (u->oa_unit_id == oa_unit_id)
+ return u;
+ }
+ }
+
+ return NULL;
+}
+
static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
struct xe_oa_open_param *param)
{
- if (value >= oa->oa_unit_ids) {
+ param->oa_unit = xe_oa_lookup_oa_unit(oa, value);
+ if (!param->oa_unit) {
drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
return -EINVAL;
}
- param->oa_unit_id = value;
return 0;
}
@@ -1301,7 +1333,7 @@ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_fr
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -1338,7 +1370,7 @@ static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from fro
if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -1550,7 +1582,7 @@ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg)
{
- struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, };
+ struct drm_xe_oa_stream_info info = { .oa_buf_size = xe_bo_size(stream->oa_buffer.bo), };
void __user *uaddr = (void __user *)arg;
if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -1636,7 +1668,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
- if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) {
+ if (vma->vm_end - vma->vm_start != xe_bo_size(stream->oa_buffer.bo)) {
drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
return -EINVAL;
}
@@ -1677,13 +1709,13 @@ static const struct file_operations xe_oa_fops = {
static int xe_oa_stream_init(struct xe_oa_stream *stream,
struct xe_oa_open_param *param)
{
- struct xe_oa_unit *u = param->hwe->oa_unit;
struct xe_gt *gt = param->hwe->gt;
unsigned int fw_ref;
int ret;
stream->exec_q = param->exec_q;
stream->poll_period_ns = DEFAULT_POLL_PERIOD_NS;
+ stream->oa_unit = param->oa_unit;
stream->hwe = param->hwe;
stream->gt = stream->hwe->gt;
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
@@ -1704,7 +1736,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
* buffer whose size, circ_size, is a multiple of the report size
*/
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
- stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
stream->oa_buffer.circ_size =
param->oa_buffer_size -
param->oa_buffer_size % stream->oa_buffer.format->size;
@@ -1762,7 +1794,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
drm_dbg(&stream->oa->xe->drm, "opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
- WRITE_ONCE(u->exclusive_stream, stream);
+ WRITE_ONCE(stream->oa_unit->exclusive_stream, stream);
hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
@@ -1798,7 +1830,7 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
int ret;
/* We currently only allow exclusive access */
- if (param->hwe->oa_unit->exclusive_stream) {
+ if (param->oa_unit->exclusive_stream) {
drm_dbg(&oa->xe->drm, "OA unit already in use\n");
ret = -EBUSY;
goto exit;
@@ -1874,13 +1906,14 @@ static u64 oa_exponent_to_ns(struct xe_gt *gt, int exponent)
return div_u64(nom + den - 1, den);
}
-static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type)
+static bool oa_unit_supports_oa_format(struct xe_oa_open_param *param, int type)
{
- switch (hwe->oa_unit->type) {
+ switch (param->oa_unit->type) {
case DRM_XE_OA_UNIT_TYPE_OAG:
return type == DRM_XE_OA_FMT_TYPE_OAG || type == DRM_XE_OA_FMT_TYPE_OAR ||
type == DRM_XE_OA_FMT_TYPE_OAC || type == DRM_XE_OA_FMT_TYPE_PEC;
case DRM_XE_OA_UNIT_TYPE_OAM:
+ case DRM_XE_OA_UNIT_TYPE_OAM_SAG:
return type == DRM_XE_OA_FMT_TYPE_OAM || type == DRM_XE_OA_FMT_TYPE_OAM_MPEC;
default:
return false;
@@ -1899,37 +1932,48 @@ u16 xe_oa_unit_id(struct xe_hw_engine *hwe)
hwe->oa_unit->oa_unit_id : U16_MAX;
}
+/* A hwe must be assigned to stream/oa_unit for batch submissions */
static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param)
{
- struct xe_gt *gt;
- int i, ret = 0;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ int ret = 0;
+
+ /* If not provided, OA unit defaults to OA unit 0 as per uapi */
+ if (!param->oa_unit)
+ param->oa_unit = &xe_root_mmio_gt(oa->xe)->oa.oa_unit[0];
+ /* When we have an exec_q, get hwe from the exec_q */
if (param->exec_q) {
- /* When we have an exec_q, get hwe from the exec_q */
param->hwe = xe_gt_hw_engine(param->exec_q->gt, param->exec_q->class,
param->engine_instance, true);
- } else {
- struct xe_hw_engine *hwe;
- enum xe_hw_engine_id id;
-
- /* Else just get the first hwe attached to the oa unit */
- for_each_gt(gt, oa->xe, i) {
- for_each_hw_engine(hwe, gt, id) {
- if (xe_oa_unit_id(hwe) == param->oa_unit_id) {
- param->hwe = hwe;
- goto out;
- }
- }
- }
+ if (!param->hwe || param->hwe->oa_unit != param->oa_unit)
+ goto err;
+ goto out;
}
-out:
- if (!param->hwe || xe_oa_unit_id(param->hwe) != param->oa_unit_id) {
- drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n",
- param->exec_q ? param->exec_q->class : -1,
- param->engine_instance, param->oa_unit_id);
- ret = -EINVAL;
+
+ /* Else just get the first hwe attached to the oa unit */
+ for_each_hw_engine(hwe, param->oa_unit->gt, id) {
+ if (hwe->oa_unit == param->oa_unit) {
+ param->hwe = hwe;
+ goto out;
+ }
}
+ /* If we still didn't find a hwe, just get one with a valid oa_unit from the same gt */
+ for_each_hw_engine(hwe, param->oa_unit->gt, id) {
+ if (!hwe->oa_unit)
+ continue;
+
+ param->hwe = hwe;
+ goto out;
+ }
+err:
+ drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n",
+ param->exec_q ? param->exec_q->class : -1,
+ param->engine_instance, param->oa_unit->oa_unit_id);
+ ret = -EINVAL;
+out:
return ret;
}
@@ -2007,7 +2051,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
f = &oa->oa_formats[param.oa_format];
if (!param.oa_format || !f->size ||
- !engine_supports_oa_format(param.hwe, f->type)) {
+ !oa_unit_supports_oa_format(&param, f->type)) {
drm_dbg(&oa->xe->drm, "Invalid OA format %d type %d size %d for class %d\n",
param.oa_format, f->type, f->size, param.hwe->class);
ret = -EINVAL;
@@ -2155,6 +2199,7 @@ static const struct xe_mmio_range gen12_oa_mux_regs[] = {
static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
+ { .start = 0xB01C, .end = 0xB01C }, /* LNCF_MISC_CONFIG_REGISTER0 */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
{ .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
@@ -2221,6 +2266,7 @@ addr_err:
kfree(oa_regs);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_oa_alloc_regs, ERRNO);
static ssize_t show_dynamic_id(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -2280,7 +2326,7 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi
return -EACCES;
}
- err = __copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
+ err = copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -2447,20 +2493,38 @@ int xe_oa_register(struct xe_device *xe)
static u32 num_oa_units_per_gt(struct xe_gt *gt)
{
- return 1;
+ if (xe_gt_is_main_type(gt) || GRAPHICS_VER(gt_to_xe(gt)) < 20)
+ return 1;
+ else if (!IS_DGFX(gt_to_xe(gt)))
+ return XE_OAM_UNIT_SCMI_0 + 1; /* SAG + SCMI_0 */
+ else
+ return XE_OAM_UNIT_SCMI_1 + 1; /* SAG + SCMI_0 + SCMI_1 */
}
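
A standalone model of the unit-count logic above, exercising the three cases it distinguishes (values chosen for a hypothetical Xe3 part):

#include <stdbool.h>
#include <stdio.h>

enum { XE_OAM_UNIT_SAG, XE_OAM_UNIT_SCMI_0, XE_OAM_UNIT_SCMI_1 };

static unsigned int num_oa_units(bool main_gt, unsigned int graphics_ver,
				 bool dgfx)
{
	if (main_gt || graphics_ver < 20)
		return 1;			/* single OAG/OAM */
	if (!dgfx)
		return XE_OAM_UNIT_SCMI_0 + 1;	/* SAG + SCMI_0 */
	return XE_OAM_UNIT_SCMI_1 + 1;		/* SAG + SCMI_0 + SCMI_1 */
}

int main(void)
{
	printf("%u %u %u\n",
	       num_oa_units(true, 30, true),	/* 1 */
	       num_oa_units(false, 30, false),	/* 2 */
	       num_oa_units(false, 30, true));	/* 3 */
	return 0;
}
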
static u32 __hwe_oam_unit(struct xe_hw_engine *hwe)
{
- if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) >= 1270) {
- /*
- * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
- * within the gt use the same OAM. All MTL/LNL SKUs list 1 SA MEDIA
- */
- xe_gt_WARN_ON(hwe->gt, hwe->gt->info.type != XE_GT_TYPE_MEDIA);
+ if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) < 1270)
+ return XE_OA_UNIT_INVALID;
+
+ xe_gt_WARN_ON(hwe->gt, xe_gt_is_main_type(hwe->gt));
+ if (GRAPHICS_VER(gt_to_xe(hwe->gt)) < 20)
return 0;
- }
+ /*
+ * XE_OAM_UNIT_SAG has only GSCCS attached to it, but only on some platforms. Also
+ * GSCCS cannot be used to submit batches to program the OAM unit. Therefore we don't
+ * assign an OA unit to GSCCS. This means that XE_OAM_UNIT_SAG is exposed as an OA
+ * unit without attached engines. Fused-off engines can also result in oa_units with
+ * num_engines == 0. OA streams can be opened on all OA units.
+ */
+ else if (hwe->engine_id == XE_HW_ENGINE_GSCCS0)
+ return XE_OA_UNIT_INVALID;
+ else if (!IS_DGFX(gt_to_xe(hwe->gt)))
+ return XE_OAM_UNIT_SCMI_0;
+ else if (hwe->class == XE_ENGINE_CLASS_VIDEO_DECODE)
+ return (hwe->instance / 2 & 0x1) + 1;
+ else if (hwe->class == XE_ENGINE_CLASS_VIDEO_ENHANCE)
+ return (hwe->instance & 0x1) + 1;
return XE_OA_UNIT_INVALID;
}
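
The dGFX branches above route engine instances onto OAM unit indices (unit 1 is SCMI_0, unit 2 is SCMI_1, unit 0 being SAG). A quick table generated from the same expressions; the instance counts are illustrative:

#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 8; i++)	/* video decode: pairs share a unit */
		printf("vcs%d -> oa_unit %d\n", i, (i / 2 & 0x1) + 1);
	for (int i = 0; i < 4; i++)	/* video enhance: alternate per instance */
		printf("vecs%d -> oa_unit %d\n", i, (i & 0x1) + 1);
	return 0;
}
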
@@ -2474,6 +2538,7 @@ static u32 __hwe_oa_unit(struct xe_hw_engine *hwe)
case XE_ENGINE_CLASS_VIDEO_DECODE:
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ case XE_ENGINE_CLASS_OTHER:
return __hwe_oam_unit(hwe);
default:
@@ -2513,20 +2578,29 @@ static struct xe_oa_regs __oag_regs(void)
static void __xe_oa_init_oa_units(struct xe_gt *gt)
{
- const u32 mtl_oa_base[] = { 0x13000 };
+ /* Actual address is MEDIA_GT_GSI_OFFSET + oam_base_addr[i] */
+ const u32 oam_base_addr[] = {
+ [XE_OAM_UNIT_SAG] = 0x13000,
+ [XE_OAM_UNIT_SCMI_0] = 0x14000,
+ [XE_OAM_UNIT_SCMI_1] = 0x14800,
+ };
int i, num_units = gt->oa.num_oa_units;
for (i = 0; i < num_units; i++) {
struct xe_oa_unit *u = &gt->oa.oa_unit[i];
- if (gt->info.type != XE_GT_TYPE_MEDIA) {
+ if (xe_gt_is_main_type(gt)) {
u->regs = __oag_regs();
u->type = DRM_XE_OA_UNIT_TYPE_OAG;
- } else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
- u->regs = __oam_regs(mtl_oa_base[i]);
- u->type = DRM_XE_OA_UNIT_TYPE_OAM;
+ } else {
+ xe_gt_assert(gt, GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270);
+ u->regs = __oam_regs(oam_base_addr[i]);
+ u->type = i == XE_OAM_UNIT_SAG && GRAPHICS_VER(gt_to_xe(gt)) >= 20 ?
+ DRM_XE_OA_UNIT_TYPE_OAM_SAG : DRM_XE_OA_UNIT_TYPE_OAM;
}
+ u->gt = gt;
+
xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
/* Ensure MMIO trigger remains disabled till there is a stream */
@@ -2559,10 +2633,6 @@ static int xe_oa_init_gt(struct xe_gt *gt)
}
}
- /*
- * Fused off engines can result in oa_unit's with num_engines == 0. These units
- * will appear in OA unit query, but no OA streams can be opened on them.
- */
gt->oa.num_oa_units = num_oa_units;
gt->oa.oa_unit = u;
@@ -2573,17 +2643,54 @@ static int xe_oa_init_gt(struct xe_gt *gt)
return 0;
}
+static void xe_oa_print_gt_oa_units(struct xe_gt *gt)
+{
+ enum xe_hw_engine_id hwe_id;
+ struct xe_hw_engine *hwe;
+ struct xe_oa_unit *u;
+ char buf[256];
+ int i, n;
+
+ for (i = 0; i < gt->oa.num_oa_units; i++) {
+ u = &gt->oa.oa_unit[i];
+ buf[0] = '\0';
+ n = 0;
+
+ for_each_hw_engine(hwe, gt, hwe_id)
+ if (xe_oa_unit_id(hwe) == u->oa_unit_id)
+ n += scnprintf(buf + n, sizeof(buf) - n, "%s ", hwe->name);
+
+ xe_gt_dbg(gt, "oa_unit %d, type %d, Engines: %s\n", u->oa_unit_id, u->type, buf);
+ }
+}
+
+static void xe_oa_print_oa_units(struct xe_oa *oa)
+{
+ struct xe_gt *gt;
+ int gt_id;
+
+ for_each_gt(gt, oa->xe, gt_id)
+ xe_oa_print_gt_oa_units(gt);
+}
+
static int xe_oa_init_oa_units(struct xe_oa *oa)
{
struct xe_gt *gt;
int i, ret;
+ /* The OAM unit indexing below relies on this enum ordering */
+ BUILD_BUG_ON(XE_OAM_UNIT_SAG != 0);
+ BUILD_BUG_ON(XE_OAM_UNIT_SCMI_0 != 1);
+ BUILD_BUG_ON(XE_OAM_UNIT_SCMI_1 != 2);
+
for_each_gt(gt, oa->xe, i) {
ret = xe_oa_init_gt(gt);
if (ret)
return ret;
}
+ xe_oa_print_oa_units(oa);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 52e33c37d5ee..2628f78c4e8d 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -95,6 +95,9 @@ struct xe_oa_unit {
/** @oa_unit_id: identifier for the OA unit */
u16 oa_unit_id;
+ /** @gt: gt associated with the OA unit */
+ struct xe_gt *gt;
+
/** @type: Type of OA unit - OAM, OAG etc. */
enum drm_xe_oa_unit_type type;
@@ -182,6 +185,9 @@ struct xe_oa_stream {
/** @gt: gt associated with the oa stream */
struct xe_gt *gt;
+ /** @oa_unit: oa unit for this stream */
+ struct xe_oa_unit *oa_unit;
+
/** @hwe: hardware engine associated with this oa stream */
struct xe_hw_engine *hwe;
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 30fdbdb9341e..2e7cb99ae87a 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -103,7 +103,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = {
*
* Note: There is an implicit assumption in the driver that compression and
* coh_1way+ are mutually exclusive. If this is ever not true then userptr
- * and imported dma-buf from external device will have uncleared ccs state.
+ * and imported dma-buf from external device will have uncleared ccs state. See
+ * also xe_bo_needs_ccs_pages().
*/
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
{ \
@@ -162,21 +163,35 @@ u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
for (int i = 0; i < n_entries; i++) {
struct xe_reg reg = XE_REG(_PAT_INDEX(i));
xe_mmio_write32(&gt->mmio, reg, table[i].value);
}
+
+ if (xe->pat.pat_ats)
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe->pat.pat_ats->value);
+ if (xe->pat.pat_pta)
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe->pat.pat_pta->value);
}
static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
for (int i = 0; i < n_entries; i++) {
struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));
xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
}
+
+ if (xe->pat.pat_ats)
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe->pat.pat_ats->value);
+ if (xe->pat.pat_pta)
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value);
}
static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
@@ -303,26 +318,6 @@ static const struct xe_pat_ops xelpg_pat_ops = {
.dump = xelpg_dump,
};
-static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
- int n_entries)
-{
- program_pat_mcr(gt, table, n_entries);
- xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value);
-
- if (IS_DGFX(gt_to_xe(gt)))
- xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe2_pat_pta.value);
-}
-
-static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
- int n_entries)
-{
- program_pat(gt, table, n_entries);
- xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value);
-
- if (IS_DGFX(gt_to_xe(gt)))
- xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value);
-}
-
static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -375,8 +370,8 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
}
static const struct xe_pat_ops xe2_pat_ops = {
- .program_graphics = xe2lpg_program_pat,
- .program_media = xe2lpm_program_pat,
+ .program_graphics = program_pat_mcr,
+ .program_media = program_pat,
.dump = xe2_dump,
};
@@ -385,6 +380,9 @@ void xe_pat_init_early(struct xe_device *xe)
if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
xe->pat.ops = &xe2_pat_ops;
xe->pat.table = xe2_pat_table;
+ xe->pat.pat_ats = &xe2_pat_ats;
+ if (IS_DGFX(xe))
+ xe->pat.pat_pta = &xe2_pat_pta;
/* Wa_16023588340. XXX: Should use XE_WA */
if (GRAPHICS_VERx100(xe) == 2001)
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 818f023166d5..3c40ef426f0c 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -38,40 +38,6 @@ enum toggle_d3cold {
D3COLD_ENABLE,
};
-struct xe_subplatform_desc {
- enum xe_subplatform subplatform;
- const char *name;
- const u16 *pciidlist;
-};
-
-struct xe_device_desc {
- /* Should only ever be set for platforms without GMD_ID */
- const struct xe_ip *pre_gmdid_graphics_ip;
- /* Should only ever be set for platforms without GMD_ID */
- const struct xe_ip *pre_gmdid_media_ip;
-
- const char *platform_name;
- const struct xe_subplatform_desc *subplatforms;
-
- enum xe_platform platform;
-
- u8 dma_mask_size;
- u8 max_remote_tiles:2;
-
- u8 require_force_probe:1;
- u8 is_dgfx:1;
-
- u8 has_display:1;
- u8 has_heci_gscfi:1;
- u8 has_heci_cscfi:1;
- u8 has_llc:1;
- u8 has_pxp:1;
- u8 has_sriov:1;
- u8 skip_guc_pc:1;
- u8 skip_mtcfg:1;
- u8 skip_pcode:1;
-};
-
__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");
@@ -137,9 +103,9 @@ static const struct xe_graphics_desc graphics_xelpg = {
.has_asid = 1, \
.has_atomic_enable_pte_bit = 1, \
.has_flat_ccs = 1, \
- .has_indirect_ring_state = 1, \
.has_range_tlb_invalidation = 1, \
.has_usm = 1, \
+ .has_64bit_timestamp = 1, \
.va_bits = 48, \
.vm_max_level = 4, \
.hw_engine_mask = \
@@ -176,9 +142,11 @@ static const struct xe_ip graphics_ips[] = {
{ 1271, "Xe_LPG", &graphics_xelpg },
{ 1274, "Xe_LPG+", &graphics_xelpg },
{ 2001, "Xe2_HPG", &graphics_xe2 },
+ { 2002, "Xe2_HPG", &graphics_xe2 },
{ 2004, "Xe2_LPG", &graphics_xe2 },
{ 3000, "Xe3_LPG", &graphics_xe2 },
{ 3001, "Xe3_LPG", &graphics_xe2 },
+ { 3003, "Xe3_LPG", &graphics_xe2 },
};
/* Pre-GMDID Media IPs */
@@ -191,6 +159,7 @@ static const struct xe_ip media_ips[] = {
{ 1301, "Xe2_HPM", &media_xelpmp },
{ 2000, "Xe2_LPM", &media_xelpmp },
{ 3000, "Xe3_LPM", &media_xelpmp },
+ { 3002, "Xe3_LPM", &media_xelpmp },
};
static const struct xe_device_desc tgl_desc = {
@@ -200,6 +169,7 @@ static const struct xe_device_desc tgl_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -210,6 +180,7 @@ static const struct xe_device_desc rkl_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -222,6 +193,7 @@ static const struct xe_device_desc adl_s_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
@@ -238,6 +210,7 @@ static const struct xe_device_desc adl_p_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
@@ -252,6 +225,7 @@ static const struct xe_device_desc adl_n_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -265,7 +239,9 @@ static const struct xe_device_desc dg1_desc = {
PLATFORM(DG1),
.dma_mask_size = 39,
.has_display = true,
+ .has_gsc_nvm = 1,
.has_heci_gscfi = 1,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -276,6 +252,7 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
#define DG2_FEATURES \
DGFX_FEATURES, \
PLATFORM(DG2), \
+ .has_gsc_nvm = 1, \
.has_heci_gscfi = 1, \
.subplatforms = (const struct xe_subplatform_desc[]) { \
{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
@@ -288,6 +265,7 @@ static const struct xe_device_desc ats_m_desc = {
.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
.pre_gmdid_media_ip = &media_ip_xehpm,
.dma_mask_size = 46,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
DG2_FEATURES,
@@ -298,10 +276,13 @@ static const struct xe_device_desc dg2_desc = {
.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
.pre_gmdid_media_ip = &media_ip_xehpm,
.dma_mask_size = 46,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
DG2_FEATURES,
.has_display = true,
+ .has_fan_control = true,
+ .has_mbx_power_limits = false,
};
static const __maybe_unused struct xe_device_desc pvc_desc = {
@@ -310,9 +291,12 @@ static const __maybe_unused struct xe_device_desc pvc_desc = {
PLATFORM(PVC),
.dma_mask_size = 52,
.has_display = false,
+ .has_gsc_nvm = 1,
.has_heci_gscfi = 1,
+ .max_gt_per_tile = 1,
.max_remote_tiles = 1,
.require_force_probe = true,
+ .has_mbx_power_limits = false,
};
static const struct xe_device_desc mtl_desc = {
@@ -322,6 +306,7 @@ static const struct xe_device_desc mtl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_pxp = true,
+ .max_gt_per_tile = 2,
};
static const struct xe_device_desc lnl_desc = {
@@ -329,6 +314,8 @@ static const struct xe_device_desc lnl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_pxp = true,
+ .max_gt_per_tile = 2,
+ .needs_scratch = true,
};
static const struct xe_device_desc bmg_desc = {
@@ -336,7 +323,13 @@ static const struct xe_device_desc bmg_desc = {
PLATFORM(BATTLEMAGE),
.dma_mask_size = 46,
.has_display = true,
+ .has_fan_control = true,
+ .has_mbx_power_limits = true,
+ .has_gsc_nvm = 1,
.has_heci_cscfi = 1,
+ .has_sriov = true,
+ .max_gt_per_tile = 2,
+ .needs_scratch = true,
};
static const struct xe_device_desc ptl_desc = {
@@ -344,7 +337,8 @@ static const struct xe_device_desc ptl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_sriov = true,
- .require_force_probe = true,
+ .max_gt_per_tile = 2,
+ .needs_scratch = true,
};
#undef PLATFORM
@@ -575,6 +569,9 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.dma_mask_size = desc->dma_mask_size;
xe->info.is_dgfx = desc->is_dgfx;
+ xe->info.has_fan_control = desc->has_fan_control;
+ xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
+ xe->info.has_gsc_nvm = desc->has_gsc_nvm;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
@@ -583,10 +580,15 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
+ xe->info.needs_scratch = desc->needs_scratch;
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
desc->has_display;
+
+ xe_assert(xe, desc->max_gt_per_tile > 0);
+ xe_assert(xe, desc->max_gt_per_tile <= XE_MAX_GT_PER_TILE);
+ xe->info.max_gt_per_tile = desc->max_gt_per_tile;
xe->info.tile_count = 1 + desc->max_remote_tiles;
err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
@@ -668,6 +670,7 @@ static int xe_info_init(struct xe_device *xe,
xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
xe->info.has_usm = graphics_desc->has_usm;
+ xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
for_each_remote_tile(tile, xe, id) {
int err;
@@ -685,10 +688,11 @@ static int xe_info_init(struct xe_device *xe,
*/
for_each_tile(tile, xe, id) {
gt = tile->primary_gt;
- gt->info.id = xe->info.gt_count++;
gt->info.type = XE_GT_TYPE_MAIN;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile;
gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
gt->info.engine_mask = graphics_desc->hw_engine_mask;
+ xe->info.gt_count++;
if (MEDIA_VER(xe) < 13 && media_desc)
gt->info.engine_mask |= media_desc->hw_engine_mask;
@@ -706,17 +710,10 @@ static int xe_info_init(struct xe_device *xe,
gt = tile->media_gt;
gt->info.type = XE_GT_TYPE_MEDIA;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
gt->info.engine_mask = media_desc->hw_engine_mask;
-
- /*
- * FIXME: At the moment multi-tile and standalone media are
- * mutually exclusive on current platforms. We'll need to
- * come up with a better way to number GTs if we ever wind
- * up with platforms that support both together.
- */
- drm_WARN_ON(&xe->drm, id != 0);
- gt->info.id = xe->info.gt_count++;
+ xe->info.gt_count++;
}
return 0;
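
With the FIXME removed, a GT id is now a pure function of its tile: a worked example for a hypothetical two-tile part with max_gt_per_tile = 2, matching the assignments in the hunk above:

#include <stdio.h>

int main(void)
{
	int max_gt_per_tile = 2;

	for (int tile = 0; tile < 2; tile++)
		printf("tile %d: primary gt id %d, media gt id %d\n",
		       tile, tile * max_gt_per_tile,
		       tile * max_gt_per_tile + 1);
	return 0;
}
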
@@ -733,7 +730,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
return;
xe_device_remove(xe);
- xe_pm_runtime_fini(xe);
+ xe_pm_fini(xe);
}
/*
@@ -803,18 +800,17 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
err = xe_device_probe_early(xe);
- if (err) {
- /*
- * In Boot Survivability mode, no drm card is exposed and driver
- * is loaded with bare minimum to allow for firmware to be
- * flashed through mei. If early probe failed, but it managed to
- * enable survivability mode, return success.
- */
- if (xe_survivability_mode_is_enabled(xe))
- return 0;
+ /*
+ * In Boot Survivability mode, no drm card is exposed and the driver
+ * is loaded with the bare minimum needed to allow firmware to be
+ * flashed through mei. Return success if survivability mode is
+ * enabled due to a pcode failure or configfs being set.
+ */
+ if (xe_survivability_mode_is_enabled(xe))
+ return 0;
+ if (err)
return err;
- }
err = xe_info_init(xe, desc);
if (err)
@@ -920,6 +916,7 @@ static int xe_pci_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index 09ee8a06fe2e..af05db07162e 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -3,13 +3,20 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <linux/bitops.h>
+#include <linux/pci.h>
+
+#include "regs/xe_bars.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_engine_activity.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_printk.h"
@@ -111,6 +118,32 @@ static void pf_link_vfs(struct xe_device *xe, int num_vfs)
}
}
+static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable);
+ if (ret)
+ xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
+ }
+}
+
+static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ u32 sizes;
+
+ sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs);
+ if (!sizes)
+ return 0;
+
+ return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
+}
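
resize_vf_vram_bar() picks the largest size supported for all requested VFs: __fls() is the index of the highest set bit in the mask returned by pci_iov_vf_bar_get_sizes(). A userspace stand-in, with a made-up mask value:

#include <stdio.h>

/* stand-in for the kernel's __fls(): highest set bit, 0-based */
static unsigned int fls_minus_one(unsigned long v)
{
	unsigned int i = 0;

	while (v >>= 1)
		i++;
	return i;
}

int main(void)
{
	unsigned long sizes = 0xf;	/* hypothetical: four sizes supported */

	printf("chosen size bit: %u\n", fls_minus_one(sizes));	/* 3 */
	return 0;
}
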
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -122,6 +155,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_assert(xe, num_vfs <= total_vfs);
xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));
+ err = xe_sriov_pf_wait_ready(xe);
+ if (err)
+ goto out;
+
/*
* We must hold additional reference to the runtime PM to keep PF in D0
* during VFs lifetime, as our VFs do not implement the PM capability.
@@ -137,6 +174,12 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err < 0)
goto failed;
+ if (IS_DGFX(xe)) {
+ err = resize_vf_vram_bar(xe, num_vfs);
+ if (err)
+ xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err);
+ }
+
err = pci_enable_sriov(pdev, num_vfs);
if (err < 0)
goto failed;
@@ -145,12 +188,15 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
num_vfs, total_vfs, str_plural(total_vfs));
+
+ pf_engine_activity_stats(xe, num_vfs, true);
+
return num_vfs;
failed:
pf_unprovision_vfs(xe, num_vfs);
xe_pm_runtime_put(xe);
-
+out:
xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
num_vfs, str_plural(num_vfs), ERR_PTR(err));
return err;
@@ -168,6 +214,8 @@ static int pf_disable_vfs(struct xe_device *xe)
if (!num_vfs)
return 0;
+ pf_engine_activity_stats(xe, num_vfs, false);
+
pci_disable_sriov(pdev);
pf_reset_vfs(xe, num_vfs);
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index e9b9bbc138d3..4de6f69ed975 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -8,6 +8,47 @@
#include <linux/types.h>
+#include "xe_platform_types.h"
+
+struct xe_subplatform_desc {
+ enum xe_subplatform subplatform;
+ const char *name;
+ const u16 *pciidlist;
+};
+
+struct xe_device_desc {
+ /* Should only ever be set for platforms without GMD_ID */
+ const struct xe_ip *pre_gmdid_graphics_ip;
+ /* Should only ever be set for platforms without GMD_ID */
+ const struct xe_ip *pre_gmdid_media_ip;
+
+ const char *platform_name;
+ const struct xe_subplatform_desc *subplatforms;
+
+ enum xe_platform platform;
+
+ u8 dma_mask_size;
+ u8 max_remote_tiles:2;
+ u8 max_gt_per_tile:2;
+
+ u8 require_force_probe:1;
+ u8 is_dgfx:1;
+
+ u8 has_display:1;
+ u8 has_fan_control:1;
+ u8 has_gsc_nvm:1;
+ u8 has_heci_gscfi:1;
+ u8 has_heci_cscfi:1;
+ u8 has_llc:1;
+ u8 has_mbx_power_limits:1;
+ u8 has_pxp:1;
+ u8 has_sriov:1;
+ u8 needs_scratch:1;
+ u8 skip_guc_pc:1;
+ u8 skip_mtcfg:1;
+ u8 skip_pcode:1;
+};
+
struct xe_graphics_desc {
u8 va_bits;
u8 vm_max_level;
@@ -21,6 +62,7 @@ struct xe_graphics_desc {
u8 has_indirect_ring_state:1;
u8 has_range_tlb_invalidation:1;
u8 has_usm:1;
+ u8 has_64bit_timestamp:1;
};
struct xe_media_desc {
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 9333ce776a6e..6a7ddb9005f9 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/error-injection.h>
#include <drm/drm_managed.h>
@@ -108,6 +109,17 @@ int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout
return err;
}
+int xe_pcode_write64_timeout(struct xe_tile *tile, u32 mbox, u32 data0, u32 data1, int timeout)
+{
+ int err;
+
+ mutex_lock(&tile->pcode.lock);
+ err = pcode_mailbox_rw(tile, mbox, &data0, &data1, timeout, false, false);
+ mutex_unlock(&tile->pcode.lock);
+
+ return err;
+}
+
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
int err;
@@ -323,3 +335,34 @@ int xe_pcode_probe_early(struct xe_device *xe)
{
return xe_pcode_ready(xe, false);
}
+ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */
+
+/* Helpers with drm device. These should only be called by the display side */
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_read(tile, mbox, val, val1);
+}
+
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_write_timeout(tile, mbox, val, timeout_ms);
+}
+
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms);
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index ba33991d72a7..a5584c1c75f9 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -7,8 +7,10 @@
#define _XE_PCODE_H_
#include <linux/types.h>
-struct xe_tile;
+
+struct drm_device;
struct xe_device;
+struct xe_tile;
void xe_pcode_init(struct xe_tile *tile);
int xe_pcode_probe_early(struct xe_device *xe);
@@ -18,6 +20,9 @@ int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1);
int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 val,
int timeout_ms);
+int xe_pcode_write64_timeout(struct xe_tile *tile, u32 mbox, u32 data0,
+ u32 data1, int timeout);
+
#define xe_pcode_write(tile, mbox, val) \
xe_pcode_write_timeout(tile, mbox, val, 1)
@@ -29,4 +34,12 @@ int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
| FIELD_PREP(PCODE_MB_PARAM1, param1)\
| FIELD_PREP(PCODE_MB_PARAM2, param2))
+/* Helpers with drm device */
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
+#define intel_pcode_write(drm, mbox, val) \
+ intel_pcode_write_timeout((drm), (mbox), (val), 1)
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index 2bae9afdbd35..92bfcba51e19 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -34,6 +34,7 @@
#define DGFX_PCODE_STATUS 0x7E
#define DGFX_GET_INIT_STATUS 0x0
#define DGFX_INIT_STATUS_COMPLETE 0x1
+#define DGFX_LINK_DOWNGRADE_STATUS REG_BIT(31)
#define PCODE_POWER_SETUP 0x7C
#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
@@ -42,6 +43,28 @@
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define READ_PSYSGPU_POWER_LIMIT 0x6
+#define WRITE_PSYSGPU_POWER_LIMIT 0x7
+#define READ_PACKAGE_POWER_LIMIT 0x8
+#define WRITE_PACKAGE_POWER_LIMIT 0x9
+#define READ_PL_FROM_FW 0x1
+#define READ_PL_FROM_PCODE 0x0
+
+#define PCODE_LATE_BINDING 0x5C
+#define GET_CAPABILITY_STATUS 0x0
+#define V1_FAN_SUPPORTED REG_BIT(0)
+#define VR_PARAMS_SUPPORTED REG_BIT(3)
+#define V1_FAN_PROVISIONED REG_BIT(16)
+#define VR_PARAMS_PROVISIONED REG_BIT(19)
+#define GET_VERSION_LOW 0x1
+#define GET_VERSION_HIGH 0x2
+#define MAJOR_VERSION_MASK REG_GENMASK(31, 16)
+#define MINOR_VERSION_MASK REG_GENMASK(15, 0)
+#define HOTFIX_VERSION_MASK REG_GENMASK(31, 16)
+#define BUILD_VERSION_MASK REG_GENMASK(15, 0)
+#define FAN_TABLE 1
+#define VR_CONFIG 2
+
#define PCODE_FREQUENCY_CONFIG 0x6e
/* Frequency Config Sub Commands (param1) */
#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
@@ -49,6 +72,9 @@
/* Domain IDs (param2) */
#define PCODE_MBOX_DOMAIN_HBM 0x2
+#define FAN_SPEED_CONTROL 0x7D
+#define FSC_READ_NUM_FANS 0x4
+
#define PCODE_SCRATCH(x) XE_REG(0x138320 + ((x) * 4))
/* PCODE_SCRATCH0 */
#define AUXINFO_REG_OFFSET REG_GENMASK(17, 15)
@@ -63,6 +89,10 @@
/* Auxiliary info bits */
#define AUXINFO_HISTORY_OFFSET REG_GENMASK(31, 29)
+#define BMG_PCIE_CAP XE_REG(0x138340)
+#define LINK_DOWNGRADE REG_GENMASK(1, 0)
+#define DOWNGRADE_CAPABLE 2
+
struct pcode_err_decode {
int errno;
const char *str;
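The GET_VERSION_LOW/HIGH replies pack four 16-bit version fields across two dwords. A small sketch of decoding them with FIELD_GET(); it assumes low and high hold the two mailbox replies:

#include <linux/bitfield.h>
#include <drm/drm_print.h>

/* Illustrative decode; low/high are assumed GET_VERSION_LOW/HIGH replies. */
static void example_decode_version(struct drm_device *drm, u32 low, u32 high)
{
	u16 major = FIELD_GET(MAJOR_VERSION_MASK, low);
	u16 minor = FIELD_GET(MINOR_VERSION_MASK, low);
	u16 hotfix = FIELD_GET(HOTFIX_VERSION_MASK, high);
	u16 build = FIELD_GET(BUILD_VERSION_MASK, high);

	drm_dbg(drm, "late-binding fw version %u.%u.%u.%u\n",
		major, minor, hotfix, build);
}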
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 7b6b754ad6eb..e279b47ba03b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -16,10 +16,10 @@
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
-#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
+#include "xe_i2c.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
@@ -135,7 +135,7 @@ int xe_pm_suspend(struct xe_device *xe)
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
if (err)
- goto err_pxp;
+ goto err_display;
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
@@ -147,12 +147,13 @@ int xe_pm_suspend(struct xe_device *xe)
xe_display_pm_suspend_late(xe);
+ xe_i2c_pm_suspend(xe);
+
drm_dbg(&xe->drm, "Device suspended\n");
return 0;
err_display:
xe_display_pm_resume(xe);
-err_pxp:
xe_pxp_pm_resume(xe->pxp);
err:
drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
@@ -188,10 +189,12 @@ int xe_pm_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory required for the
* GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto err;
+ xe_i2c_pm_resume(xe, xe->d3cold.allowed);
+
xe_irq_resume(xe);
for_each_gt(gt, xe, id)
@@ -199,7 +202,7 @@ int xe_pm_resume(struct xe_device *xe)
xe_display_pm_resume(xe);
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto err;
@@ -273,6 +276,7 @@ int xe_pm_init_early(struct xe_device *xe)
if (err)
return err;
+ xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
@@ -286,6 +290,42 @@ static u32 vram_threshold_value(struct xe_device *xe)
return DEFAULT_VRAM_THRESHOLD;
}
+static int xe_pm_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
+ int err = 0;
+
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ xe_pm_runtime_get(xe);
+ err = xe_bo_evict_all_user(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ err = xe_bo_notifier_prepare_all_pinned(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ xe_bo_notifier_unprepare_all_pinned(xe);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ if (err)
+ return NOTIFY_BAD;
+
+ return NOTIFY_DONE;
+}
+
/**
* xe_pm_init - Initialize Xe Power Management
* @xe: xe device instance
@@ -299,33 +339,31 @@ int xe_pm_init(struct xe_device *xe)
u32 vram_threshold;
int err;
+ xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
+ err = register_pm_notifier(&xe->pm_notifier);
+ if (err)
+ return err;
+
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
return 0;
- xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
-
if (xe->d3cold.capable) {
- err = xe_device_sysfs_init(xe);
- if (err)
- return err;
-
vram_threshold = vram_threshold_value(xe);
err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
- return err;
+ goto err_unregister;
}
xe_pm_runtime_init(xe);
-
return 0;
+
+err_unregister:
+ unregister_pm_notifier(&xe->pm_notifier);
+ return err;
}
-/**
- * xe_pm_runtime_fini - Finalize Runtime PM
- * @xe: xe device instance
- */
-void xe_pm_runtime_fini(struct xe_device *xe)
+static void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -333,6 +371,18 @@ void xe_pm_runtime_fini(struct xe_device *xe)
pm_runtime_forbid(dev);
}
+/**
+ * xe_pm_fini - Finalize PM
+ * @xe: xe device instance
+ */
+void xe_pm_fini(struct xe_device *xe)
+{
+ if (xe_device_uc_enabled(xe))
+ xe_pm_runtime_fini(xe);
+
+ unregister_pm_notifier(&xe->pm_notifier);
+}
+
static void xe_pm_write_callback_task(struct xe_device *xe,
struct task_struct *task)
{
@@ -442,6 +492,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_display_pm_runtime_suspend_late(xe);
+ xe_i2c_pm_suspend(xe);
+
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return 0;
@@ -484,11 +536,13 @@ int xe_pm_runtime_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory
* required for the GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto out;
}
+ xe_i2c_pm_resume(xe, xe->d3cold.allowed);
+
xe_irq_resume(xe);
for_each_gt(gt, xe, id)
@@ -497,7 +551,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_display_pm_runtime_resume(xe);
if (xe->d3cold.allowed) {
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto out;
}
@@ -641,7 +695,7 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
return dev->power.runtime_status == RPM_SUSPENDING ||
dev->power.runtime_status == RPM_RESUMING ||
- pm_suspend_target_state != PM_SUSPEND_ON;
+ pm_suspend_in_progress();
#else
return false;
#endif
@@ -707,11 +761,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
}
/**
- * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
+ * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
* @xe: xe device instance
- * @threshold: VRAM size in bites for the D3cold threshold
+ * @threshold: VRAM size in MiB for the D3cold threshold
*
- * Returns 0 for success, negative error code otherwise.
+ * Return:
+ * * 0 - success
+ * * -EINVAL - invalid argument
*/
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
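The notifier added above follows the standard register_pm_notifier() pattern: prepare on PM_SUSPEND_PREPARE/PM_HIBERNATION_PREPARE, undo on the matching POST events, and veto the transition with NOTIFY_BAD on failure. A minimal, self-contained sketch of that pattern, independent of the driver's actual callback:

#include <linux/notifier.h>
#include <linux/suspend.h>

static int example_pm_notifier(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* Prepare for the transition; NOTIFY_BAD would veto it. */
		return NOTIFY_DONE;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		/* Undo whatever the prepare step did. */
		return NOTIFY_DONE;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_notifier,
};

/* Pairing mirrors xe_pm_init()/xe_pm_fini():
 *	register_pm_notifier(&example_pm_nb);
 *	...
 *	unregister_pm_notifier(&example_pm_nb);
 */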
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 998d1ed64556..59678b310e55 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -17,7 +17,7 @@ int xe_pm_resume(struct xe_device *xe);
int xe_pm_init_early(struct xe_device *xe);
int xe_pm_init(struct xe_device *xe);
-void xe_pm_runtime_fini(struct xe_device *xe);
+void xe_pm_fini(struct xe_device *xe);
bool xe_pm_runtime_suspended(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
int xe_pm_runtime_resume(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
index 4f62a6e515d6..cab51d826345 100644
--- a/drivers/gpu/drm/xe/xe_pmu.c
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -10,9 +10,11 @@
#include "xe_force_wake.h"
#include "xe_gt_idle.h"
#include "xe_guc_engine_activity.h"
+#include "xe_guc_pc.h"
#include "xe_hw_engine.h"
#include "xe_pm.h"
#include "xe_pmu.h"
+#include "xe_sriov_pf_helpers.h"
/**
* DOC: Xe PMU (Performance Monitoring Unit)
@@ -32,9 +34,10 @@
* gt[60:63] Selects gt for the event
* engine_class[20:27] Selects engine-class for event
* engine_instance[12:19] Selects the engine-instance for the event
+ * function[44:59] Selects the function of the event (when SR-IOV is enabled)
*
* For engine specific events (engine-*), gt, engine_class and engine_instance parameters must be
- * set as populated by DRM_XE_DEVICE_QUERY_ENGINES.
+ * set as populated by DRM_XE_DEVICE_QUERY_ENGINES; function must also be set if SR-IOV is enabled.
*
* For gt specific events (gt-*) gt parameter must be passed. All other parameters will be 0.
*
@@ -49,6 +52,7 @@
*/
#define XE_PMU_EVENT_GT_MASK GENMASK_ULL(63, 60)
+#define XE_PMU_EVENT_FUNCTION_MASK GENMASK_ULL(59, 44)
#define XE_PMU_EVENT_ENGINE_CLASS_MASK GENMASK_ULL(27, 20)
#define XE_PMU_EVENT_ENGINE_INSTANCE_MASK GENMASK_ULL(19, 12)
#define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
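With the new function field, the full config layout is event[11:0], engine_instance[19:12], engine_class[27:20], function[59:44] and gt[63:60]. A short sketch of composing a config word from the masks above (the helper name is illustrative):

#include <linux/bitfield.h>

/* Illustrative helper composing a perf config word for this PMU. */
static u64 example_encode_config(u64 gt, u64 function, u64 engine_class,
				 u64 engine_instance, u64 event_id)
{
	return FIELD_PREP(XE_PMU_EVENT_GT_MASK, gt) |
	       FIELD_PREP(XE_PMU_EVENT_FUNCTION_MASK, function) |
	       FIELD_PREP(XE_PMU_EVENT_ENGINE_CLASS_MASK, engine_class) |
	       FIELD_PREP(XE_PMU_EVENT_ENGINE_INSTANCE_MASK, engine_instance) |
	       FIELD_PREP(XE_PMU_EVENT_ID_MASK, event_id);
}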
@@ -58,6 +62,11 @@ static unsigned int config_to_event_id(u64 config)
return FIELD_GET(XE_PMU_EVENT_ID_MASK, config);
}
+static unsigned int config_to_function_id(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_FUNCTION_MASK, config);
+}
+
static unsigned int config_to_engine_class(u64 config)
{
return FIELD_GET(XE_PMU_EVENT_ENGINE_CLASS_MASK, config);
@@ -76,6 +85,8 @@ static unsigned int config_to_gt_id(u64 config)
#define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
#define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02
#define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03
+#define XE_PMU_EVENT_GT_ACTUAL_FREQUENCY 0x04
+#define XE_PMU_EVENT_GT_REQUESTED_FREQUENCY 0x05
static struct xe_gt *event_to_gt(struct perf_event *event)
{
@@ -111,6 +122,14 @@ static bool is_engine_event(u64 config)
event_id == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
}
+static bool is_gt_frequency_event(struct perf_event *event)
+{
+ u32 id = config_to_event_id(event->attr.config);
+
+ return id == XE_PMU_EVENT_GT_ACTUAL_FREQUENCY ||
+ id == XE_PMU_EVENT_GT_REQUESTED_FREQUENCY;
+}
+
static bool event_gt_forcewake(struct perf_event *event)
{
struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
@@ -118,7 +137,7 @@ static bool event_gt_forcewake(struct perf_event *event)
struct xe_gt *gt;
unsigned int *fw_ref;
- if (!is_engine_event(config))
+ if (!is_engine_event(config) && !is_gt_frequency_event(event))
return true;
gt = xe_device_get_gt(xe, config_to_gt_id(config));
@@ -138,10 +157,13 @@ static bool event_gt_forcewake(struct perf_event *event)
return true;
}
-static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
+static bool event_supported(struct xe_pmu *pmu, unsigned int gt_id,
unsigned int id)
{
- if (gt >= XE_MAX_GT_PER_TILE)
+ struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+ struct xe_gt *gt = xe_device_get_gt(xe, gt_id);
+
+ if (!gt)
return false;
return id < sizeof(pmu->supported_events) * BITS_PER_BYTE &&
@@ -151,7 +173,7 @@ static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
static bool event_param_valid(struct perf_event *event)
{
struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
- unsigned int engine_class, engine_instance;
+ unsigned int engine_class, engine_instance, function_id;
u64 config = event->attr.config;
struct xe_gt *gt;
@@ -161,16 +183,28 @@ static bool event_param_valid(struct perf_event *event)
engine_class = config_to_engine_class(config);
engine_instance = config_to_engine_instance(config);
+ function_id = config_to_function_id(config);
switch (config_to_event_id(config)) {
case XE_PMU_EVENT_GT_C6_RESIDENCY:
- if (engine_class || engine_instance)
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ if (engine_class || engine_instance || function_id)
return false;
break;
case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
if (!event_to_hwe(event))
return false;
+
+ /* Valid function IDs are PF(0) through totalvfs when SR-IOV is enabled */
+ if (IS_SRIOV_PF(xe)) {
+ if (function_id > xe_sriov_pf_get_totalvfs(xe))
+ return false;
+ } else if (function_id) {
+ return false;
+ }
+
break;
}
@@ -242,13 +276,17 @@ static int xe_pmu_event_init(struct perf_event *event)
static u64 read_engine_events(struct xe_gt *gt, struct perf_event *event)
{
struct xe_hw_engine *hwe;
- u64 val = 0;
+ unsigned int function_id;
+ u64 config, val = 0;
+
+ config = event->attr.config;
+ function_id = config_to_function_id(config);
hwe = event_to_hwe(event);
- if (config_to_event_id(event->attr.config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
- val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe);
+ if (config_to_event_id(config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
+ val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe, function_id);
else
- val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe);
+ val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe, function_id);
return val;
}
@@ -266,6 +304,10 @@ static u64 __xe_pmu_event_read(struct perf_event *event)
case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
return read_engine_events(gt, event);
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ return xe_guc_pc_get_act_freq(&gt->uc.guc.pc);
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ return xe_guc_pc_get_cur_freq_fw(&gt->uc.guc.pc);
}
return 0;
@@ -281,7 +323,14 @@ static void xe_pmu_event_update(struct perf_event *event)
new = __xe_pmu_event_read(event);
} while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));
- local64_add(new - prev, &event->count);
+ /*
+ * GT frequency is not a monotonically increasing counter, so add the
+ * instantaneous value instead.
+ */
+ if (is_gt_frequency_event(event))
+ local64_add(new, &event->count);
+ else
+ local64_add(new - prev, &event->count);
}
static void xe_pmu_event_read(struct perf_event *event)
@@ -351,6 +400,7 @@ static void xe_pmu_event_del(struct perf_event *event, int flags)
}
PMU_FORMAT_ATTR(gt, "config:60-63");
+PMU_FORMAT_ATTR(function, "config:44-59");
PMU_FORMAT_ATTR(engine_class, "config:20-27");
PMU_FORMAT_ATTR(engine_instance, "config:12-19");
PMU_FORMAT_ATTR(event, "config:0-11");
@@ -359,6 +409,7 @@ static struct attribute *pmu_format_attrs[] = {
&format_attr_event.attr,
&format_attr_engine_class.attr,
&format_attr_engine_instance.attr,
+ &format_attr_function.attr,
&format_attr_gt.attr,
NULL,
};
@@ -419,6 +470,10 @@ static ssize_t event_attr_show(struct device *dev,
XE_EVENT_ATTR_SIMPLE(gt-c6-residency, gt_c6_residency, XE_PMU_EVENT_GT_C6_RESIDENCY, "ms");
XE_EVENT_ATTR_NOUNIT(engine-active-ticks, engine_active_ticks, XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
XE_EVENT_ATTR_NOUNIT(engine-total-ticks, engine_total_ticks, XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
+XE_EVENT_ATTR_SIMPLE(gt-actual-frequency, gt_actual_frequency,
+ XE_PMU_EVENT_GT_ACTUAL_FREQUENCY, "MHz");
+XE_EVENT_ATTR_SIMPLE(gt-requested-frequency, gt_requested_frequency,
+ XE_PMU_EVENT_GT_REQUESTED_FREQUENCY, "MHz");
static struct attribute *pmu_empty_event_attrs[] = {
/* Empty - all events are added as groups with .attr_update() */
@@ -434,6 +489,8 @@ static const struct attribute_group *pmu_events_attr_update[] = {
&pmu_group_gt_c6_residency,
&pmu_group_engine_active_ticks,
&pmu_group_engine_total_ticks,
+ &pmu_group_gt_actual_frequency,
+ &pmu_group_gt_requested_frequency,
NULL,
};
@@ -442,8 +499,11 @@ static void set_supported_events(struct xe_pmu *pmu)
struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
struct xe_gt *gt = xe_device_get_gt(xe, 0);
- if (!xe->info.skip_guc_pc)
+ if (!xe->info.skip_guc_pc) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_ACTUAL_FREQUENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_REQUESTED_FREQUENCY);
+ }
if (xe_guc_engine_activity_supported(&gt->uc.guc)) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ffaf0d02dc7d..c8e63bd23300 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -103,6 +103,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
{
struct xe_pt *pt;
struct xe_bo *bo;
+ u32 bo_flags;
int err;
if (level) {
@@ -115,14 +116,16 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
if (!pt)
return ERR_PTR(-ENOMEM);
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+ XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
+ if (vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
- XE_BO_FLAG_PINNED |
- XE_BO_FLAG_NO_RESV_EVICT |
- XE_BO_FLAG_PAGETABLE);
+ bo_flags);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
@@ -269,8 +272,11 @@ struct xe_pt_update {
bool preexisting;
};
+/**
+ * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk.
+ */
struct xe_pt_stage_bind_walk {
- /** base: The base class. */
+ /** @base: The base class. */
struct xe_pt_walk base;
/* Input parameters for the walk */
@@ -278,15 +284,19 @@ struct xe_pt_stage_bind_walk {
struct xe_vm *vm;
/** @tile: The tile we're building for. */
struct xe_tile *tile;
- /** @default_pte: PTE flag only template. No address is associated */
- u64 default_pte;
+ /** @default_vram_pte: PTE flag only template for VRAM. No address is associated */
+ u64 default_vram_pte;
+ /** @default_system_pte: PTE flag only template for System. No address is associated */
+ u64 default_system_pte;
/** @dma_offset: DMA offset to add to the PTE. */
u64 dma_offset;
/**
- * @needs_64k: This address range enforces 64K alignment and
- * granularity.
+ * @needs_64K: This address range enforces 64K alignment and
+ * granularity on VRAM.
*/
bool needs_64K;
+ /** @clear_pt: clear page table entries during the bind walk */
+ bool clear_pt;
/**
* @vma: VMA being mapped
*/
@@ -299,6 +309,7 @@ struct xe_pt_stage_bind_walk {
u64 va_curs_start;
/* Output */
+ /** @wupd: Walk output data for page-table updates. */
struct xe_walk_update {
/** @wupd.entries: Caller provided storage. */
struct xe_vm_pgtable_update *entries;
@@ -316,7 +327,7 @@ struct xe_pt_stage_bind_walk {
u64 l0_end_addr;
/** @addr_64K: The start address of the current 64K chunk. */
u64 addr_64K;
- /** @found_64: Whether @add_64K actually points to a 64K chunk. */
+ /** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
bool found_64K;
};
@@ -436,6 +447,10 @@ static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
if (xe_vma_is_null(xe_walk->vma))
return true;
+ /* If we are clearing the page table, there are no DMA addresses to check */
+ if (xe_walk->clear_pt)
+ return true;
+
/* Is the DMA address huge PTE size aligned? */
size = next - addr;
dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
@@ -515,24 +530,35 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
struct xe_res_cursor *curs = xe_walk->curs;
bool is_null = xe_vma_is_null(xe_walk->vma);
+ bool is_vram = is_null ? false : xe_res_is_vram(curs);
XE_WARN_ON(xe_walk->va_curs_start != addr);
- pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
- xe_res_dma(curs) + xe_walk->dma_offset,
- xe_walk->vma, pat_index, level);
- pte |= xe_walk->default_pte;
+ if (xe_walk->clear_pt) {
+ pte = 0;
+ } else {
+ pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
+ xe_res_dma(curs) +
+ xe_walk->dma_offset,
+ xe_walk->vma,
+ pat_index, level);
+ if (!is_null)
+ pte |= is_vram ? xe_walk->default_vram_pte :
+ xe_walk->default_system_pte;
- /*
- * Set the XE_PTE_PS64 hint if possible, otherwise if
- * this device *requires* 64K PTE size for VRAM, fail.
- */
- if (level == 0 && !xe_parent->is_compact) {
- if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
- xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
- pte |= XE_PTE_PS64;
- } else if (XE_WARN_ON(xe_walk->needs_64K)) {
- return -EINVAL;
+ /*
+ * Set the XE_PTE_PS64 hint if possible, otherwise if
+ * this device *requires* 64K PTE size for VRAM, fail.
+ */
+ if (level == 0 && !xe_parent->is_compact) {
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |=
+ XE_VMA_PTE_64K;
+ pte |= XE_PTE_PS64;
+ } else if (XE_WARN_ON(xe_walk->needs_64K &&
+ is_vram)) {
+ return -EINVAL;
+ }
}
}
@@ -540,7 +566,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (unlikely(ret))
return ret;
- if (!is_null)
+ if (!is_null && !xe_walk->clear_pt)
xe_res_next(curs, next - addr);
xe_walk->va_curs_start = next;
xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
@@ -603,6 +629,44 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
.pt_entry = xe_pt_stage_bind_entry,
};
+/*
+ * Default atomic expectations for different allocation scenarios are as follows:
+ *
+ * 1. Traditional API: When the VM is not in LR mode:
+ * - Device atomics are expected to function with all allocations.
+ *
+ * 2. Compute/SVM API: When the VM is in LR mode:
+ * - Device atomics are the default behavior when the bo is placed in a single region.
+ * - In all other cases device atomics will be disabled with AE=0 until an application
+ * requests otherwise using an ioctl like madvise.
+ */
+static bool xe_atomic_for_vram(struct xe_vm *vm)
+{
+ return true;
+}
+
+static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
+{
+ struct xe_device *xe = vm->xe;
+
+ if (!xe->info.has_device_atomics_on_smem)
+ return false;
+
+ /*
+ * If a SMEM+LMEM allocation is backed by SMEM, a device
+ * atomic will cause a GPU page fault and the allocation
+ * will then be migrated to LMEM, so bind such allocations
+ * with device atomics enabled.
+ *
+ * TODO: Revisit this. Perhaps add something like a
+ * fault_on_atomics_in_system UAPI flag.
+ * Note that this also prohibits GPU atomics in LR mode for
+ * userptr and system memory on DGFX.
+ */
+ return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
+ (bo && xe_bo_has_single_placement(bo))));
+}
+
/**
* xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
* range.
@@ -612,6 +676,7 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
* @entries: Storage for the update entries used for connecting the tree to
* the main tree at commit time.
* @num_entries: On output contains the number of @entries used.
+ * @clear_pt: Clear the page table entries.
*
* This function builds a disconnected page-table tree for a given address
* range. The tree is connected to the main vm tree for the gpu using
@@ -625,13 +690,13 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_svm_range *range,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool clear_pt)
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_bo *bo = xe_vma_bo(vma);
- bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
- (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
struct xe_res_cursor curs;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_pt_stage_bind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_bind_ops,
@@ -639,34 +704,31 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.max_level = XE_PT_HIGHEST_LEVEL,
.staging = true,
},
- .vm = xe_vma_vm(vma),
+ .vm = vm,
.tile = tile,
.curs = &curs,
.va_curs_start = range ? range->base.itree.start :
xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
+ .clear_pt = clear_pt,
};
- struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+ struct xe_pt *pt = vm->pt_root[tile->id];
int ret;
if (range) {
/* Move this entire thing to xe_svm.c? */
- xe_svm_notifier_lock(xe_vma_vm(vma));
+ xe_svm_notifier_lock(vm);
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "BIND PREPARE - RETRY");
- xe_svm_notifier_unlock(xe_vma_vm(vma));
+ xe_svm_notifier_unlock(vm);
return -EAGAIN;
}
if (xe_svm_range_has_dma_mapping(range)) {
xe_res_first_dma(range->base.dma_addr, 0,
range->base.itree.last + 1 - range->base.itree.start,
&curs);
- is_devmem = xe_res_is_vram(&curs);
- if (is_devmem)
- xe_svm_range_debug(range, "BIND PREPARE - DMA VRAM");
- else
- xe_svm_range_debug(range, "BIND PREPARE - DMA");
+ xe_svm_range_debug(range, "BIND PREPARE - MIXED");
} else {
xe_assert(xe, false);
}
@@ -674,54 +736,21 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
* Note, when unlocking the resource cursor dma addresses may become
* stale, but the bind will be aborted anyway at commit time.
*/
- xe_svm_notifier_unlock(xe_vma_vm(vma));
+ xe_svm_notifier_unlock(vm);
}
- xe_walk.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem;
+ xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K);
+ if (clear_pt)
+ goto walk_pt;
- /**
- * Default atomic expectations for different allocation scenarios are as follows:
- *
- * 1. Traditional API: When the VM is not in LR mode:
- * - Device atomics are expected to function with all allocations.
- *
- * 2. Compute/SVM API: When the VM is in LR mode:
- * - Device atomics are the default behavior when the bo is placed in a single region.
- * - In all other cases device atomics will be disabled with AE=0 until an application
- * request differently using a ioctl like madvise.
- */
if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
- if (xe_vm_in_lr_mode(xe_vma_vm(vma))) {
- if (bo && xe_bo_has_single_placement(bo))
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- /**
- * If a SMEM+LMEM allocation is backed by SMEM, a device
- * atomics will cause a gpu page fault and which then
- * gets migrated to LMEM, bind such allocations with
- * device atomics enabled.
- */
- else if (is_devmem)
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- } else {
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- }
-
- /**
- * Unset AE if the platform(PVC) doesn't support it on an
- * allocation
- */
- if (!xe->info.has_device_atomics_on_smem && !is_devmem)
- xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE;
+ xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
+ xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
+ XE_USM_PPGTT_PTE_AE : 0;
}
- if (is_devmem) {
- xe_walk.default_pte |= XE_PPGTT_PTE_DM;
- xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
- }
-
- if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
- xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
-
+ xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM;
+ xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
if (!range)
xe_bo_assert_held(bo);
@@ -739,6 +768,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
curs.size = xe_vma_size(vma);
}
+walk_pt:
ret = xe_pt_walk_range(&pt->base, pt->level,
range ? range->base.itree.start : xe_vma_start(vma),
range ? range->base.itree.last + 1 : xe_vma_end(vma),
@@ -877,6 +907,11 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated);
+ if (xe_vma_bo(vma))
+ xe_bo_assert_held(xe_vma_bo(vma));
+ else if (xe_vma_is_userptr(vma))
+ lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock);
+
if (!(pt_mask & BIT(tile->id)))
return false;
@@ -1103,12 +1138,14 @@ static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_svm_range *range,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool invalidate_on_bind)
{
int err;
*num_entries = 0;
- err = xe_pt_stage_bind(tile, vma, range, entries, num_entries);
+ err = xe_pt_stage_bind(tile, vma, range, entries, num_entries,
+ invalidate_on_bind);
if (!err)
xe_tile_assert(tile, *num_entries);
@@ -1420,11 +1457,13 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
return err;
}
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
{
struct xe_vm *vm = pt_update->vops->vm;
struct xe_vma_ops *vops = pt_update->vops;
struct xe_vma_op *op;
+ unsigned long i;
int err;
err = xe_pt_pre_commit(pt_update);
@@ -1434,25 +1473,41 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
xe_svm_notifier_lock(vm);
list_for_each_entry(op, &vops->list, link) {
- struct xe_svm_range *range = op->map_range.range;
+ struct xe_svm_range *range = NULL;
if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
continue;
- xe_svm_range_debug(range, "PRE-COMMIT");
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
+ xe_assert(vm->xe,
+ xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
+ xa_for_each(&op->prefetch_range.range, i, range) {
+ xe_svm_range_debug(range, "PRE-COMMIT");
+
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -ENODATA;
+ }
+ }
+ } else {
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+ xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+ range = op->map_range.range;
- xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
- xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+ xe_svm_range_debug(range, "PRE-COMMIT");
- if (!xe_svm_range_pages_valid(range)) {
- xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
- return -EAGAIN;
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -EAGAIN;
+ }
}
}
return 0;
}
+#endif
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
@@ -1791,7 +1846,7 @@ static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma)
+ struct xe_vma *vma, bool invalidate_on_bind)
{
u32 current_op = pt_update_ops->current_op;
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
@@ -1813,7 +1868,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
return err;
err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries,
- &pt_op->num_entries);
+ &pt_op->num_entries, invalidate_on_bind);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
ARRAY_SIZE(pt_op->entries));
@@ -1835,11 +1890,11 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
* If !rebind, and scratch enabled VMs, there is a chance the scratch
* PTE is already cached in the TLB so it needs to be invalidated.
* On !LR VMs this is done in the ring ops preceding a batch, but on
- * non-faulting LR, in particular on user-space batch buffer chaining,
- * it needs to be done here.
+ * LR, in particular on user-space batch buffer chaining, it needs to
+ * be done here.
*/
if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
- xe_vm_in_preempt_fence_mode(vm)))
+ xe_vm_in_lr_mode(vm)))
pt_update_ops->needs_invalidation = true;
else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
/* We bump also if batch_invalidate_tlb is true */
@@ -1875,7 +1930,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
pt_op->rebind = BIT(tile->id) & range->tile_present;
err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
- &pt_op->num_entries);
+ &pt_op->num_entries, false);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
ARRAY_SIZE(pt_op->entries));
@@ -1940,6 +1995,32 @@ static int unbind_op_prepare(struct xe_tile *tile,
return 0;
}
+static bool
+xe_pt_op_check_range_skip_invalidation(struct xe_vm_pgtable_update_op *pt_op,
+ struct xe_svm_range *range)
+{
+ struct xe_vm_pgtable_update *update = pt_op->entries;
+
+ XE_WARN_ON(!pt_op->num_entries);
+
+ /*
+ * We can't skip the invalidation if we are removing PTEs that span more
+ * than the range; do some checks to ensure the PTEs being removed do not
+ * extend beyond the range.
+ */
+
+ if (pt_op->num_entries > 1)
+ return false;
+
+ if (update->pt->level == 0)
+ return true;
+
+ if (update->pt->level == 1)
+ return xe_svm_range_size(range) >= SZ_2M;
+
+ return false;
+}
+
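The level checks above are coverage arithmetic: with 4 KiB base pages and 9 address bits per page-table level, a level-0 leaf maps 4 KiB and a level-1 entry maps 2 MiB, which is where the SZ_2M bound comes from. A one-liner capturing that relation (helper name illustrative):

#include <linux/sizes.h>

/* Coverage of one PTE at a given level, assuming 4 KiB pages and
 * 9 address bits per level: level 0 -> 4 KiB, level 1 -> 2 MiB.
 */
static u64 example_pte_coverage(unsigned int level)
{
	return SZ_4K << (level * 9);
}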
static int unbind_range_prepare(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -1968,7 +2049,10 @@ static int unbind_range_prepare(struct xe_vm *vm,
range->base.itree.last + 1);
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
- pt_update_ops->needs_invalidation = true;
+ pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
+ xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+ range->tile_invalidated) ||
+ !xe_pt_op_check_range_skip_invalidation(pt_op, range);
xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
pt_op->num_entries);
@@ -1987,11 +2071,13 @@ static int op_prepare(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
+ !op->map.invalidate_on_bind) ||
op->map.is_cpu_addr_mirror)
break;
- err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+ err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
+ op->map.invalidate_on_bind);
pt_update_ops->wait_vm_kernel = true;
break;
case DRM_GPUVA_OP_REMAP:
@@ -2005,12 +2091,12 @@ static int op_prepare(struct xe_vm *vm,
if (!err && op->remap.prev) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.prev);
+ op->remap.prev, false);
pt_update_ops->wait_vm_bookkeep = true;
}
if (!err && op->remap.next) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.next);
+ op->remap.next, false);
pt_update_ops->wait_vm_bookkeep = true;
}
break;
@@ -2029,11 +2115,20 @@ static int op_prepare(struct xe_vm *vm,
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- if (xe_vma_is_cpu_addr_mirror(vma))
- break;
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ struct xe_svm_range *range;
+ unsigned long i;
- err = bind_op_prepare(vm, tile, pt_update_ops, vma);
- pt_update_ops->wait_vm_kernel = true;
+ xa_for_each(&op->prefetch_range.range, i, range) {
+ err = bind_range_prepare(vm, tile, pt_update_ops,
+ vma, range);
+ if (err)
+ return err;
+ }
+ } else {
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
+ pt_update_ops->wait_vm_kernel = true;
+ }
break;
}
case DRM_GPUVA_OP_DRIVER:
@@ -2115,7 +2210,7 @@ ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma, struct dma_fence *fence,
- struct dma_fence *fence2)
+ struct dma_fence *fence2, bool invalidate_on_bind)
{
xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
@@ -2130,7 +2225,14 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
}
- vma->tile_present |= BIT(tile->id);
+ /* All WRITE_ONCE() calls pair with READ_ONCE() in xe_vm_has_valid_gpu_mapping() */
+ WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
+ if (invalidate_on_bind)
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated | BIT(tile->id));
+ else
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated & ~BIT(tile->id));
vma->tile_staged &= ~BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
lockdep_assert_held_read(&vm->userptr.notifier_lock);
@@ -2178,6 +2280,18 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
}
}
+static void range_present_and_invalidated_tile(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_id)
+{
+ /* All WRITE_ONCE() calls pair with READ_ONCE() in xe_vm_has_valid_gpu_mapping() */
+
+ lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
+
+ WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id));
+ WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id));
+}
+
static void op_commit(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2193,7 +2307,7 @@ static void op_commit(struct xe_vm *vm,
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
- fence2);
+ fence2, op->map.invalidate_on_bind);
break;
case DRM_GPUVA_OP_REMAP:
{
@@ -2206,10 +2320,10 @@ static void op_commit(struct xe_vm *vm,
if (op->remap.prev)
bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
- fence, fence2);
+ fence, fence2, false);
if (op->remap.next)
bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
- fence, fence2);
+ fence, fence2, false);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -2225,19 +2339,28 @@ static void op_commit(struct xe_vm *vm,
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- if (!xe_vma_is_cpu_addr_mirror(vma))
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ struct xe_svm_range *range = NULL;
+ unsigned long i;
+
+ xa_for_each(&op->prefetch_range.range, i, range)
+ range_present_and_invalidated_tile(vm, range, tile->id);
+ } else {
bind_op_commit(vm, tile, pt_update_ops, vma, fence,
- fence2);
+ fence2, false);
+ }
break;
}
case DRM_GPUVA_OP_DRIVER:
{
- if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
- op->map_range.range->tile_present |= BIT(tile->id);
- op->map_range.range->tile_invalidated &= ~BIT(tile->id);
- } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
- op->unmap_range.range->tile_present &= ~BIT(tile->id);
- }
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
+ range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
+ else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
+ WRITE_ONCE(op->unmap_range.range->tile_present,
+ op->unmap_range.range->tile_present &
+ ~BIT(tile->id));
+
break;
}
default:
@@ -2257,11 +2380,15 @@ static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
.pre_commit = xe_pt_userptr_pre_commit,
};
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
.populate = xe_vm_populate_pgtable,
.clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_svm_pre_commit,
};
+#else
+static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+#endif
/**
* xe_pt_update_ops_run() - Run PT update operations
@@ -2426,7 +2553,7 @@ free_ifence:
kfree(mfence);
kfree(ifence);
kill_vm_tile1:
- if (err != -EAGAIN && tile->id)
+ if (err != -EAGAIN && err != -ENODATA && tile->id)
xe_vm_kill(vops->vm, false);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index 454ea7dc08ac..3d62008c99f1 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -504,65 +504,62 @@ int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 t
return 0;
}
-static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
- spin_lock_irq(&pxp->queues.lock);
- list_add_tail(&q->pxp.link, &pxp->queues.list);
- spin_unlock_irq(&pxp->queues.lock);
+ int ret = 0;
+
+ /*
+ * A queue can be added to the list only while PXP is in the active state;
+ * otherwise the termination flow might not handle it correctly.
+ */
+ mutex_lock(&pxp->mutex);
+
+ if (pxp->status == XE_PXP_ACTIVE) {
+ spin_lock_irq(&pxp->queues.lock);
+ list_add_tail(&q->pxp.link, &pxp->queues.list);
+ spin_unlock_irq(&pxp->queues.lock);
+ } else if (pxp->status == XE_PXP_ERROR || pxp->status == XE_PXP_SUSPENDED) {
+ ret = -EIO;
+ } else {
+ ret = -EBUSY; /* try again later */
+ }
+
+ mutex_unlock(&pxp->mutex);
+
+ return ret;
}
-/**
- * xe_pxp_exec_queue_add - add a queue to the PXP list
- * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
- * @q: the queue to add to the list
- *
- * If PXP is enabled and the prerequisites are done, start the PXP ARB
- * session (if not already running) and add the queue to the PXP list. Note
- * that the queue must have previously been marked as using PXP with
- * xe_pxp_exec_queue_set_type.
- *
- * Returns 0 if the PXP ARB session is running and the queue is in the list,
- * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
- * other errno value if something goes wrong during the session start.
- */
-int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+static int pxp_start(struct xe_pxp *pxp, u8 type)
{
int ret = 0;
+ bool restart = false;
if (!xe_pxp_is_enabled(pxp))
return -ENODEV;
/* we only support HWDRM sessions right now */
- xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);
+ xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);
- /*
- * Runtime suspend kills PXP, so we take a reference to prevent it from
- * happening while we have active queues that use PXP
- */
- xe_pm_runtime_get(pxp->xe);
+ /* get_readiness_status() returns 0 for in-progress and 1 for done */
+ ret = xe_pxp_get_readiness_status(pxp);
+ if (ret <= 0)
+ return ret ?: -EBUSY;
- if (!pxp_prerequisites_done(pxp)) {
- ret = -EBUSY;
- goto out;
- }
+ ret = 0;
wait_for_idle:
/*
* if there is an action in progress, wait for it. We need to wait
* outside the lock because the completion is done from within the lock.
- * Note that the two action should never be pending at the same time.
+ * Note that the two actions should never be pending at the same time.
*/
if (!wait_for_completion_timeout(&pxp->termination,
- msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
+ return -ETIMEDOUT;
if (!wait_for_completion_timeout(&pxp->activation,
- msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
+ return -ETIMEDOUT;
mutex_lock(&pxp->mutex);
@@ -570,11 +567,9 @@ wait_for_idle:
switch (pxp->status) {
case XE_PXP_ERROR:
ret = -EIO;
- break;
+ goto out_unlock;
case XE_PXP_ACTIVE:
- __exec_queue_add(pxp, q);
- mutex_unlock(&pxp->mutex);
- goto out;
+ goto out_unlock;
case XE_PXP_READY_TO_START:
pxp->status = XE_PXP_START_IN_PROGRESS;
reinit_completion(&pxp->activation);
@@ -582,8 +577,8 @@ wait_for_idle:
case XE_PXP_START_IN_PROGRESS:
/* If a start is in progress then the completion must not be done */
XE_WARN_ON(completion_done(&pxp->activation));
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
case XE_PXP_NEEDS_TERMINATION:
mark_termination_in_progress(pxp);
break;
@@ -591,29 +586,25 @@ wait_for_idle:
case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
/* If a termination is in progress then the completion must not be done */
XE_WARN_ON(completion_done(&pxp->termination));
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
case XE_PXP_SUSPENDED:
default:
drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
ret = -EIO;
- break;
+ goto out_unlock;
}
mutex_unlock(&pxp->mutex);
- if (ret)
- goto out;
-
if (!completion_done(&pxp->termination)) {
ret = pxp_terminate_hw(pxp);
if (ret) {
drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
mutex_lock(&pxp->mutex);
pxp->status = XE_PXP_ERROR;
- mutex_unlock(&pxp->mutex);
- goto out;
+ goto out_unlock;
}
goto wait_for_idle;
@@ -635,21 +626,59 @@ wait_for_idle:
if (pxp->status != XE_PXP_START_IN_PROGRESS) {
drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
pxp->status = XE_PXP_NEEDS_TERMINATION;
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
}
/* If everything went ok, update the status and add the queue to the list */
- if (!ret) {
+ if (!ret)
pxp->status = XE_PXP_ACTIVE;
- __exec_queue_add(pxp, q);
- } else {
+ else
pxp->status = XE_PXP_ERROR;
- }
+out_unlock:
mutex_unlock(&pxp->mutex);
-out:
+ if (restart)
+ goto wait_for_idle;
+
+ return ret;
+}
+
+/**
+ * xe_pxp_exec_queue_add - add a queue to the PXP list
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to add to the list
+ *
+ * If PXP is enabled and the prerequisites are done, start the PXP default
+ * session (if not already running) and add the queue to the PXP list.
+ *
+ * Returns 0 if the PXP session is running and the queue is in the list,
+ * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
+ * other errno value if something goes wrong during the session start.
+ */
+int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ int ret;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /*
+ * Runtime suspend kills PXP, so we take a reference to prevent it from
+ * happening while we have active queues that use PXP
+ */
+ xe_pm_runtime_get(pxp->xe);
+
+start:
+ ret = pxp_start(pxp, q->pxp.type);
+
+ if (!ret) {
+ ret = __exec_queue_add(pxp, q);
+ if (ret == -EBUSY)
+ goto start;
+ }
+
/*
* in the successful case the PM ref is released from
* xe_pxp_exec_queue_remove
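A hypothetical caller of the reworked API: the queue is first typed as a PXP queue and then added; xe_pxp_exec_queue_add() itself retries the start/add sequence when the session drops out of the ACTIVE state in between:

/* Hypothetical caller; only error propagation is shown. */
static int example_setup_pxp_queue(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int err;

	err = xe_pxp_exec_queue_set_type(pxp, q, DRM_XE_PXP_TYPE_HWDRM);
	if (err)
		return err;

	return xe_pxp_exec_queue_add(pxp, q);
}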
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 5e65830dad25..d517ec9ddcbf 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -141,7 +141,7 @@ query_engine_cycles(struct xe_device *xe,
return -EINVAL;
eci = &resp.eci;
- if (eci->gt_id >= XE_MAX_GT_PER_TILE)
+ if (eci->gt_id >= xe->info.max_gt_per_tile)
return -EINVAL;
gt = xe_device_get_gt(xe, eci->gt_id);
@@ -340,7 +340,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
- if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_GPUSVM))
+ if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
@@ -368,6 +368,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
struct drm_xe_query_gt_list __user *query_ptr =
u64_to_user_ptr(query->data);
struct drm_xe_query_gt_list *gt_list;
+ int iter = 0;
u8 id;
if (query->size == 0) {
@@ -385,12 +386,12 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
for_each_gt(gt, xe, id) {
if (xe_gt_is_media_type(gt))
- gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
+ gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
else
- gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
- gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id;
- gt_list->gt_list[id].gt_id = gt->info.id;
- gt_list->gt_list[id].reference_clock = gt->info.reference_clock;
+ gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
+ gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
+ gt_list->gt_list[iter].gt_id = gt->info.id;
+ gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
/*
* The mem_regions indexes in the mask below need to
* directly identify the struct
@@ -406,19 +407,21 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
* assumption.
*/
if (!IS_DGFX(xe))
- gt_list->gt_list[id].near_mem_regions = 0x1;
+ gt_list->gt_list[iter].near_mem_regions = 0x1;
else
- gt_list->gt_list[id].near_mem_regions =
+ gt_list->gt_list[iter].near_mem_regions =
BIT(gt_to_tile(gt)->id) << 1;
- gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
- gt_list->gt_list[id].near_mem_regions;
+ gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
+ gt_list->gt_list[iter].near_mem_regions;
- gt_list->gt_list[id].ip_ver_major =
+ gt_list->gt_list[iter].ip_ver_major =
REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
- gt_list->gt_list[id].ip_ver_minor =
+ gt_list->gt_list[iter].ip_ver_minor =
REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
- gt_list->gt_list[id].ip_ver_rev =
+ gt_list->gt_list[iter].ip_ver_rev =
REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);
+
+ iter++;
}
if (copy_to_user(query_ptr, gt_list, size)) {
@@ -683,8 +686,8 @@ static int query_oa_units(struct xe_device *xe,
du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
- DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
-
+ DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
+ DRM_XE_OA_CAPS_OAM;
j = 0;
for_each_hw_engine(hwe, gt, hwe_id) {
if (!xe_hw_engine_is_reserved(hwe) &&
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 9475e3f74958..fc8447a838c4 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -173,6 +173,9 @@ void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
if (xa_empty(&sr->xa))
return;
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index a7582b097ae6..7b50c7c1ee21 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -110,13 +110,14 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
return i;
}
-static int emit_flush_invalidate(u32 *dw, int i)
+static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i)
{
dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
- MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX;
- dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR;
- dw[i++] = 0;
+ MI_FLUSH_IMM_DW;
+
+ dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
dw[i++] = 0;
+ dw[i++] = val;
return i;
}
@@ -234,13 +235,10 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job)
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
{
- dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT |
- MI_COPY_MEM_MEM_DST_GGTT;
+ dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
+ dw[i++] = RING_CTX_TIMESTAMP(0).addr;
dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
- dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
- dw[i++] = 0;
- dw[i++] = MI_NOOP;
return i;
}
@@ -400,23 +398,20 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
static void emit_migration_job_gen12(struct xe_sched_job *job,
struct xe_lrc *lrc, u32 seqno)
{
+ u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
u32 dw[MAX_JOB_SIZE_DW], i = 0;
i = emit_copy_timestamp(lrc, dw, i);
- i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, dw, i);
+ i = emit_store_imm_ggtt(saddr, seqno, dw, i);
dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */
i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);
- if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
- /* XXX: Do we need this? Leaving for now. */
- dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(dw, i);
- dw[i++] = preparser_disable(false);
- }
+ dw[i++] = preparser_disable(true);
+ i = emit_flush_invalidate(saddr, seqno, dw, i);
+ dw[i++] = preparser_disable(false);
i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);
diff --git a/drivers/gpu/drm/xe/xe_ring_ops_types.h b/drivers/gpu/drm/xe/xe_ring_ops_types.h
index 1ae56e2ee7b4..d7e3e150a9a5 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops_types.h
+++ b/drivers/gpu/drm/xe/xe_ring_ops_types.h
@@ -8,7 +8,7 @@
struct xe_sched_job;
-#define MAX_JOB_SIZE_DW 48
+#define MAX_JOB_SIZE_DW 58
#define MAX_JOB_SIZE_BYTES (MAX_JOB_SIZE_DW * 4)
/**
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 13bb62d3e615..95571b87aa73 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -56,37 +56,61 @@ static bool rule_matches(const struct xe_device *xe,
xe->info.subplatform == r->subplatform;
break;
case XE_RTP_MATCH_GRAPHICS_VERSION:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.graphics_verx100 == r->ver_start &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_GRAPHICS_VERSION_RANGE:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.graphics_verx100 >= r->ver_start &&
xe->info.graphics_verx100 <= r->ver_end &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.graphics_verx100 == r->ver_start;
break;
case XE_RTP_MATCH_GRAPHICS_STEP:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.step.graphics >= r->step_start &&
xe->info.step.graphics < r->step_end &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.media_verx100 == r->ver_start &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION_RANGE:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.media_verx100 >= r->ver_start &&
xe->info.media_verx100 <= r->ver_end &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_STEP:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.step.media >= r->step_start &&
xe->info.step.media < r->step_end &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION_ANY_GT:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.media_verx100 == r->ver_start;
break;
case XE_RTP_MATCH_INTEGRATED:
@@ -108,6 +132,9 @@ static bool rule_matches(const struct xe_device *xe,
match = hwe->class != r->engine_class;
break;
case XE_RTP_MATCH_FUNC:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = r->match_func(gt, hwe);
break;
default:
@@ -186,6 +213,11 @@ static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
struct xe_device **xe)
{
switch (ctx->type) {
+ case XE_RTP_PROCESS_TYPE_DEVICE:
+ *hwe = NULL;
+ *gt = NULL;
+ *xe = ctx->xe;
+ break;
case XE_RTP_PROCESS_TYPE_GT:
*hwe = NULL;
*gt = ctx->gt;
@@ -258,9 +290,6 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
rtp_get_context(ctx, &hwe, &gt, &xe);
- if (IS_SRIOV_VF(xe))
- return;
-
xe_assert(xe, entries);
for (entry = entries; entry - entries < n_entries; entry++) {
@@ -329,21 +358,6 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
hwe->engine_id == __ffs(render_compute_mask);
}
-bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
- const struct xe_hw_engine *hwe)
-{
- unsigned int dss_per_gslice = 4;
- unsigned int dss;
-
- if (drm_WARN(&gt_to_xe(gt)->drm, xe_dss_mask_empty(gt->fuse_topo.g_dss_mask),
- "Checking gslice for platform without geometry pipeline\n"))
- return false;
-
- dss = xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0);
-
- return dss >= dss_per_gslice;
-}
-
bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 4fe736a11c42..5ed6c14b9ae3 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -422,7 +422,8 @@ struct xe_reg_sr;
#define XE_RTP_PROCESS_CTX_INITIALIZER(arg__) _Generic((arg__), \
struct xe_hw_engine * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_ENGINE }, \
- struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT })
+ struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT }, \
+ struct xe_device * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_DEVICE })
void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
unsigned long *active_entries,
@@ -466,17 +467,6 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
/*
- * xe_rtp_match_first_gslice_fused_off - Match when first gslice is fused off
- *
- * @gt: GT structure
- * @hwe: Engine instance
- *
- * Returns: true if first gslice is fused off, false otherwise.
- */
-bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
- const struct xe_hw_engine *hwe);
-
-/*
* xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
*
* @gt: GT structure
diff --git a/drivers/gpu/drm/xe/xe_rtp_types.h b/drivers/gpu/drm/xe/xe_rtp_types.h
index 1b76b947c706..f4cf30e298cf 100644
--- a/drivers/gpu/drm/xe/xe_rtp_types.h
+++ b/drivers/gpu/drm/xe/xe_rtp_types.h
@@ -110,12 +110,14 @@ struct xe_rtp_entry {
};
enum xe_rtp_process_type {
+ XE_RTP_PROCESS_TYPE_DEVICE,
XE_RTP_PROCESS_TYPE_GT,
XE_RTP_PROCESS_TYPE_ENGINE,
};
struct xe_rtp_process_ctx {
union {
+ struct xe_device *xe;
struct xe_gt *gt;
struct xe_hw_engine *hwe;
};
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index f8fe61e25518..1d43e183ca21 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -60,7 +60,8 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
size / SZ_1K, bo);
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 1905ca590965..d21bf8f26964 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -113,7 +113,8 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
kref_init(&job->refcount);
xe_exec_queue_get(job->q);
- err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
+ err = drm_sched_job_init(&job->drm, q->entity, 1, NULL,
+ q->xef ? q->xef->drm->client_id : 0);
if (err)
goto err_free;
@@ -216,15 +217,17 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
bool xe_sched_job_started(struct xe_sched_job *job)
{
+ struct dma_fence *fence = dma_fence_chain_contained(job->fence);
struct xe_lrc *lrc = job->q->lrc[0];
- return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
- xe_lrc_start_seqno(lrc),
- dma_fence_chain_contained(job->fence)->ops);
+ return !__dma_fence_is_later(fence,
+ xe_sched_job_lrc_seqno(job),
+ xe_lrc_start_seqno(lrc));
}
bool xe_sched_job_completed(struct xe_sched_job *job)
{
+ struct dma_fence *fence = dma_fence_chain_contained(job->fence);
struct xe_lrc *lrc = job->q->lrc[0];
/*
@@ -232,9 +235,9 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
* parallel handshake is done.
*/
- return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
- xe_lrc_seqno(lrc),
- dma_fence_chain_contained(job->fence)->ops);
+ return !__dma_fence_is_later(fence,
+ xe_sched_job_lrc_seqno(job),
+ xe_lrc_seqno(lrc));
}
void xe_sched_job_arm(struct xe_sched_job *job)
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
index 8184390f9c7b..90244fe59b59 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.c
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -5,6 +5,7 @@
#include <linux/shrinker.h>
+#include <drm/drm_managed.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -53,10 +54,10 @@ xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgea
write_unlock(&shrinker->lock);
}
-static s64 xe_shrinker_walk(struct xe_device *xe,
- struct ttm_operation_ctx *ctx,
- const struct xe_bo_shrink_flags flags,
- unsigned long to_scan, unsigned long *scanned)
+static s64 __xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
{
unsigned int mem_type;
s64 freed = 0, lret;
@@ -65,11 +66,15 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
struct ttm_bo_lru_cursor curs;
struct ttm_buffer_object *ttm_bo;
+ struct ttm_lru_walk_arg arg = {
+ .ctx = ctx,
+ .trylock_only = true,
+ };
if (!man || !man->use_tt)
continue;
- ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) {
+ ttm_bo_lru_for_each_reserved_guarded(&curs, man, &arg, ttm_bo) {
if (!ttm_bo_shrink_suitable(ttm_bo, ctx))
continue;
@@ -81,6 +86,50 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
if (*scanned >= to_scan)
break;
}
+ /* Trylocks should never error, just fail. */
+ xe_assert(xe, !IS_ERR(ttm_bo));
+ }
+
+ return freed;
+}
+
+/*
+ * Try shrinking idle objects without writeback first. If that is not
+ * sufficient, also try non-idle objects, and finally, if that is still not
+ * sufficient, add writeback. This avoids stalls and explicit writeback under
+ * light or moderate memory pressure.
+ */
+static s64 xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
+{
+ bool no_wait_gpu = true;
+ struct xe_bo_shrink_flags save_flags = flags;
+ s64 lret, freed;
+
+ swap(no_wait_gpu, ctx->no_wait_gpu);
+ save_flags.writeback = false;
+ lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
+ swap(no_wait_gpu, ctx->no_wait_gpu);
+ if (lret < 0 || *scanned >= to_scan)
+ return lret;
+
+ freed = lret;
+ if (!ctx->no_wait_gpu) {
+ lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
+ if (lret < 0)
+ return lret;
+ freed += lret;
+ if (*scanned >= to_scan)
+ return freed;
+ }
+
+ if (flags.writeback) {
+ lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned);
+ if (lret < 0)
+ return lret;
+ freed += lret;
}
return freed;
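The strategy reads as three escalating passes over the same LRU walk: idle objects only with no writeback, then allowing GPU waits, then allowing writeback. A standalone C sketch of that escalation pattern, with all names and the per-pass accounting invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    struct pass { bool wait_gpu; bool writeback; };

    /* Stand-in for __xe_shrinker_walk(): pretend each cheap pass frees a
     * quarter of the target and the writeback pass frees the remainder. */
    static long do_pass(struct pass p, long want, long *scanned)
    {
        long freed = p.writeback ? want - *scanned : want / 4;

        *scanned += freed;
        return freed;
    }

    int main(void)
    {
        const struct pass passes[] = {
            { .wait_gpu = false, .writeback = false },
            { .wait_gpu = true,  .writeback = false },
            { .wait_gpu = true,  .writeback = true  },
        };
        long want = 100, scanned = 0, freed = 0;

        for (int i = 0; i < 3 && scanned < want; i++)
            freed += do_pass(passes[i], want, &scanned);

        printf("freed %ld of %ld pages\n", freed, want);
        return 0;
    }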
@@ -192,6 +241,7 @@ static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_con
runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);
shrink_flags.purge = false;
+
lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
nr_to_scan, &nr_scanned);
if (lret >= 0)
@@ -213,24 +263,34 @@ static void xe_shrinker_pm(struct work_struct *work)
xe_pm_runtime_put(shrinker->xe);
}
+static void xe_shrinker_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_shrinker *shrinker = arg;
+
+ xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
+ xe_assert(shrinker->xe, !shrinker->purgeable_pages);
+ shrinker_free(shrinker->shrink);
+ flush_work(&shrinker->pm_worker);
+ kfree(shrinker);
+}
+
/**
* xe_shrinker_create() - Create an xe per-device shrinker
* @xe: Pointer to the xe device.
*
- * Returns: A pointer to the created shrinker on success,
- * Negative error code on failure.
+ * Return: %0 on success. Negative error code on failure.
*/
-struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
+int xe_shrinker_create(struct xe_device *xe)
{
struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
if (!shrinker)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- shrinker->shrink = shrinker_alloc(0, "xe system shrinker");
+ shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
if (!shrinker->shrink) {
kfree(shrinker);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
@@ -240,19 +300,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
shrinker->shrink->scan_objects = xe_shrinker_scan;
shrinker->shrink->private_data = shrinker;
shrinker_register(shrinker->shrink);
+ xe->mem.shrinker = shrinker;
- return shrinker;
-}
-
-/**
- * xe_shrinker_destroy() - Destroy an xe per-device shrinker
- * @shrinker: Pointer to the shrinker to destroy.
- */
-void xe_shrinker_destroy(struct xe_shrinker *shrinker)
-{
- xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
- xe_assert(shrinker->xe, !shrinker->purgeable_pages);
- shrinker_free(shrinker->shrink);
- flush_work(&shrinker->pm_worker);
- kfree(shrinker);
+ return drmm_add_action_or_reset(&xe->drm, xe_shrinker_fini, shrinker);
}
diff --git a/drivers/gpu/drm/xe/xe_shrinker.h b/drivers/gpu/drm/xe/xe_shrinker.h
index 28a038f4fcbf..5132ae5192e1 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.h
+++ b/drivers/gpu/drm/xe/xe_shrinker.h
@@ -11,8 +11,6 @@ struct xe_device;
void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable);
-struct xe_shrinker *xe_shrinker_create(struct xe_device *xe);
-
-void xe_shrinker_destroy(struct xe_shrinker *shrinker);
+int xe_shrinker_create(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c
index 0f721ae17b26..27ddf3cc80e9 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.c
@@ -3,13 +3,18 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
#include "xe_assert.h"
#include "xe_device.h"
+#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_service.h"
#include "xe_sriov_printk.h"
static unsigned int wanted_max_vfs(struct xe_device *xe)
@@ -80,9 +85,48 @@ bool xe_sriov_pf_readiness(struct xe_device *xe)
*/
int xe_sriov_pf_init_early(struct xe_device *xe)
{
+ int err;
+
xe_assert(xe, IS_SRIOV_PF(xe));
- return drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
+ xe->sriov.pf.vfs = drmm_kcalloc(&xe->drm, 1 + xe_sriov_pf_get_totalvfs(xe),
+ sizeof(*xe->sriov.pf.vfs), GFP_KERNEL);
+ if (!xe->sriov.pf.vfs)
+ return -ENOMEM;
+
+ err = drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
+ if (err)
+ return err;
+
+ xe_sriov_pf_service_init(xe);
+
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate.
+ * @xe: the &xe_device to test
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_wait_ready(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err;
+
+ if (xe_device_wedged(xe))
+ return -ECANCELED;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_wait_ready(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/**
@@ -102,3 +146,45 @@ void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
}
+
+static int simple_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct xe_device *xe = parent->d_inode->i_private;
+ void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data;
+
+ print(xe, &p);
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary },
+ { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions },
+};
+
+/**
+ * xe_sriov_pf_debugfs_register - Register PF debugfs attributes.
+ * @xe: the &xe_device
+ * @root: the root &dentry
+ *
+ * Prepare debugfs attributes exposed by the PF.
+ */
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+ struct drm_minor *minor = xe->drm.primary;
+ struct dentry *parent;
+
+ /*
+ * /sys/kernel/debug/dri/0/
+ * ├── pf
+ * │   ├── ...
+ */
+ parent = debugfs_create_dir("pf", root);
+ if (IS_ERR(parent))
+ return;
+ parent->d_inode->i_private = xe;
+
+ drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), parent, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h
index d1220e70e1c0..e3b34f8f5e04 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.h
@@ -8,12 +8,15 @@
#include <linux/types.h>
+struct dentry;
struct drm_printer;
struct xe_device;
#ifdef CONFIG_PCI_IOV
bool xe_sriov_pf_readiness(struct xe_device *xe);
int xe_sriov_pf_init_early(struct xe_device *xe);
+int xe_sriov_pf_wait_ready(struct xe_device *xe);
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
#else
static inline bool xe_sriov_pf_readiness(struct xe_device *xe)
@@ -25,6 +28,10 @@ static inline int xe_sriov_pf_init_early(struct xe_device *xe)
{
return 0;
}
+
+static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+}
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_sriov_pf_service.c
new file mode 100644
index 000000000000..eee3b2a1ba41
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_service.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#include "abi/guc_relay_actions_abi.h"
+
+#include "xe_device_types.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_printk.h"
+
+#include "xe_sriov_pf_service.h"
+#include "xe_sriov_pf_service_types.h"
+
+/**
+ * xe_sriov_pf_service_init - Early initialization of the SR-IOV PF service.
+ * @xe: the &xe_device to initialize
+ *
+ * Performs early initialization of the SR-IOV PF service.
+ *
+ * This function can only be called on PF.
+ */
+void xe_sriov_pf_service_init(struct xe_device *xe)
+{
+ BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
+ BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ /* base versions may differ between platforms */
+ xe->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
+ xe->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;
+
+ /* latest version is same for all platforms */
+ xe->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
+ xe->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
+}
+
+/* Return: 0 on success or a negative error code on failure. */
+static int pf_negotiate_version(struct xe_device *xe,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor)
+{
+ struct xe_sriov_pf_service_version base = xe->sriov.pf.service.version.base;
+ struct xe_sriov_pf_service_version latest = xe->sriov.pf.service.version.latest;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, base.major);
+ xe_assert(xe, base.major <= latest.major);
+ xe_assert(xe, (base.major < latest.major) || (base.minor <= latest.minor));
+
+ /* VF doesn't care - return our latest */
+ if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
+ wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
+ *major = latest.major;
+ *minor = latest.minor;
+ return 0;
+ }
+
+ /* VF wants newer than our - return our latest */
+ if (wanted_major > latest.major) {
+ *major = latest.major;
+ *minor = latest.minor;
+ return 0;
+ }
+
+ /* VF wants older than min required - reject */
+ if (wanted_major < base.major ||
+ (wanted_major == base.major && wanted_minor < base.minor)) {
+ return -EPERM;
+ }
+
+ /* previous major - return wanted, as we should still support it */
+ if (wanted_major < latest.major) {
+ /* XXX: we are not prepared for multi-versions yet */
+ xe_assert(xe, base.major == latest.major);
+ return -ENOPKG;
+ }
+
+ /* same major - return common minor */
+ *major = wanted_major;
+ *minor = min_t(u32, latest.minor, wanted_minor);
+ return 0;
+}
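Concretely, the rules collapse to a short decision table. A standalone sketch mirroring them, with invented base 4.1 / latest 4.5 versions; the previous-major -ENOPKG branch is omitted because, as the XXX above notes, base and latest currently share a major:

    #include <assert.h>
    #include <stdio.h>

    #define ANY 0 /* stand-in for VF2PF_HANDSHAKE_{MAJOR,MINOR}_ANY */

    struct ver { unsigned int major, minor; };

    static const struct ver base = { 4, 1 }, latest = { 4, 5 };

    /* Mirrors pf_negotiate_version(); returns 0 or -1 for "reject". */
    static int negotiate(struct ver want, struct ver *out)
    {
        if ((want.major == ANY && want.minor == ANY) ||
            want.major > latest.major) {
            *out = latest;  /* "don't care" or too new: offer our latest */
            return 0;
        }
        if (want.major < base.major ||
            (want.major == base.major && want.minor < base.minor))
            return -1;      /* older than the minimum: reject */

        out->major = want.major;  /* same major: common minor */
        out->minor = want.minor < latest.minor ? want.minor : latest.minor;
        return 0;
    }

    int main(void)
    {
        struct ver v;

        assert(!negotiate((struct ver){ ANY, ANY }, &v) && v.minor == 5);
        assert(!negotiate((struct ver){ 5, 0 }, &v) && v.minor == 5);
        assert(negotiate((struct ver){ 3, 9 }, &v) == -1);
        assert(negotiate((struct ver){ 4, 0 }, &v) == -1);
        assert(!negotiate((struct ver){ 4, 2 }, &v) && v.minor == 2);
        assert(!negotiate((struct ver){ 4, 9 }, &v) && v.minor == 5);
        printf("negotiation cases behave as described\n");
        return 0;
    }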
+
+static void pf_connect(struct xe_device *xe, u32 vfid, u32 major, u32 minor)
+{
+ xe_sriov_pf_assert_vfid(xe, vfid);
+ xe_assert(xe, major || minor);
+
+ xe->sriov.pf.vfs[vfid].version.major = major;
+ xe->sriov.pf.vfs[vfid].version.minor = minor;
+}
+
+static void pf_disconnect(struct xe_device *xe, u32 vfid)
+{
+ xe_sriov_pf_assert_vfid(xe, vfid);
+
+ xe->sriov.pf.vfs[vfid].version.major = 0;
+ xe->sriov.pf.vfs[vfid].version.minor = 0;
+}
+
+/**
+ * xe_sriov_pf_service_is_negotiated - Check if VF has negotiated given ABI version.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @major: the major version to check
+ * @minor: the minor version to check
+ *
+ * Checks whether the given ABI version is covered by the version negotiated
+ * with the VF.
+ *
+ * This function can only be called on PF.
+ *
+ * Returns: true if VF can use given ABI version functionality.
+ */
+bool xe_sriov_pf_service_is_negotiated(struct xe_device *xe, u32 vfid, u32 major, u32 minor)
+{
+ xe_sriov_pf_assert_vfid(xe, vfid);
+
+ return major == xe->sriov.pf.vfs[vfid].version.major &&
+ minor <= xe->sriov.pf.vfs[vfid].version.minor;
+}
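A typical caller gates optional PF functionality on the ABI level a given VF negotiated; a minimal sketch, with the version requirement (4.2) invented for illustration:

    /* Hypothetical guard: only service the request for VFs that
     * negotiated ABI 4.2 or a later minor of the same major. */
    if (!xe_sriov_pf_service_is_negotiated(xe, vfid, 4, 2))
        return -EACCES;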
+
+/**
+ * xe_sriov_pf_service_handshake_vf - Confirm a connection with the VF.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @wanted_major: the major service version expected by the VF
+ * @wanted_minor: the minor service version expected by the VF
+ * @major: the major service version to be used by the VF
+ * @minor: the minor service version to be used by the VF
+ *
+ * Negotiate a VF/PF ABI version to allow the VF to use the PF services.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_service_handshake_vf(struct xe_device *xe, u32 vfid,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor)
+{
+ int err;
+
+ xe_sriov_dbg_verbose(xe, "VF%u wants ABI version %u.%u\n",
+ vfid, wanted_major, wanted_minor);
+
+ err = pf_negotiate_version(xe, wanted_major, wanted_minor, major, minor);
+
+ if (err < 0) {
+ xe_sriov_notice(xe, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
+ vfid, wanted_major, wanted_minor, ERR_PTR(err));
+ pf_disconnect(xe, vfid);
+ } else {
+ xe_sriov_dbg(xe, "VF%u negotiated ABI version %u.%u\n",
+ vfid, *major, *minor);
+ pf_connect(xe, vfid, *major, *minor);
+ }
+
+ return err;
+}
+
+/**
+ * xe_sriov_pf_service_reset_vf - Reset a connection with the VF.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * Reset a VF driver negotiated VF/PF ABI version.
+ *
+ * After this point, the VF driver will have to perform a new version handshake
+ * to continue using the PF services.
+ *
+ * This function can only be called on PF.
+ */
+void xe_sriov_pf_service_reset_vf(struct xe_device *xe, unsigned int vfid)
+{
+ pf_disconnect(xe, vfid);
+}
+
+static void print_pf_version(struct drm_printer *p, const char *name,
+ const struct xe_sriov_pf_service_version *version)
+{
+ drm_printf(p, "%s:\t%u.%u\n", name, version->major, version->minor);
+}
+
+/**
+ * xe_sriov_pf_service_print_versions - Print ABI versions negotiated with VFs.
+ * @xe: the &xe_device
+ * @p: the &drm_printer
+ *
+ * This function is for PF use only.
+ */
+void xe_sriov_pf_service_print_versions(struct xe_device *xe, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
+ struct xe_sriov_pf_service_version *version;
+ char name[8];
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ print_pf_version(p, "base", &xe->sriov.pf.service.version.base);
+ print_pf_version(p, "latest", &xe->sriov.pf.service.version.latest);
+
+ for (n = 1; n <= total_vfs; n++) {
+ version = &xe->sriov.pf.vfs[n].version;
+ if (!version->major && !version->minor)
+ continue;
+
+ print_pf_version(p, xe_sriov_function_name(n, name, sizeof(name)), version);
+ }
+}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_sriov_pf_service_kunit.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service.h b/drivers/gpu/drm/xe/xe_sriov_pf_service.h
new file mode 100644
index 000000000000..d38c18f5ed10
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_service.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_SERVICE_H_
+#define _XE_SRIOV_PF_SERVICE_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_device;
+
+void xe_sriov_pf_service_init(struct xe_device *xe);
+void xe_sriov_pf_service_print_versions(struct xe_device *xe, struct drm_printer *p);
+
+int xe_sriov_pf_service_handshake_vf(struct xe_device *xe, u32 vfid,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor);
+bool xe_sriov_pf_service_is_negotiated(struct xe_device *xe, u32 vfid, u32 major, u32 minor);
+void xe_sriov_pf_service_reset_vf(struct xe_device *xe, unsigned int vfid);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h
new file mode 100644
index 000000000000..0835dde358c1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_SERVICE_TYPES_H_
+#define _XE_SRIOV_PF_SERVICE_TYPES_H_
+
+#include <linux/types.h>
+
+/**
+ * struct xe_sriov_pf_service_version - VF/PF ABI Version.
+ * @major: the major version of the VF/PF ABI
+ * @minor: the minor version of the VF/PF ABI
+ *
+ * See `GuC Relay Communication`_.
+ */
+struct xe_sriov_pf_service_version {
+ u16 major;
+ u16 minor;
+};
+
+/**
+ * struct xe_sriov_pf_service - Data used by the PF service.
+ * @version: information about VF/PF ABI versions for current platform.
+ * @version.base: lowest VF/PF ABI version that could be negotiated with VF.
+ * @version.latest: latest VF/PF ABI version supported by the PF driver.
+ */
+struct xe_sriov_pf_service {
+ struct {
+ struct xe_sriov_pf_service_version base;
+ struct xe_sriov_pf_service_version latest;
+ } version;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
new file mode 100644
index 000000000000..956a88f9f213
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_TYPES_H_
+#define _XE_SRIOV_PF_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "xe_sriov_pf_service_types.h"
+
+/**
+ * struct xe_sriov_metadata - per-VF device level metadata
+ */
+struct xe_sriov_metadata {
+ /** @version: negotiated VF/PF ABI version */
+ struct xe_sriov_pf_service_version version;
+};
+
+/**
+ * struct xe_device_pf - Xe PF related data
+ *
+ * The data in this structure is valid only if driver is running in the
+ * @XE_SRIOV_MODE_PF mode.
+ */
+struct xe_device_pf {
+ /** @device_total_vfs: Maximum number of VFs supported by the device. */
+ u16 device_total_vfs;
+
+ /** @driver_max_vfs: Maximum number of VFs supported by the driver. */
+ u16 driver_max_vfs;
+
+ /** @master_lock: protects all VFs configurations across GTs */
+ struct mutex master_lock;
+
+ /** @service: device level service data. */
+ struct xe_sriov_pf_service service;
+
+ /** @vfs: metadata for all VFs. */
+ struct xe_sriov_metadata *vfs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index ca94382a721e..1a138108d139 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -7,9 +7,6 @@
#define _XE_SRIOV_TYPES_H_
#include <linux/build_bug.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/workqueue_types.h>
/**
* VFID - Virtual Function Identifier
@@ -40,37 +37,4 @@ enum xe_sriov_mode {
};
static_assert(XE_SRIOV_MODE_NONE);
-/**
- * struct xe_device_pf - Xe PF related data
- *
- * The data in this structure is valid only if driver is running in the
- * @XE_SRIOV_MODE_PF mode.
- */
-struct xe_device_pf {
- /** @device_total_vfs: Maximum number of VFs supported by the device. */
- u16 device_total_vfs;
-
- /** @driver_max_vfs: Maximum number of VFs supported by the driver. */
- u16 driver_max_vfs;
-
- /** @master_lock: protects all VFs configurations across GTs */
- struct mutex master_lock;
-};
-
-/**
- * struct xe_device_vf - Xe Virtual Function related data
- *
- * The data in this structure is valid only if driver is running in the
- * @XE_SRIOV_MODE_VF mode.
- */
-struct xe_device_vf {
- /** @migration: VF Migration state data */
- struct {
- /** @migration.worker: VF migration recovery worker */
- struct work_struct worker;
- /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
- unsigned long gt_flags;
- } migration;
-};
-
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index c1275e64aa9c..26e243c28994 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -7,12 +7,15 @@
#include "xe_assert.h"
#include "xe_device.h"
+#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
+#include "xe_guc_ct.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
+#include "xe_tile_sriov_vf.h"
/**
* DOC: VF restore procedure in PF KMD and VF KMD
@@ -121,6 +124,15 @@
* | | |
*/
+static bool vf_migration_supported(struct xe_device *xe)
+{
+ /*
+ * TODO: Add conditions to allow specific platforms, when they're
+ * supported at production quality.
+ */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+}
+
static void migration_worker_func(struct work_struct *w);
/**
@@ -130,86 +142,118 @@ static void migration_worker_func(struct work_struct *w);
void xe_sriov_vf_init_early(struct xe_device *xe)
{
INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+
+ if (!vf_migration_supported(xe))
+ xe_sriov_info(xe, "migration not supported by this module version\n");
}
-/**
- * vf_post_migration_requery_guc - Re-query GuC for current VF provisioning.
+static bool gt_vf_post_migration_needed(struct xe_gt *gt)
+{
+ return test_bit(gt->info.id, &gt_to_xe(gt)->sriov.vf.migration.gt_flags);
+}
+
+/*
+ * Notify the GuCs marked in @gt_flags that resource fixup application has finished.
* @xe: the &xe_device struct instance
- *
- * After migration, we need to re-query all VF configuration to make sure
- * they match previous provisioning. Note that most of VF provisioning
- * shall be the same, except GGTT range, since GGTT is not virtualized per-VF.
- *
- * Returns: 0 if the operation completed successfully, or a negative error
- * code otherwise.
+ * @gt_flags: flags marking to which GTs the notification shall be sent
*/
-static int vf_post_migration_requery_guc(struct xe_device *xe)
+static int vf_post_migration_notify_resfix_done(struct xe_device *xe, unsigned long gt_flags)
{
struct xe_gt *gt;
unsigned int id;
- int err, ret = 0;
+ int err = 0;
for_each_gt(gt, xe, id) {
- err = xe_gt_sriov_vf_query_config(gt);
- ret = ret ?: err;
+ if (!test_bit(id, &gt_flags))
+ continue;
+		/* skip asking GuC for RESFIX exit if a new recovery request has arrived */
+ if (gt_vf_post_migration_needed(gt))
+ continue;
+ err = xe_gt_sriov_vf_notify_resfix_done(gt);
+ if (err)
+ break;
+ clear_bit(id, &gt_flags);
}
- return ret;
-}
-
-/*
- * vf_post_migration_imminent - Check if post-restore recovery is coming.
- * @xe: the &xe_device struct instance
- *
- * Return: True if migration recovery worker will soon be running. Any worker currently
- * executing does not affect the result.
- */
-static bool vf_post_migration_imminent(struct xe_device *xe)
-{
- return xe->sriov.vf.migration.gt_flags != 0 ||
- work_pending(&xe->sriov.vf.migration.worker);
+ if (gt_flags && !err)
+ drm_dbg(&xe->drm, "another recovery imminent, skipped some notifications\n");
+ return err;
}
-/*
- * Notify all GuCs about resource fixups apply finished.
- */
-static void vf_post_migration_notify_resfix_done(struct xe_device *xe)
+static int vf_get_next_migrated_gt_id(struct xe_device *xe)
{
struct xe_gt *gt;
unsigned int id;
for_each_gt(gt, xe, id) {
- if (vf_post_migration_imminent(xe))
- goto skip;
- xe_gt_sriov_vf_notify_resfix_done(gt);
+ if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags))
+ return id;
}
- return;
+ return -1;
+}
-skip:
- drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");
+/**
+ * gt_vf_post_migration_fixups - Perform post-migration fixups on a single GT.
+ * @gt: the &xe_gt struct instance
+ *
+ * After migration, the GuC needs to be re-queried for the VF configuration to
+ * check whether it matches the previous provisioning. Most of the VF
+ * provisioning shall be the same, except the GGTT range, since GGTT is not
+ * virtualized per-VF. If the GGTT range has changed, we have to perform
+ * fixups - shift all GGTT references used anywhere within the driver. Once
+ * the fixups in this function succeed, the GuC bound to this GT may be asked
+ * to continue normal operation.
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+static int gt_vf_post_migration_fixups(struct xe_gt *gt)
+{
+ s64 shift;
+ int err;
+
+ err = xe_gt_sriov_vf_query_config(gt);
+ if (err)
+ return err;
+
+ shift = xe_gt_sriov_vf_ggtt_shift(gt);
+ if (shift) {
+ xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
+ /* FIXME: add the recovery steps */
+ xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
+ }
+ return 0;
}
static void vf_post_migration_recovery(struct xe_device *xe)
{
- int err;
+ unsigned long fixed_gts = 0;
+ int id, err;
drm_dbg(&xe->drm, "migration recovery in progress\n");
xe_pm_runtime_get(xe);
- err = vf_post_migration_requery_guc(xe);
- if (vf_post_migration_imminent(xe))
- goto defer;
- if (unlikely(err))
+
+ if (!vf_migration_supported(xe)) {
+ xe_sriov_err(xe, "migration not supported by this module version\n");
+ err = -ENOTRECOVERABLE;
+ goto fail;
+ }
+
+ while (id = vf_get_next_migrated_gt_id(xe), id >= 0) {
+ struct xe_gt *gt = xe_device_get_gt(xe, id);
+
+ err = gt_vf_post_migration_fixups(gt);
+ if (err)
+ goto fail;
+
+ set_bit(id, &fixed_gts);
+ }
+
+ err = vf_post_migration_notify_resfix_done(xe, fixed_gts);
+ if (err)
goto fail;
- /* FIXME: add the recovery steps */
- vf_post_migration_notify_resfix_done(xe);
xe_pm_runtime_put(xe);
drm_notice(&xe->drm, "migration recovery ended\n");
return;
-defer:
- xe_pm_runtime_put(xe);
- drm_dbg(&xe->drm, "migration recovery deferred\n");
- return;
fail:
xe_pm_runtime_put(xe);
drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
@@ -224,18 +268,23 @@ static void migration_worker_func(struct work_struct *w)
vf_post_migration_recovery(xe);
}
-static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
+/*
+ * Check if post-restore recovery is coming on any of the GTs.
+ * @xe: the &xe_device struct instance
+ *
+ * Return: True if migration recovery worker will soon be running. Any worker currently
+ * executing does not affect the result.
+ */
+static bool vf_ready_to_recovery_on_any_gts(struct xe_device *xe)
{
struct xe_gt *gt;
unsigned int id;
for_each_gt(gt, xe, id) {
- if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
- xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n");
- return false;
- }
+ if (test_bit(id, &xe->sriov.vf.migration.gt_flags))
+ return true;
}
- return true;
+ return false;
}
/**
@@ -250,13 +299,9 @@ void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
xe_assert(xe, IS_SRIOV_VF(xe));
- if (!vf_ready_to_recovery_on_all_gts(xe))
+ if (!vf_ready_to_recovery_on_any_gts(xe))
return;
- WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
- /* Ensure other threads see that no flags are set now. */
- smp_mb();
-
started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
drm_info(&xe->drm, "VF migration recovery %s\n", started ?
"scheduled" : "already in progress");
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
new file mode 100644
index 000000000000..8300416a6226
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_TYPES_H_
+#define _XE_SRIOV_VF_TYPES_H_
+
+#include <linux/types.h>
+#include <linux/workqueue_types.h>
+
+/**
+ * struct xe_sriov_vf_relay_version - PF ABI version details.
+ */
+struct xe_sriov_vf_relay_version {
+ /** @major: major version. */
+ u16 major;
+ /** @minor: minor version. */
+ u16 minor;
+};
+
+/**
+ * struct xe_device_vf - Xe Virtual Function related data
+ *
+ * The data in this structure is valid only if driver is running in the
+ * @XE_SRIOV_MODE_VF mode.
+ */
+struct xe_device_vf {
+ /** @pf_version: negotiated VF/PF ABI version. */
+ struct xe_sriov_vf_relay_version pf_version;
+
+ /** @migration: VF Migration state data */
+ struct {
+ /** @migration.worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
+ unsigned long gt_flags;
+ } migration;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_step.c b/drivers/gpu/drm/xe/xe_step.c
index c77b5c317fa0..10e88f2c9615 100644
--- a/drivers/gpu/drm/xe/xe_step.c
+++ b/drivers/gpu/drm/xe/xe_step.c
@@ -5,6 +5,7 @@
#include "xe_step.h"
+#include <kunit/visibility.h>
#include <linux/bitfield.h>
#include "xe_device.h"
@@ -255,3 +256,4 @@ const char *xe_step_name(enum xe_step step)
return "**";
}
}
+EXPORT_SYMBOL_IF_KUNIT(xe_step_name);
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index cb813b337fd3..41705f5d52e3 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -10,9 +10,11 @@
#include <linux/pci.h>
#include <linux/sysfs.h>
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_heci_gsc.h"
+#include "xe_i2c.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
#include "xe_vsec.h"
@@ -28,20 +30,32 @@
* This is implemented by loading the driver with bare minimum (no drm card) to allow the firmware
* to be flashed through mei and collect telemetry. The driver's probe flow is modified
* such that it enters survivability mode when pcode initialization is incomplete and boot status
- * denotes a failure. The driver then populates the survivability_mode PCI sysfs indicating
- * survivability mode and provides additional information required for debug
+ * denotes a failure.
*
- * KMD exposes below admin-only readable sysfs in survivability mode
+ * Survivability mode can also be entered manually using the survivability mode attribute
+ * available through configfs, which is beneficial in several use cases. It can be used to
+ * address scenarios where pcode does not detect a failure, or for validation purposes. It
+ * can also be used in In-Field-Repair (IFR) to repair a single card without impacting the
+ * other cards in a node.
*
- * device/survivability_mode: The presence of this file indicates that the card is in survivability
- * mode. Also, provides additional information on why the driver entered
- * survivability mode.
+ * Use the command below to enable survivability mode manually::
*
- * Capability Information - Provides boot status
- * Postcode Information - Provides information about the failure
- * Overflow Information - Provides history of previous failures
- * Auxiliary Information - Certain failures may have information in
- * addition to postcode information
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ *
+ * Refer to :ref:`xe_configfs` for more details on how to use configfs.
+ *
+ * Survivability mode is indicated by the admin-only readable sysfs file below, which
+ * provides additional debug information::
+ *
+ *	/sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * Capability Information:
+ * Provides boot status
+ * Postcode Information:
+ * Provides information about the failure
+ * Overflow Information:
+ *	Provides history of previous failures
+ * Auxiliary Information:
+ * Certain failures may have information in addition to postcode information
*/
static u32 aux_history_offset(u32 reg_value)
@@ -133,6 +147,7 @@ static void xe_survivability_mode_fini(void *arg)
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct device *dev = &pdev->dev;
+ xe_configfs_clear_survivability_mode(pdev);
sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
}
@@ -159,20 +174,22 @@ static int enable_survivability_mode(struct pci_dev *pdev)
survivability->mode = true;
ret = xe_heci_gsc_init(xe);
- if (ret) {
- /*
- * But if it fails, device can't enter survivability
- * so move it back for correct error handling
- */
- survivability->mode = false;
- return ret;
- }
+ if (ret)
+ goto err;
xe_vsec_init(xe);
+ ret = xe_i2c_probe(xe);
+ if (ret)
+ goto err;
+
dev_err(dev, "In Survivability Mode\n");
return 0;
+
+err:
+ survivability->mode = false;
+ return ret;
}
/**
@@ -186,23 +203,40 @@ bool xe_survivability_mode_is_enabled(struct xe_device *xe)
return xe->survivability.mode;
}
-/*
- * survivability_mode_requested - check if it's possible to enable
- * survivability mode and that was requested by firmware
+/**
+ * xe_survivability_mode_is_requested - check if survivability mode was
+ * requested by firmware or userspace and can be enabled
+ * @xe: xe device instance
*
- * This function reads the boot status from Pcode.
+ * This function reads configfs and boot status from Pcode.
*
* Return: true if platform support is available and boot status indicates
- * failure, false otherwise.
+ * failure or if survivability mode is requested, false otherwise.
*/
-static bool survivability_mode_requested(struct xe_device *xe)
+bool xe_survivability_mode_is_requested(struct xe_device *xe)
{
struct xe_survivability *survivability = &xe->survivability;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u32 data;
+ bool survivability_mode;
+
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
+ return false;
+
+ survivability_mode = xe_configfs_get_survivability_mode(pdev);
- if (!IS_DGFX(xe) || xe->info.platform < XE_BATTLEMAGE || IS_SRIOV_VF(xe))
+ if (xe->info.platform < XE_BATTLEMAGE) {
+ if (survivability_mode) {
+ dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n");
+ xe_configfs_clear_survivability_mode(pdev);
+ }
return false;
+ }
+
+ /* Enable survivability mode if set via configfs */
+ if (survivability_mode)
+ return true;
data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
@@ -226,7 +260,7 @@ int xe_survivability_mode_enable(struct xe_device *xe)
struct xe_survivability_info *info;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- if (!survivability_mode_requested(xe))
+ if (!xe_survivability_mode_is_requested(xe))
return 0;
survivability->size = MAX_SCRATCH_MMIO;
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
index d7e64885570d..02231c2bf008 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -12,5 +12,6 @@ struct xe_device;
int xe_survivability_mode_enable(struct xe_device *xe);
bool xe_survivability_mode_is_enabled(struct xe_device *xe);
+bool xe_survivability_mode_is_requested(struct xe_device *xe);
#endif /* _XE_SURVIVABILITY_MODE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 0b6547c06961..a7ff5975873f 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -3,20 +3,34 @@
* Copyright © 2024 Intel Corporation
*/
+#include <drm/drm_drv.h>
+
#include "xe_bo.h"
+#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
+#include "xe_pm.h"
#include "xe_pt.h"
#include "xe_svm.h"
+#include "xe_tile.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
- /* Not reliable without notifier lock */
- return range->base.flags.has_devmem_pages;
+ /*
+	 * Advisory-only check of whether the range is currently backed by VRAM
+	 * memory.
+ */
+
+ struct drm_gpusvm_range_flags flags = {
+ /* Pairs with WRITE_ONCE in drm_gpusvm.c */
+ .__flags = READ_ONCE(range->base.flags.__flags),
+ };
+
+ return flags.has_devmem_pages;
}
static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
@@ -35,21 +49,6 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
return gpusvm_to_vm(r->gpusvm);
}
-static unsigned long xe_svm_range_start(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_start(&range->base);
-}
-
-static unsigned long xe_svm_range_end(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_end(&range->base);
-}
-
-static unsigned long xe_svm_range_size(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_size(&range->base);
-}
-
#define range_debug(r__, operaton__) \
vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
"%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
@@ -93,11 +92,6 @@ static void xe_svm_range_free(struct drm_gpusvm_range *range)
kfree(range);
}
-static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
-{
- return container_of(r, struct xe_svm_range, base);
-}
-
static void
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
const struct mmu_notifier_range *mmu_range)
@@ -151,7 +145,12 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
tile_mask |= BIT(id);
- range->tile_invalidated |= BIT(id);
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_mapping()
+ */
+ WRITE_ONCE(range->tile_invalidated,
+ range->tile_invalidated | BIT(id));
}
return tile_mask;
@@ -177,14 +176,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
{
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
- struct xe_tile *tile;
struct drm_gpusvm_range *r, *first;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
u8 tile_mask = 0;
- u8 id;
- u32 fence_id = 0;
long err;
xe_svm_assert_in_notifier(vm);
@@ -230,42 +224,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- for_each_tile(tile, xe, id) {
- if (tile_mask & BIT(id)) {
- int err;
-
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->primary_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
-
- if (!tile->media_gt)
- continue;
-
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->media_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
- }
- }
-
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
+ WARN_ON_ONCE(err);
range_notifier_event_end:
r = first;
@@ -339,6 +299,8 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
up_write(&vm->lock);
}
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+
static struct xe_vram_region *page_to_vr(struct page *page)
{
return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
@@ -525,16 +487,18 @@ static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
}
-static struct xe_bo *to_xe_bo(struct drm_gpusvm_devmem *devmem_allocation)
+static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
{
return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
}
-static void xe_svm_devmem_release(struct drm_gpusvm_devmem *devmem_allocation)
+static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
{
struct xe_bo *bo = to_xe_bo(devmem_allocation);
+ struct xe_device *xe = xe_bo_device(bo);
xe_bo_put_async(bo);
+ xe_pm_runtime_put(xe);
}
static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
@@ -547,7 +511,7 @@ static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
return &tile->mem.vram.ttm.mm;
}
-static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocation,
+static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
unsigned long npages, unsigned long *pfn)
{
struct xe_bo *bo = to_xe_bo(devmem_allocation);
@@ -570,13 +534,15 @@ static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocatio
return 0;
}
-static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
+static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
.devmem_release = xe_svm_devmem_release,
.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
.copy_to_devmem = xe_svm_copy_to_devmem,
.copy_to_ram = xe_svm_copy_to_ram,
};
+#endif
+
static const struct drm_gpusvm_ops gpusvm_ops = {
.range_alloc = xe_svm_range_alloc,
.range_free = xe_svm_range_free,
@@ -645,78 +611,190 @@ void xe_svm_fini(struct xe_vm *vm)
}
static bool xe_svm_range_is_valid(struct xe_svm_range *range,
- struct xe_tile *tile)
+ struct xe_tile *tile,
+ bool devmem_only)
+{
+ return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+ range->tile_invalidated) &&
+ (!devmem_only || xe_svm_range_in_vram(range)));
+}
+
+/**
+ * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
+ * @vm: xe_vm pointer
+ * @range: Pointer to the SVM range structure
+ *
+ * The xe_svm_range_migrate_to_smem() function checks whether the range has
+ * pages in VRAM and, if so, migrates them to SMEM.
+ */
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
+{
+ if (xe_svm_range_in_vram(range))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+}
+
+/**
+ * xe_svm_range_validate() - Check if the SVM range is valid
+ * @vm: xe_vm pointer
+ * @range: Pointer to the SVM range structure
+ * @tile_mask: Mask representing the tiles to be checked
+ * @devmem_preferred : if true range needs to be in devmem
+ *
+ * The xe_svm_range_validate() function checks if a range is
+ * valid and located in the desired memory region.
+ *
+ * Return: true if the range is valid, false otherwise
+ */
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred)
+{
+ bool ret;
+
+ xe_svm_notifier_lock(vm);
+
+ ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
+ (devmem_preferred == range->base.flags.has_devmem_pages);
+
+ xe_svm_notifier_unlock(vm);
+
+ return ret;
+}
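A prefetch-style caller might use this to skip work that is already in the right place; a minimal sketch, with the tile mask and the want_vram preference flag invented for illustration:

    /* Hypothetical: nothing to do if the range is already bound on
     * tile 0 and resident where the caller prefers it. */
    if (xe_svm_range_validate(vm, range, BIT(0), want_vram))
        return 0;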
+
+/**
+ * xe_svm_find_vma_start - Find start of CPU VMA
+ * @vm: xe_vm pointer
+ * @start: start address
+ * @end: end address
+ * @vma: Pointer to struct xe_vma
+ *
+ * This function searches for a CPU VMA within the specified range
+ * [start, end] in the given VM. It adjusts the range based on the xe_vma
+ * start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
+ *
+ * Return: The starting address of the VMA within the range,
+ * or ULONG_MAX if no VMA is found
+ */
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
{
- return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id);
+ return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
+ max(start, xe_vma_start(vma)),
+ min(end, xe_vma_end(vma)));
}
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
{
return &tile->mem.vram;
}
-static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms)
{
- struct mm_struct *mm = vm->svm.gpusvm.mm;
+ struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = xe->drm.dev;
struct xe_vram_region *vr = tile_to_vr(tile);
struct drm_buddy_block *block;
struct list_head *blocks;
struct xe_bo *bo;
- ktime_t end = 0;
- int err;
+ ktime_t time_end = 0;
+ int err, idx;
- range_debug(range, "ALLOCATE VRAM");
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return -ENODEV;
- if (!mmget_not_zero(mm))
- return -EFAULT;
- mmap_read_lock(mm);
+ xe_pm_runtime_get(xe);
-retry:
- bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
- xe_svm_range_size(range),
+ retry:
+ bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
ttm_bo_type_device,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_CPU_ADDR_MIRROR);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &end))
+ if (xe_vm_validate_should_retry(NULL, err, &time_end))
goto retry;
- goto unlock;
+ goto out_pm_put;
}
- drm_gpusvm_devmem_init(&bo->devmem_allocation,
- vm->xe->drm.dev, mm,
- &gpusvm_devmem_ops,
- &tile->mem.vram.dpagemap,
- xe_svm_range_size(range));
+ drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
+ &dpagemap_devmem_ops,
+ &tile->mem.vram.dpagemap,
+ end - start);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
list_for_each_entry(block, blocks, link)
block->private = vr;
xe_bo_get(bo);
- err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
- &bo->devmem_allocation, ctx);
+
+ /* Ensure the device has a pm ref while there are device pages active. */
+ xe_pm_runtime_get_noresume(xe);
+ err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
+ start, end, timeslice_ms,
+ xe_svm_devm_owner(xe));
if (err)
xe_svm_devmem_release(&bo->devmem_allocation);
xe_bo_unlock(bo);
xe_bo_put(bo);
-unlock:
- mmap_read_unlock(mm);
- mmput(mm);
+out_pm_put:
+ xe_pm_runtime_put(xe);
+ drm_dev_exit(idx);
return err;
}
+#endif
+
+static bool supports_4K_migration(struct xe_device *xe)
+{
+ if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ return false;
+
+ return true;
+}
+
+/**
+ * xe_svm_range_needs_migrate_to_vram() - Decide if an SVM range needs migration to VRAM
+ * @range: SVM range for which migration needs to be decided
+ * @vma: vma which has range
+ * @preferred_region_is_vram: preferred region for range is vram
+ *
+ * Return: True if the range needs migration and migration is supported, false otherwise
+ */
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram)
+{
+ struct xe_vm *vm = range_to_vm(&range->base);
+ u64 range_size = xe_svm_range_size(range);
+
+ if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
+ return false;
+
+ xe_assert(vm->xe, IS_DGFX(vm->xe));
+
+ if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+ drm_info(&vm->xe->drm, "Range is already in VRAM\n");
+ return false;
+ }
+
+ if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+ drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
+ return false;
+ }
+
+ return true;
+}
/**
* xe_svm_handle_pagefault() - SVM handle page fault
* @vm: The VM.
* @vma: The CPU address mirror VMA.
- * @tile: The tile upon the fault occurred.
+ * @gt: The GT upon which the fault occurred.
* @fault_addr: The GPU fault address.
* @atomic: The fault atomic access bit.
*
@@ -726,72 +804,87 @@ unlock:
* Return: 0 on success, negative error code on error.
*/
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic)
{
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
.devmem_possible = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
.check_pages_threshold = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
+ .devmem_only = atomic && IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
+ .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
+ vm->xe->atomic_svm_timeslice_ms : 0,
};
struct xe_svm_range *range;
- struct drm_gpusvm_range *r;
- struct drm_exec exec;
struct dma_fence *fence;
+ struct xe_tile *tile = gt_to_tile(gt);
+ int migrate_try_count = ctx.devmem_only ? 3 : 1;
ktime_t end = 0;
int err;
lockdep_assert_held_write(&vm->lock);
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
+
retry:
/* Always process UNMAPs first so view SVM ranges is current */
err = xe_svm_garbage_collector(vm);
if (err)
return err;
- r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
- xe_vma_start(vma), xe_vma_end(vma),
- &ctx);
- if (IS_ERR(r))
- return PTR_ERR(r);
+ range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
- range = to_xe_range(r);
- if (xe_svm_range_is_valid(range, tile))
+ if (IS_ERR(range))
+ return PTR_ERR(range);
+
+ if (ctx.devmem_only && !range->base.flags.migrate_devmem)
+ return -EACCES;
+
+ if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
return 0;
range_debug(range, "PAGE FAULT");
- /* XXX: Add migration policy, for now migrate range once */
- if (!range->skip_migrate && range->base.flags.migrate_devmem &&
- xe_svm_range_size(range) >= SZ_64K) {
- range->skip_migrate = true;
-
- err = xe_svm_alloc_vram(vm, tile, range, &ctx);
+ if (--migrate_try_count >= 0 &&
+ xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
+ err = xe_svm_alloc_vram(tile, range, &ctx);
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
- drm_dbg(&vm->xe->drm,
- "VRAM allocation failed, falling back to "
- "retrying fault, asid=%u, errno=%pe\n",
- vm->usm.asid, ERR_PTR(err));
- goto retry;
+ if (migrate_try_count || !ctx.devmem_only) {
+ drm_dbg(&vm->xe->drm,
+ "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ goto retry;
+ } else {
+ drm_err(&vm->xe->drm,
+ "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ return err;
+ }
}
}
range_debug(range, "GET PAGES");
- err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
+ err = xe_svm_range_get_pages(vm, range, &ctx);
/* Corner where CPU mappings have changed */
if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
- if (err == -EOPNOTSUPP) {
- range_debug(range, "PAGE FAULT - EVICT PAGES");
- drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ if (migrate_try_count > 0 || !ctx.devmem_only) {
+ drm_dbg(&vm->xe->drm,
+ "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ range_debug(range, "PAGE FAULT - RETRY PAGES");
+ goto retry;
+ } else {
+ drm_err(&vm->xe->drm,
+ "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
}
- drm_dbg(&vm->xe->drm,
- "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
- vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
- range_debug(range, "PAGE FAULT - RETRY PAGES");
- goto retry;
}
if (err) {
range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
@@ -801,32 +894,21 @@ retry:
range_debug(range, "PAGE FAULT - BIND");
retry_bind:
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- err = drm_exec_lock_obj(&exec, vm->gpuvm.r_obj);
- drm_exec_retry_on_contention(&exec);
- if (err) {
- drm_exec_fini(&exec);
- goto err_out;
- }
-
- fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
- if (IS_ERR(fence)) {
- drm_exec_fini(&exec);
- err = PTR_ERR(fence);
- if (err == -EAGAIN) {
- range_debug(range, "PAGE FAULT - RETRY BIND");
- goto retry;
- }
- if (xe_vm_validate_should_retry(&exec, err, &end))
- goto retry_bind;
- goto err_out;
+ xe_vm_lock(vm, false);
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ if (IS_ERR(fence)) {
+ xe_vm_unlock(vm);
+ err = PTR_ERR(fence);
+ if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
}
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry_bind;
+ goto err_out;
}
- drm_exec_fini(&exec);
-
- if (xe_modparam.always_migrate_to_vram)
- range->skip_migrate = false;
+ xe_vm_unlock(vm);
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -862,10 +944,85 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
*/
int xe_svm_bo_evict(struct xe_bo *bo)
{
- return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
+ return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
+}
+
+/**
+ * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
+ * @vm: xe_vm pointer
+ * @addr: address for which range needs to be found/inserted
+ * @vma: Pointer to struct xe_vma which mirrors CPU
+ * @ctx: GPU SVM context
+ *
+ * This function finds or inserts a newly allocated SVM range based on the
+ * address.
+ *
+ * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
+ */
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_gpusvm_range *r;
+
+ r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
+ xe_vma_start(vma), xe_vma_end(vma), ctx);
+ if (IS_ERR(r))
+ return ERR_CAST(r);
+
+ return to_xe_range(r);
+}
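A minimal caller sketch for the helper above; example_lookup and its -ENOENT handling are illustrative assumptions, mirroring how the prefetch path later in this patch consumes the return value:

/* Hypothetical caller; not part of the patch. */
static struct xe_svm_range *
example_lookup(struct xe_vm *vm, struct xe_vma *vma, u64 addr,
	       struct drm_gpusvm_ctx *ctx)
{
	struct xe_svm_range *range;

	range = xe_svm_range_find_or_insert(vm, addr, vma, ctx);
	if (PTR_ERR(range) == -ENOENT)
		return NULL;	/* no CPU mapping backs addr; skip it */
	if (IS_ERR(range))
		return range;

	/* range lies within [xe_vma_start(vma), xe_vma_end(vma)) */
	return range;
}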
+
+/**
+ * xe_svm_range_get_pages() - Get pages for a SVM range
+ * @vm: Pointer to the struct xe_vm
+ * @range: Pointer to the xe SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for an SVM range and ensures they are mapped for
+ * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx)
+{
+ int err = 0;
+
+ err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ }
+
+ return err;
+}
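The error classification used by the fault handler above can be summarized in a small predicate; a sketch, assuming the same retryable-errno set (-EOPNOTSUPP, -EFAULT, -EPERM) the handler checks:

/* Hypothetical helper mirroring the retry logic in the fault handler. */
static bool example_get_pages_retryable(int err)
{
	/* CPU mappings changed while collecting pages: retry the fault. */
	return err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM;
}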
+
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+
+/**
+ * xe_svm_alloc_vram() - Allocate device memory pages for range,
+ * migrating existing data.
+ * @tile: tile to allocate vram from
+ * @range: SVM range
+ * @ctx: DRM GPU SVM context
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_pagemap *dpagemap;
+
+ xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
+ range_debug(range, "ALLOCATE VRAM");
+
+ dpagemap = xe_tile_local_pagemap(tile);
+ return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
+ xe_svm_range_end(range),
+ range->base.gpusvm->mm,
+ ctx->timeslice_ms);
}
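Putting the VRAM helpers together, a hedged sketch of the migrate-then-back-off pattern the fault handler follows; example_migrate is hypothetical, and the timeslice doubling mirrors the retry sites above:

/* Hypothetical sketch; not part of the patch. */
static int example_migrate(struct xe_tile *tile, struct xe_vma *vma,
			   struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	int err;

	if (!xe_svm_range_needs_migrate_to_vram(range, vma, true))
		return 0;

	err = xe_svm_alloc_vram(tile, range, ctx);
	if (err)
		ctx->timeslice_ms <<= 1;	/* back off before the retry */

	return err;
}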
-#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
static struct drm_pagemap_device_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
struct device *dev,
@@ -890,6 +1047,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
.device_map = xe_drm_pagemap_device_map,
+ .populate_mm = xe_drm_pagemap_populate_mm,
};
/**
@@ -921,7 +1079,7 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
vr->pagemap.range.start = res->start;
vr->pagemap.range.end = res->end;
vr->pagemap.nr_range = 1;
- vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
+ vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
vr->pagemap.owner = xe_svm_devm_owner(xe);
addr = devm_memremap_pages(dev, &vr->pagemap);
@@ -942,8 +1100,27 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
return 0;
}
#else
+int xe_svm_alloc_vram(struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
return 0;
}
#endif
+
+/**
+ * xe_svm_flush() - SVM flush
+ * @vm: The VM.
+ *
+ * Flush all SVM actions.
+ */
+void xe_svm_flush(struct xe_vm *vm)
+{
+ if (xe_vm_in_fault_mode(vm))
+ flush_work(&vm->svm.garbage_collector.work);
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index e059590e5076..da9a69ea0bb1 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -6,16 +6,19 @@
#ifndef _XE_SVM_H_
#define _XE_SVM_H_
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+
#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>
#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
struct xe_bo;
-struct xe_vram_region;
+struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
+struct xe_vram_region;
/** struct xe_svm_range - SVM range */
struct xe_svm_range {
@@ -36,14 +39,8 @@ struct xe_svm_range {
* range. Protected by GPU SVM notifier lock.
*/
u8 tile_invalidated;
- /**
- * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
- * locking.
- */
- u8 skip_migrate :1;
};
-#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
* xe_svm_range_pages_valid() - SVM range pages valid
* @range: SVM range
@@ -64,7 +61,7 @@ void xe_svm_fini(struct xe_vm *vm);
void xe_svm_close(struct xe_vm *vm);
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic);
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
@@ -72,7 +69,121 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+
+int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);
+
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx);
+
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram);
+
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);
+
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred);
+
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
+
+/**
+ * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
+ * @range: SVM range
+ *
+ * Return: True if SVM range has a DMA mapping, False otherwise
+ */
+static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+{
+ lockdep_assert_held(&range->base.gpusvm->notifier_lock);
+ return range->base.flags.has_dma_mapping;
+}
+
+/**
+ * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
+ * @r: Pointer to the drm_gpusvm_range structure
+ *
+ * This function takes a pointer to a drm_gpusvm_range structure and
+ * converts it to a pointer to the containing xe_svm_range structure.
+ *
+ * Return: Pointer to the xe_svm_range structure
+ */
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return container_of(r, struct xe_svm_range, base);
+}
+
+/**
+ * xe_svm_range_start() - SVM range start address
+ * @range: SVM range
+ *
+ * Return: start address of range.
+ */
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_start(&range->base);
+}
+
+/**
+ * xe_svm_range_end() - SVM range end address
+ * @range: SVM range
+ *
+ * Return: end address of range.
+ */
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_end(&range->base);
+}
+
+/**
+ * xe_svm_range_size() - SVM range size
+ * @range: SVM range
+ *
+ * Return: Size of range.
+ */
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_size(&range->base);
+}
+
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
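Since xe_svm_range_has_dma_mapping() asserts the notifier lock, a minimal sketch of the expected locking pattern using the wrappers above; example_range_mapped is hypothetical:

/* Hypothetical illustration of the notifier locking contract. */
static bool example_range_mapped(struct xe_vm *vm, struct xe_svm_range *range)
{
	bool mapped;

	xe_svm_notifier_lock(vm);
	mapped = xe_svm_range_has_dma_mapping(range);
	xe_svm_notifier_unlock(vm);

	return mapped;
}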
+
+void xe_svm_flush(struct xe_vm *vm);
+
#else
+#include <linux/interval_tree.h>
+
+struct drm_pagemap_device_addr;
+struct drm_gpusvm_ctx;
+struct drm_gpusvm_range;
+struct xe_bo;
+struct xe_gt;
+struct xe_vm;
+struct xe_vma;
+struct xe_tile;
+struct xe_vram_region;
+
+#define XE_INTERCONNECT_VRAM 1
+
+struct xe_svm_range {
+ struct {
+ struct interval_tree_node itree;
+ const struct drm_pagemap_device_addr *dma_addr;
+ } base;
+ u32 tile_present;
+ u32 tile_invalidated;
+};
+
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
return false;
@@ -102,7 +213,7 @@ void xe_svm_close(struct xe_vm *vm)
static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic)
{
return 0;
@@ -124,27 +235,87 @@ static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
-#endif
-/**
- * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
- * @range: SVM range
- *
- * Return: True if SVM range has a DMA mapping, False otherwise
- */
-static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+static inline int
+xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
{
- lockdep_assert_held(&range->base.gpusvm->notifier_lock);
- return range->base.flags.has_dma_mapping;
+ return -EOPNOTSUPP;
}
-#define xe_svm_assert_in_notifier(vm__) \
- lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+static inline
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
+{
+ return ERR_PTR(-EINVAL);
+}
-#define xe_svm_notifier_lock(vm__) \
- drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+static inline
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx)
+{
+ return -EINVAL;
+}
-#define xe_svm_notifier_unlock(vm__) \
- drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return NULL;
+}
+
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return 0;
+}
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ u32 region)
+{
+ return false;
+}
+
+static inline
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
+{
+}
+
+static inline
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred)
+{
+ return false;
+}
+
+static inline
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
+{
+ return ULONG_MAX;
+}
+
+#define xe_svm_assert_in_notifier(...) do {} while (0)
+#define xe_svm_range_has_dma_mapping(...) false
+
+static inline void xe_svm_notifier_lock(struct xe_vm *vm)
+{
+}
+
+static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
+{
+}
+
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
+#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 0771acbbf367..86e9811e60ba 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -10,6 +10,7 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
+#include "xe_memirq.h"
#include "xe_migrate.h"
#include "xe_pcode.h"
#include "xe_sa.h"
@@ -87,13 +88,9 @@
*/
static int xe_tile_alloc(struct xe_tile *tile)
{
- struct drm_device *drm = &tile_to_xe(tile)->drm;
-
- tile->mem.ggtt = drmm_kzalloc(drm, sizeof(*tile->mem.ggtt),
- GFP_KERNEL);
+ tile->mem.ggtt = xe_ggtt_alloc(tile);
if (!tile->mem.ggtt)
return -ENOMEM;
- tile->mem.ggtt->tile = tile;
return 0;
}
@@ -178,6 +175,12 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
int xe_tile_init(struct xe_tile *tile)
{
+ int err;
+
+ err = xe_memirq_init(&tile->memirq);
+ if (err)
+ return err;
+
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
if (IS_ERR(tile->mem.kernel_bb_pool))
return PTR_ERR(tile->mem.kernel_bb_pool);
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index eb939316d55b..cc33e8733983 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -16,4 +16,21 @@ int xe_tile_init(struct xe_tile *tile);
void xe_tile_migrate_wait(struct xe_tile *tile);
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
+{
+ return &tile->mem.vram.dpagemap;
+}
+#else
+static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
+{
+ return NULL;
+}
+#endif
+
+static inline bool xe_tile_is_root(struct xe_tile *tile)
+{
+ return tile->id == 0;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
new file mode 100644
index 000000000000..f221dbed16f0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gtt_defs.h"
+
+#include "xe_assert.h"
+#include "xe_ggtt.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_vf.h"
+#include "xe_wopcm.h"
+
+static int vf_init_ggtt_balloons(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ tile->sriov.vf.ggtt_balloon[0] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
+
+ tile->sriov.vf.ggtt_balloon[1] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
+ }
+
+ return 0;
+}
+
+/**
+ * xe_tile_sriov_vf_balloon_ggtt_locked - Insert balloon nodes to limit used GGTT address range.
+ * @tile: the &xe_tile struct instance
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
+{
+ u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt);
+ u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ u64 wopcm = xe_wopcm_size(xe);
+ u64 start, end;
+ int err;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(xe));
+ xe_tile_assert(tile, ggtt_size);
+ lockdep_assert_held(&tile->mem.ggtt->lock);
+
+ /*
+ * VF can only use part of the GGTT as allocated by the PF:
+ *
+ * WOPCM GUC_GGTT_TOP
+ * |<------------ Total GGTT size ------------------>|
+ *
+ * VF GGTT base -->|<- size ->|
+ *
+ * +--------------------+----------+-----------------+
+ * |////////////////////| block |\\\\\\\\\\\\\\\\\|
+ * +--------------------+----------+-----------------+
+ *
+ * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
+ */
+
+ if (ggtt_base < wopcm || ggtt_base > GUC_GGTT_TOP ||
+ ggtt_size > GUC_GGTT_TOP - ggtt_base) {
+ xe_sriov_err(xe, "tile%u: Invalid GGTT configuration: %#llx-%#llx\n",
+ tile->id, ggtt_base, ggtt_base + ggtt_size - 1);
+ return -ERANGE;
+ }
+
+ start = wopcm;
+ end = ggtt_base;
+ if (end != start) {
+ err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[0],
+ start, end);
+ if (err)
+ return err;
+ }
+
+ start = ggtt_base + ggtt_size;
+ end = GUC_GGTT_TOP;
+ if (end != start) {
+ err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[1],
+ start, end);
+ if (err) {
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
+ return err;
+ }
+ }
+
+ return 0;
+}
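A worked example of the ballooning arithmetic, with made-up numbers:

/*
 * Worked example (illustrative numbers only): wopcm = 2 MiB,
 * VF GGTT base = 1 GiB, VF GGTT size = 512 MiB.
 *
 *   balloon[0]: [2 MiB,   1 GiB)         masks the area below the VF range
 *   VF usable:  [1 GiB,   1.5 GiB)
 *   balloon[1]: [1.5 GiB, GUC_GGTT_TOP)  masks the area above the VF range
 */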
+
+static int vf_balloon_ggtt(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+ int err;
+
+ mutex_lock(&ggtt->lock);
+ err = xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+ mutex_unlock(&ggtt->lock);
+
+ return err;
+}
+
+/**
+ * xe_tile_sriov_vf_deballoon_ggtt_locked - Remove balloon nodes.
+ * @tile: the &xe_tile struct instance
+ */
+void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile)
+{
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
+}
+
+static void vf_deballoon_ggtt(struct xe_tile *tile)
+{
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
+ mutex_unlock(&tile->mem.ggtt->lock);
+}
+
+static void vf_fini_ggtt_balloons(struct xe_tile *tile)
+{
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
+}
+
+static void cleanup_ggtt(struct drm_device *drm, void *arg)
+{
+ struct xe_tile *tile = arg;
+
+ vf_deballoon_ggtt(tile);
+ vf_fini_ggtt_balloons(tile);
+}
+
+/**
+ * xe_tile_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ int err;
+
+ err = vf_init_ggtt_balloons(tile);
+ if (err)
+ return err;
+
+ err = vf_balloon_ggtt(tile);
+ if (err) {
+ vf_fini_ggtt_balloons(tile);
+ return err;
+ }
+
+ return drmm_add_action_or_reset(&xe->drm, cleanup_ggtt, tile);
+}
+
+/**
+ * DOC: GGTT nodes shifting during VF post-migration recovery
+ *
+ * The first fixup applied to the VF KMD structures as part of post-migration
+ * recovery is shifting nodes within &xe_ggtt instance. The nodes are moved
+ * from range previously assigned to this VF, into newly provisioned area.
+ * The changes include balloons, which are resized accordingly.
+ *
+ * The balloon nodes are there to eliminate unavailable ranges from use: one
+ * reserves the GGTT area below the range for current VF, and another one
+ * reserves area above.
+ *
+ * Below is a GGTT layout of an example VF, with a certain address range assigned to
+ * said VF, and inaccessible areas above and below:
+ *
+ * 0 4GiB
+ * |<--------------------------- Total GGTT size ----------------------------->|
+ * WOPCM GUC_TOP
+ * |<-------------- Area mappable by xe_ggtt instance ---------------->|
+ *
+ * +---+---------------------------------+----------+----------------------+---+
+ * |\\\|/////////////////////////////////| VF mem |//////////////////////|\\\|
+ * +---+---------------------------------+----------+----------------------+---+
+ *
+ * Hardware enforced access rules before migration:
+ *
+ * |<------- inaccessible for VF ------->|<VF owned>|<-- inaccessible for VF ->|
+ *
+ * GGTT nodes used for tracking allocations:
+ *
+ * |<---------- balloon ------------>|<- nodes->|<----- balloon ------>|
+ *
+ * After the migration, the GGTT area assigned to the VF might have shifted,
+ * either to a lower or to a higher address. But we expect the total size and
+ * the extra areas to be identical, as migration can only happen between
+ * matching platforms.
+ * Below is an example GGTT layout of the VF after migration. The VF's GGTT
+ * content has been moved to a new area, and we receive its address from GuC:
+ *
+ * +---+----------------------+----------+---------------------------------+---+
+ * |\\\|//////////////////////| VF mem |/////////////////////////////////|\\\|
+ * +---+----------------------+----------+---------------------------------+---+
+ *
+ * Hardware enforced access rules after migration:
+ *
+ * |<- inaccessible for VF -->|<VF owned>|<------- inaccessible for VF ------->|
+ *
+ * So the VF has a new slice of GGTT assigned, and during migration process, the
+ * memory content was copied to that new area. But the &xe_ggtt nodes are still
+ * tracking allocations using the old addresses. The nodes within VF owned area
+ * have to be shifted, and balloon nodes need to be resized to properly mask out
+ * areas not owned by the VF.
+ *
+ * Fixed &xe_ggtt nodes used for tracking allocations:
+ *
+ * |<------ balloon ------>|<- nodes->|<----------- balloon ----------->|
+ *
+ * Due to the use of GPU profiles, we do not expect the old and new GGTT areas
+ * to overlap; but our node shifting will fix addresses properly regardless.
+ */
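A sketch of the per-node fixup this DOC section implies; the node layout is assumed (a drm_mm-style start offset), and the real work happens inside xe_ggtt_shift_nodes_locked():

/* Hypothetical per-node fixup; assumes a drm_mm-style node. */
static void example_shift_node(struct drm_mm_node *node, s64 shift)
{
	/* shift = new VF GGTT base - old VF GGTT base; may be negative */
	node->start += shift;
}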
+
+/**
+ * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
+ * @tile: the &xe_tile struct instance
+ * @shift: the shift value
+ *
+ * Since Global GTT is not virtualized, each VF has an assigned range
+ * within the global space. This range might have changed during migration,
+ * which requires all memory addresses pointing to GGTT to be shifted.
+ */
+void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ mutex_lock(&ggtt->lock);
+
+ xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
+ xe_ggtt_shift_nodes_locked(ggtt, shift);
+ xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+
+ mutex_unlock(&ggtt->lock);
+}
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
new file mode 100644
index 000000000000..93eb043171e8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_VF_H_
+#define _XE_TILE_SRIOV_VF_H_
+
+#include <linux/types.h>
+
+struct xe_tile;
+
+int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
+int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
+void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
+void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index ccebd5f0878e..86323cf3be2c 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -33,7 +33,7 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_fast_assign(
__assign_str(dev);
- __entry->size = bo->size;
+ __entry->size = xe_bo_size(bo);
__entry->flags = bo->flags;
__entry->vm = bo->vm;
),
@@ -73,7 +73,7 @@ TRACE_EVENT(xe_bo_move,
TP_fast_assign(
__entry->bo = bo;
- __entry->size = bo->size;
+ __entry->size = xe_bo_size(bo);
__assign_str(new_placement_name);
__assign_str(old_placement_name);
__assign_str(device_id);
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.h b/drivers/gpu/drm/xe/xe_trace_lrc.h
index 5c669a0b2180..d525cbee1e34 100644
--- a/drivers/gpu/drm/xe/xe_trace_lrc.h
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.h
@@ -19,12 +19,12 @@
#define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev)
TRACE_EVENT(xe_lrc_update_timestamp,
- TP_PROTO(struct xe_lrc *lrc, uint32_t old),
+ TP_PROTO(struct xe_lrc *lrc, uint64_t old),
TP_ARGS(lrc, old),
TP_STRUCT__entry(
__field(struct xe_lrc *, lrc)
- __field(u32, old)
- __field(u32, new)
+ __field(u64, old)
+ __field(u64, new)
__string(name, lrc->fence_ctx.name)
__string(device_id, __dev_name_lrc(lrc))
),
@@ -36,7 +36,7 @@ TRACE_EVENT(xe_lrc_update_timestamp,
__assign_str(name);
__assign_str(device_id);
),
- TP_printk("lrc=:%p lrc->name=%s old=%u new=%u device_id:%s",
+ TP_printk("lrc=:%p lrc->name=%s old=%llu new=%llu device_id:%s",
__entry->lrc, __get_str(name),
__entry->old, __entry->new,
__get_str(device_id))
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 49ddbda7cdef..828b45b24c23 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -98,6 +98,11 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
+ { XE_RTP_NAME("Tuning: Disable NULL query for Anyhit Shader"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
+ },
};
static const struct xe_rtp_entry_sr lrc_tunings[] = {
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index c14bd2282044..465bda355443 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -33,6 +33,22 @@ uc_to_xe(struct xe_uc *uc)
}
/* Should be called once at driver load only */
+int xe_uc_init_noalloc(struct xe_uc *uc)
+{
+ int ret;
+
+ ret = xe_guc_init_noalloc(&uc->guc);
+ if (ret)
+ goto err;
+
+ /* HuC and GSC have no early dependencies and will be initialized during xe_uc_init(). */
+ return 0;
+
+err:
+ xe_gt_err(uc_to_gt(uc), "Failed to initialize uC early (%pe)\n", ERR_PTR(ret));
+ return ret;
+}
+
int xe_uc_init(struct xe_uc *uc)
{
int ret;
@@ -56,15 +72,17 @@ int xe_uc_init(struct xe_uc *uc)
if (!xe_device_uc_enabled(uc_to_xe(uc)))
return 0;
- if (IS_SRIOV_VF(uc_to_xe(uc)))
- return 0;
+ if (!IS_SRIOV_VF(uc_to_xe(uc))) {
+ ret = xe_wopcm_init(&uc->wopcm);
+ if (ret)
+ goto err;
+ }
- ret = xe_wopcm_init(&uc->wopcm);
+ ret = xe_guc_min_load_for_hwconfig(&uc->guc);
if (ret)
goto err;
return 0;
-
err:
xe_gt_err(uc_to_gt(uc), "Failed to initialize uC (%pe)\n", ERR_PTR(ret));
return ret;
@@ -126,28 +144,7 @@ int xe_uc_sanitize_reset(struct xe_uc *uc)
return uc_reset(uc);
}
-/**
- * xe_uc_init_hwconfig - minimally init Uc, read and parse hwconfig
- * @uc: The UC object
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_uc_init_hwconfig(struct xe_uc *uc)
-{
- int ret;
-
- /* GuC submission not enabled, nothing to do */
- if (!xe_device_uc_enabled(uc_to_xe(uc)))
- return 0;
-
- ret = xe_guc_min_load_for_hwconfig(&uc->guc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int vf_uc_init_hw(struct xe_uc *uc)
+static int vf_uc_load_hw(struct xe_uc *uc)
{
int err;
@@ -161,22 +158,30 @@ static int vf_uc_init_hw(struct xe_uc *uc)
err = xe_gt_sriov_vf_connect(uc_to_gt(uc));
if (err)
- return err;
+ goto err_out;
uc->guc.submission_state.enabled = true;
+ err = xe_guc_opt_in_features_enable(&uc->guc);
+ if (err)
+ goto err_out;
+
err = xe_gt_record_default_lrcs(uc_to_gt(uc));
if (err)
- return err;
+ goto err_out;
return 0;
+
+err_out:
+ xe_guc_sanitize(&uc->guc);
+ return err;
}
/*
* Should be called during driver load, after every GT reset, and after every
* suspend to reload / auth the firmwares.
*/
-int xe_uc_init_hw(struct xe_uc *uc)
+int xe_uc_load_hw(struct xe_uc *uc)
{
int ret;
@@ -185,7 +190,7 @@ int xe_uc_init_hw(struct xe_uc *uc)
return 0;
if (IS_SRIOV_VF(uc_to_xe(uc)))
- return vf_uc_init_hw(uc);
+ return vf_uc_load_hw(uc);
ret = xe_huc_upload(&uc->huc);
if (ret)
@@ -201,15 +206,15 @@ int xe_uc_init_hw(struct xe_uc *uc)
ret = xe_gt_record_default_lrcs(uc_to_gt(uc));
if (ret)
- return ret;
+ goto err_out;
ret = xe_guc_post_load_init(&uc->guc);
if (ret)
- return ret;
+ goto err_out;
ret = xe_guc_pc_start(&uc->guc.pc);
if (ret)
- return ret;
+ goto err_out;
xe_guc_engine_activity_enable_stats(&uc->guc);
@@ -221,11 +226,10 @@ int xe_uc_init_hw(struct xe_uc *uc)
xe_gsc_load_start(&uc->gsc);
return 0;
-}
-int xe_uc_fini_hw(struct xe_uc *uc)
-{
- return xe_uc_sanitize_reset(uc);
+err_out:
+ xe_guc_sanitize(&uc->guc);
+ return ret;
}
int xe_uc_reset_prepare(struct xe_uc *uc)
@@ -244,7 +248,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc)
void xe_uc_stop_prepare(struct xe_uc *uc)
{
- xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_gsc_stop_prepare(&uc->gsc);
xe_guc_stop_prepare(&uc->guc);
}
@@ -278,6 +282,12 @@ again:
goto again;
}
+void xe_uc_suspend_prepare(struct xe_uc *uc)
+{
+ xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_guc_stop_prepare(&uc->guc);
+}
+
int xe_uc_suspend(struct xe_uc *uc)
{
/* GuC submission not enabled, nothing to do */
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 3813c1ede450..21c9306098cf 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -8,16 +8,16 @@
struct xe_uc;
+int xe_uc_init_noalloc(struct xe_uc *uc);
int xe_uc_init(struct xe_uc *uc);
-int xe_uc_init_hwconfig(struct xe_uc *uc);
int xe_uc_init_post_hwconfig(struct xe_uc *uc);
-int xe_uc_init_hw(struct xe_uc *uc);
-int xe_uc_fini_hw(struct xe_uc *uc);
+int xe_uc_load_hw(struct xe_uc *uc);
void xe_uc_gucrc_disable(struct xe_uc *uc);
int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
+void xe_uc_suspend_prepare(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
void xe_uc_declare_wedged(struct xe_uc *uc);
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index fb0eda3d5682..9bbdde604923 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -16,6 +16,7 @@
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -92,6 +93,8 @@
struct uc_fw_entry {
enum xe_platform platform;
+ enum xe_gt_type gt_type;
+
struct {
const char *path;
u16 major;
@@ -106,32 +109,39 @@ struct fw_blobs_by_type {
u32 count;
};
-#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \
- fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \
- fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \
- fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \
- fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \
- fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \
- fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2))
+/*
+ * Add an "ANY" define just to convey the meaning it's given here.
+ */
+#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
+
+#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
+ fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 47, 0)) \
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
+ fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 45, 2)) \
+ fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \
+ fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
- fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \
- fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \
- fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
- fw_def(DG1, no_ver(i915, huc, dg1)) \
- fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \
- fw_def(ALDERLAKE_S, no_ver(i915, huc, tgl)) \
- fw_def(ROCKETLAKE, no_ver(i915, huc, tgl)) \
- fw_def(TIGERLAKE, no_ver(i915, huc, tgl))
+ fw_def(PANTHERLAKE, GT_TYPE_ANY, no_ver(xe, huc, ptl)) \
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \
+ fw_def(DG1, GT_TYPE_ANY, no_ver(i915, huc, dg1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl))
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0))
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, gsc, lnl, 104, 1, 0)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, gsc, mtl, 102, 1, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
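For illustration, assuming the usual fw_filename_major_ver() wrapper that passes "_<major>" as version__, the new PANTHERLAKE GuC entry expands along these lines:

/*
 * Illustrative expansion (wrapper behavior assumed, not shown in this hunk):
 *   major_ver(xe, guc, ptl, 70, 47, 0)
 *     -> MAKE_FW_PATH(xe, guc, ptl, "_70")
 *     -> "xe/ptl_guc_70.bin"
 */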
@@ -159,12 +169,13 @@ struct fw_blobs_by_type {
a, b, c }
/* All blobs need to be declared via MODULE_FIRMWARE() */
-#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
+#define XE_UC_MODULE_FIRMWARE(platform__, gt_type__, fw_filename) \
MODULE_FIRMWARE(fw_filename);
-#define XE_UC_FW_ENTRY(platform__, entry__) \
+#define XE_UC_FW_ENTRY(platform__, gt_type__, entry__) \
{ \
.platform = XE_ ## platform__, \
+ .gt_type = XE_ ## gt_type__, \
entry__, \
},
@@ -222,30 +233,38 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
[XE_UC_FW_TYPE_HUC] = { entries_huc, ARRAY_SIZE(entries_huc) },
[XE_UC_FW_TYPE_GSC] = { entries_gsc, ARRAY_SIZE(entries_gsc) },
};
- static const struct uc_fw_entry *entries;
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
enum xe_platform p = xe->info.platform;
+ const struct uc_fw_entry *entries;
u32 count;
int i;
- xe_assert(xe, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, gt->info.type != XE_GT_TYPE_UNINITIALIZED);
+
entries = blobs_all[uc_fw->type].entries;
count = blobs_all[uc_fw->type].count;
for (i = 0; i < count && p <= entries[i].platform; i++) {
- if (p == entries[i].platform) {
- uc_fw->path = entries[i].path;
- uc_fw->versions.wanted.major = entries[i].major;
- uc_fw->versions.wanted.minor = entries[i].minor;
- uc_fw->versions.wanted.patch = entries[i].patch;
- uc_fw->full_ver_required = entries[i].full_ver_required;
-
- if (uc_fw->type == XE_UC_FW_TYPE_GSC)
- uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
- else
- uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
-
- break;
- }
+ if (p != entries[i].platform)
+ continue;
+
+ if (entries[i].gt_type != XE_GT_TYPE_ANY &&
+ entries[i].gt_type != gt->info.type)
+ continue;
+
+ uc_fw->path = entries[i].path;
+ uc_fw->versions.wanted.major = entries[i].major;
+ uc_fw->versions.wanted.minor = entries[i].minor;
+ uc_fw->versions.wanted.patch = entries[i].patch;
+ uc_fw->full_ver_required = entries[i].full_ver_required;
+
+ if (uc_fw->type == XE_UC_FW_TYPE_GSC)
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ else
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
+
+ break;
}
}
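The early-exit condition `p <= entries[i].platform` assumes the firmware tables are sorted by descending platform enum; a hypothetical debug check makes the invariant explicit:

/* Hypothetical check: entries[] must be sorted by descending platform. */
static bool example_entries_sorted(const struct uc_fw_entry *e, u32 count)
{
	u32 i;

	for (i = 1; i < count; i++)
		if (e[i - 1].platform < e[i].platform)
			return false;

	return true;
}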
@@ -646,11 +665,39 @@ do { \
ver_->major, ver_->minor, ver_->patch); \
} while (0)
+static void uc_fw_vf_override(struct xe_uc_fw *uc_fw)
+{
+ struct xe_uc_fw_version *compat = &uc_fw->versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ struct xe_uc_fw_version *wanted = &uc_fw->versions.wanted;
+
+ /* Only GuC/HuC are supported */
+ if (uc_fw->type != XE_UC_FW_TYPE_GUC && uc_fw->type != XE_UC_FW_TYPE_HUC)
+ uc_fw->path = NULL;
+
+ /* VF will support only firmwares that driver can autoselect */
+ xe_uc_fw_change_status(uc_fw, uc_fw->path ?
+ XE_UC_FIRMWARE_PRELOADED :
+ XE_UC_FIRMWARE_NOT_SUPPORTED);
+
+ if (!xe_uc_fw_is_supported(uc_fw))
+ return;
+
+ /* PF is doing the loading, so we don't need a path on the VF */
+ uc_fw->path = "Loaded by PF";
+
+ /* The GuC versions are set up during the VF bootstrap */
+ if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ xe_gt_sriov_vf_guc_versions(uc_fw_to_gt(uc_fw), wanted, compat);
+ }
+}
+
static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmware_p)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ struct drm_printer p = xe_gt_info_printer(gt);
struct device *dev = xe->drm.dev;
- struct drm_printer p = drm_info_printer(dev);
const struct firmware *fw = NULL;
int err;
@@ -659,20 +706,13 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
* before we've looked at the HW caps to see if we have uC support
*/
BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED);
- xe_assert(xe, !uc_fw->status);
- xe_assert(xe, !uc_fw->path);
+ xe_gt_assert(gt, !uc_fw->status);
+ xe_gt_assert(gt, !uc_fw->path);
uc_fw_auto_select(xe, uc_fw);
if (IS_SRIOV_VF(xe)) {
- /* Only GuC/HuC are supported */
- if (uc_fw->type != XE_UC_FW_TYPE_GUC &&
- uc_fw->type != XE_UC_FW_TYPE_HUC)
- uc_fw->path = NULL;
- /* VF will support only firmwares that driver can autoselect */
- xe_uc_fw_change_status(uc_fw, uc_fw->path ?
- XE_UC_FIRMWARE_PRELOADED :
- XE_UC_FIRMWARE_NOT_SUPPORTED);
+ uc_fw_vf_override(uc_fw);
return 0;
}
@@ -684,7 +724,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
if (!xe_uc_fw_is_supported(uc_fw)) {
if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
- drm_err(&xe->drm, "No GuC firmware defined for platform\n");
+ xe_gt_err(gt, "No GuC firmware defined for platform\n");
return -ENOENT;
}
return 0;
@@ -693,7 +733,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
/* an empty path means the firmware is disabled */
if (!xe_device_uc_enabled(xe) || !(*uc_fw->path)) {
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_DISABLED);
- drm_dbg(&xe->drm, "%s disabled", xe_uc_fw_type_repr(uc_fw->type));
+ xe_gt_dbg(gt, "%s disabled\n", xe_uc_fw_type_repr(uc_fw->type));
return 0;
}
@@ -726,10 +766,10 @@ fail:
XE_UC_FIRMWARE_MISSING :
XE_UC_FIRMWARE_ERROR);
- drm_notice(&xe->drm, "%s firmware %s: fetch failed with error %d\n",
- xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- drm_info(&xe->drm, "%s firmware(s) can be downloaded from %s\n",
- xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL);
+ xe_gt_notice(gt, "%s firmware %s: fetch failed with error %pe\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, ERR_PTR(err));
+ xe_gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
+ xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index ad3b35a0e6eb..914026015019 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -65,6 +65,8 @@ enum xe_uc_fw_type {
* struct xe_uc_fw_version - Version for XE micro controller firmware
*/
struct xe_uc_fw_version {
+ /** @branch: branch version of the FW (not always available) */
+ u16 branch;
/** @major: major version of the FW */
u16 major;
/** @minor: minor version of the FW */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 60303998bd61..2035604121e6 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -732,7 +732,9 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
DMA_RESV_USAGE_BOOKKEEP,
false, MAX_SCHEDULE_TIMEOUT);
+ down_read(&vm->userptr.notifier_lock);
err = xe_vm_invalidate_vma(&uvma->vma);
+ up_read(&vm->userptr.notifier_lock);
xe_vm_unlock(vm);
if (err)
break;
@@ -798,21 +800,47 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
}
ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
+static void xe_vma_svm_prefetch_op_fini(struct xe_vma_op *op)
+{
+ struct xe_vma *vma;
+
+ vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH && xe_vma_is_cpu_addr_mirror(vma))
+ xa_destroy(&op->prefetch_range.range);
+}
+
+static void xe_vma_svm_prefetch_ops_fini(struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+
+ if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
+ return;
+
+ list_for_each_entry(op, &vops->list, link)
+ xe_vma_svm_prefetch_op_fini(op);
+}
+
static void xe_vma_ops_fini(struct xe_vma_ops *vops)
{
int i;
+ xe_vma_svm_prefetch_ops_fini(vops);
+
for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
kfree(vops->pt_update_ops[i].ops);
}
-static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
+static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, int inc_val)
{
int i;
+ if (!inc_val)
+ return;
+
for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
if (BIT(i) & tile_mask)
- ++vops->pt_update_ops[i].num_ops;
+ vops->pt_update_ops[i].num_ops += inc_val;
}
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
@@ -842,7 +870,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
xe_vm_populate_rebind(op, vma, tile_mask);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
return 0;
}
@@ -977,7 +1005,7 @@ xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
xe_vm_populate_range_rebind(op, vma, range, tile_mask);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
return 0;
}
@@ -1062,7 +1090,7 @@ xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
xe_vm_populate_range_unbind(op, range);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
+ xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1);
return 0;
}
@@ -1678,13 +1706,21 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
* scheduler drops all the references of it, hence protecting the VM
* for this case is necessary.
*/
- if (flags & XE_VM_FLAG_LR_MODE)
+ if (flags & XE_VM_FLAG_LR_MODE) {
+ INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
xe_pm_runtime_get_noresume(xe);
+ }
+
+ if (flags & XE_VM_FLAG_FAULT_MODE) {
+ err = xe_svm_init(vm);
+ if (err)
+ goto err_no_resv;
+ }
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
err = -ENOMEM;
- goto err_no_resv;
+ goto err_svm_fini;
}
drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
@@ -1724,10 +1760,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->batch_invalidate_tlb = true;
}
- if (vm->flags & XE_VM_FLAG_LR_MODE) {
- INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+ if (vm->flags & XE_VM_FLAG_LR_MODE)
vm->batch_invalidate_tlb = false;
- }
/* Fill pt_root after allocating scratch tables */
for_each_tile(tile, xe, id) {
@@ -1757,12 +1791,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
}
}
- if (flags & XE_VM_FLAG_FAULT_MODE) {
- err = xe_svm_init(vm);
- if (err)
- goto err_close;
- }
-
if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
@@ -1776,6 +1804,11 @@ err_close:
xe_vm_close_and_put(vm);
return ERR_PTR(err);
+err_svm_fini:
+ if (flags & XE_VM_FLAG_FAULT_MODE) {
+ vm->size = 0; /* close the vm */
+ xe_svm_fini(vm);
+ }
err_no_resv:
mutex_destroy(&vm->snap_mutex);
for_each_tile(tile, xe, id)
@@ -2049,7 +2082,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
- args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
+ !xe->info.needs_scratch))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
@@ -2135,6 +2169,35 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
}
+static bool vma_matches(struct xe_vma *vma, u64 page_addr)
+{
+ if (page_addr > xe_vma_end(vma) - 1 ||
+ page_addr + SZ_4K - 1 < xe_vma_start(vma))
+ return false;
+
+ return true;
+}
+
+/**
+ * xe_vm_find_vma_by_addr() - Find a VMA by its address
+ *
+ * @vm: the xe_vm the vma belongs to
+ * @page_addr: address to look up
+ *
+ * Return: Pointer to the matching VMA, or NULL if no VMA overlaps @page_addr.
+ */
+struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr)
+{
+ struct xe_vma *vma = NULL;
+
+ if (vm->usm.last_fault_vma) { /* Fast lookup */
+ if (vma_matches(vm->usm.last_fault_vma, page_addr))
+ vma = vm->usm.last_fault_vma;
+ }
+ if (!vma)
+ vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
+
+ return vma;
+}
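A hypothetical caller sketch; aligning the fault address down to a 4K page is an assumption here, matching the SZ_4K window the lookup uses:

/* Hypothetical: resolve the VMA backing a faulting page. */
static struct xe_vma *example_fault_lookup(struct xe_vm *vm, u64 fault_addr)
{
	return xe_vm_find_vma_by_addr(vm, ALIGN_DOWN(fault_addr, SZ_4K));
}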
+
static const u32 region_to_mem_type[] = {
XE_PL_TT,
XE_PL_VRAM0,
@@ -2201,13 +2264,39 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
}
#endif
+static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
+{
+ if (!xe_vm_in_fault_mode(vm))
+ return false;
+
+ if (!xe_vm_has_scratch(vm))
+ return false;
+
+ if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE)
+ return false;
+
+ return true;
+}
+
+static void xe_svm_prefetch_gpuva_ops_fini(struct drm_gpuva_ops *ops)
+{
+ struct drm_gpuva_op *__op;
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+ xe_vma_svm_prefetch_op_fini(op);
+ }
+}
+
/*
* Create the operations list from the IOCTL arguments and set up the operation
* fields so that the parse and commit steps are decoupled from the IOCTL
* arguments. This step can fail.
*/
static struct drm_gpuva_ops *
-vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
- u64 bo_offset_or_userptr, u64 addr, u64 range,
+vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
+ struct xe_bo *bo, u64 bo_offset_or_userptr,
+ u64 addr, u64 range,
u32 operation, u32 flags,
u32 prefetch_region, u16 pat_index)
{
@@ -2215,6 +2304,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *__op;
struct drm_gpuvm_bo *vm_bo;
+ u64 range_end = addr + range;
int err;
lockdep_assert_held_write(&vm->lock);
@@ -2273,15 +2363,83 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
+ op->map.invalidate_on_bind =
+ __xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
- op->prefetch.region = prefetch_region;
- }
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_svm_range *svm_range;
+ struct drm_gpusvm_ctx ctx = {};
+ struct xe_tile *tile;
+ u8 id, tile_mask = 0;
+ u32 i;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma)) {
+ op->prefetch.region = prefetch_region;
+ break;
+ }
+
+ ctx.read_only = xe_vma_read_only(vma);
+ ctx.devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
+
+ for_each_tile(tile, vm->xe, id)
+ tile_mask |= 0x1 << id;
+ xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
+ op->prefetch_range.region = prefetch_region;
+ op->prefetch_range.ranges_count = 0;
+alloc_next_range:
+ svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
+
+ if (PTR_ERR(svm_range) == -ENOENT) {
+ u64 ret = xe_svm_find_vma_start(vm, addr, range_end, vma);
+
+ addr = ret == ULONG_MAX ? 0 : ret;
+ if (addr)
+ goto alloc_next_range;
+ else
+ goto print_op_label;
+ }
+
+ if (IS_ERR(svm_range)) {
+ err = PTR_ERR(svm_range);
+ goto unwind_prefetch_ops;
+ }
+
+ if (xe_svm_range_validate(vm, svm_range, tile_mask, !!prefetch_region)) {
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
+ goto check_next_range;
+ }
+
+ err = xa_alloc(&op->prefetch_range.range,
+ &i, svm_range, xa_limit_32b,
+ GFP_KERNEL);
+
+ if (err)
+ goto unwind_prefetch_ops;
+
+ op->prefetch_range.ranges_count++;
+ vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH;
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE CREATED");
+check_next_range:
+ if (range_end > xe_svm_range_end(svm_range) &&
+ xe_svm_range_end(svm_range) < xe_vma_end(vma)) {
+ addr = xe_svm_range_end(svm_range);
+ goto alloc_next_range;
+ }
+ }
+print_op_label:
print_op(vm->xe, __op);
}
return ops;
+
+unwind_prefetch_ops:
+ xe_svm_prefetch_gpuva_ops_fini(ops);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return ERR_PTR(err);
}
+
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
@@ -2472,10 +2630,11 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return PTR_ERR(vma);
op->map.vma = vma;
- if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
- !op->map.is_cpu_addr_mirror)
+ if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
+ !op->map.is_cpu_addr_mirror) ||
+ op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
- op->tile_mask);
+ op->tile_mask, 1);
break;
}
case DRM_GPUVA_OP_REMAP:
@@ -2484,6 +2643,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
gpuva_to_vma(op->base.remap.unmap->va);
bool skip = xe_vma_is_cpu_addr_mirror(old);
u64 start = xe_vma_start(old), end = xe_vma_end(old);
+ int num_remap_ops = 0;
if (op->base.remap.prev)
start = op->base.remap.prev->va.addr +
@@ -2536,7 +2696,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
(ULL)op->remap.start,
(ULL)op->remap.range);
} else {
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
}
}
@@ -2565,11 +2725,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
(ULL)op->remap.start,
(ULL)op->remap.range);
} else {
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
}
}
if (!skip)
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
+
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -2581,7 +2743,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return -EBUSY;
if (!xe_vma_is_cpu_addr_mirror(vma))
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
break;
case DRM_GPUVA_OP_PREFETCH:
vma = gpuva_to_vma(op->base.prefetch.va);
@@ -2592,8 +2754,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return err;
}
- if (!xe_vma_is_cpu_addr_mirror(vma))
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask,
+ op->prefetch_range.ranges_count);
+ else
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
+
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -2719,6 +2885,57 @@ static int check_ufence(struct xe_vma *vma)
return 0;
}
+static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
+{
+ bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ int err = 0;
+
+ struct xe_svm_range *svm_range;
+ struct drm_gpusvm_ctx ctx = {};
+ struct xe_tile *tile;
+ unsigned long i;
+ u32 region;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ return 0;
+
+ region = op->prefetch_range.region;
+
+ ctx.read_only = xe_vma_read_only(vma);
+ ctx.devmem_possible = devmem_possible;
+ ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
+
+ /* TODO: Threading the migration */
+ xa_for_each(&op->prefetch_range.range, i, svm_range) {
+ if (!region)
+ xe_svm_range_migrate_to_smem(vm, svm_range);
+
+ if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
+ tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
+ err = xe_svm_alloc_vram(tile, svm_range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ return -ENODATA;
+ }
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM");
+ }
+
+ err = xe_svm_range_get_pages(vm, svm_range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
+ err = -ENODATA;
+ return err;
+ }
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE GET PAGES DONE");
+ }
+
+ return err;
+}
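The tile selection above is plain index arithmetic against region_to_mem_type[]; a worked instance with illustrative numbers:

/*
 * Illustration: region 1 -> region_to_mem_type[1] == XE_PL_VRAM0, so the
 * tile index is XE_PL_VRAM0 - XE_PL_VRAM0 == 0, i.e. &vm->xe->tiles[0].
 * On a two-tile device, region 2 (XE_PL_VRAM1) selects tiles[1].
 */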
+
static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
struct xe_vma_op *op)
{
@@ -2726,9 +2943,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- err = vma_lock_and_validate(exec, op->map.vma,
- !xe_vm_in_fault_mode(vm) ||
- op->map.immediate);
+ if (!op->map.invalidate_on_bind)
+ err = vma_lock_and_validate(exec, op->map.vma,
+ !xe_vm_in_fault_mode(vm) ||
+ op->map.immediate);
break;
case DRM_GPUVA_OP_REMAP:
err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
@@ -2755,7 +2973,12 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_PREFETCH:
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- u32 region = op->prefetch.region;
+ u32 region;
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ region = op->prefetch_range.region;
+ else
+ region = op->prefetch.region;
xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
@@ -2774,6 +2997,25 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
return err;
}
+static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+ int err;
+
+ if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
+ return 0;
+
+ list_for_each_entry(op, &vops->list, link) {
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
+ err = prefetch_ranges(vm, op);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
struct xe_vm *vm,
struct xe_vma_ops *vops)
@@ -3082,9 +3324,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (!*bind_ops)
return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
- err = __copy_from_user(*bind_ops, bind_user,
- sizeof(struct drm_xe_vm_bind_op) *
- args->num_binds);
+ err = copy_from_user(*bind_ops, bind_user,
+ sizeof(struct drm_xe_vm_bind_op) *
+ args->num_binds);
if (XE_IOCTL_DBG(xe, err)) {
err = -EFAULT;
goto free_bind_ops;
@@ -3109,7 +3351,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
(!xe_vm_in_fault_mode(vm) ||
- !IS_ENABLED(CONFIG_DRM_GPUSVM)))) {
+ !IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3215,6 +3457,7 @@ static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
vops->q = q;
vops->syncs = syncs;
vops->num_syncs = num_syncs;
+ vops->flags = 0;
}
static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
@@ -3223,9 +3466,9 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
{
u16 coh_mode;
- if (XE_IOCTL_DBG(xe, range > bo->size) ||
+ if (XE_IOCTL_DBG(xe, range > xe_bo_size(bo)) ||
XE_IOCTL_DBG(xe, obj_offset >
- bo->size - range)) {
+ xe_bo_size(bo) - range)) {
return -EINVAL;
}
@@ -3243,7 +3486,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
- return -EINVAL;
+ return -EINVAL;
}
}
@@ -3251,7 +3494,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
if (bo->cpu_caching) {
if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
- return -EINVAL;
+ return -EINVAL;
}
} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
/*
@@ -3260,7 +3503,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
* how it was mapped on the CPU. Just assume is it
* potentially cached on CPU side.
*/
- return -EINVAL;
+ return -EINVAL;
}
/* If a BO is protected it can only be mapped if the key is still valid */
@@ -3312,8 +3555,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
/* Ensure all UNMAPs visible */
- if (xe_vm_in_fault_mode(vm))
- flush_work(&vm->svm.garbage_collector.work);
+ xe_svm_flush(vm);
err = down_write_killable(&vm->lock);
if (err)
@@ -3423,7 +3665,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
u16 pat_index = bind_ops[i].pat_index;
- ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
+ ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset,
addr, range, op, flags,
prefetch_region, pat_index);
if (IS_ERR(ops[i])) {
@@ -3456,6 +3698,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (err)
goto unwind_ops;
+ err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops);
+ if (err)
+ goto unwind_ops;
+
fence = vm_bind_ioctl_ops_execute(vm, &vops);
if (IS_ERR(fence))
err = PTR_ERR(fence);
@@ -3525,7 +3771,7 @@ struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
xe_vma_ops_init(&vops, vm, q, NULL, 0);
- ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, bo->size,
+ ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo),
DRM_XE_VM_BIND_OP_MAP, 0, 0,
vm->xe->pat.idx[cache_lvl]);
if (IS_ERR(ops)) {
@@ -3597,6 +3843,68 @@ void xe_vm_unlock(struct xe_vm *vm)
}
/**
+ * xe_vm_range_tilemask_tlb_invalidation() - Issue a TLB invalidation on this tilemask for an
+ * address range
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ * @tile_mask: mask of tiles for which to issue TLB invalidations
+ *
+ * Issue a range-based TLB invalidation on the primary and media GTs of each
+ * tile in @tile_mask.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask)
+{
+ struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ struct xe_tile *tile;
+ u32 fence_id = 0;
+ u8 id;
+ int err;
+
+ if (!tile_mask)
+ return 0;
+
+ for_each_tile(tile, vm->xe, id) {
+ if (tile_mask & BIT(id)) {
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->primary_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
+
+ if (!tile->media_gt)
+ continue;
+
+ xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->media_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
+ }
+ }
+
+wait:
+ for (id = 0; id < fence_id; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
+ return err;
+}
+
+/**
* xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
* @vma: VMA to invalidate
*
@@ -3609,28 +3917,34 @@ void xe_vm_unlock(struct xe_vm *vm)
int xe_vm_invalidate_vma(struct xe_vma *vma)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_tile *tile;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
- u8 id;
- u32 fence_id = 0;
+ u8 tile_mask = 0;
int ret = 0;
+ u8 id;
xe_assert(xe, !xe_vma_is_null(vma));
xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
trace_xe_vma_invalidate(vma);
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ vm_dbg(&vm->xe->drm,
"INVALIDATE: addr=0x%016llx, range=0x%016llx",
xe_vma_start(vma), xe_vma_size(vma));
- /* Check that we don't race with page-table updates */
+ /*
+	 * Check that we don't race with page-table updates; the
+	 * tile_invalidated update is safe
+ */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
+ lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) &&
+ lockdep_is_held(&xe_vm_resv(vm)->lock.base)));
+
WARN_ON_ONCE(!mmu_interval_check_retry
(&to_userptr_vma(vma)->userptr.notifier,
to_userptr_vma(vma)->userptr.notifier_seq));
- WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
+ WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP));
} else {
@@ -3638,39 +3952,17 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
}
}
- for_each_tile(tile, xe, id) {
- if (xe_pt_zap_ptes(tile, vma)) {
- xe_device_wmb(xe);
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id],
- true);
-
- ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
-
- if (!tile->media_gt)
- continue;
+ for_each_tile(tile, xe, id)
+ if (xe_pt_zap_ptes(tile, vma))
+ tile_mask |= BIT(id);
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id],
- true);
+ xe_device_wmb(xe);
- ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
- }
- }
-
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
+ xe_vma_end(vma), tile_mask);
- vma->tile_invalidated = vma->tile_mask;
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
return ret;
}
@@ -3847,6 +4139,9 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
}
drm_puts(p, "\n");
+
+ if (drm_coredump_printer_is_full(p))
+ return;
}
}
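The reworked xe_vm_invalidate_vma() above reduces to a three-step pattern: zap PTEs per tile, publish the zaps with a write barrier, then issue one ranged invalidation over the accumulated tile mask. A minimal sketch of that pattern, using only helpers visible in this patch (the wrapper function name is illustrative):

/* Sketch: driving the new ranged-invalidation helper for a VMA. */
static int example_invalidate_vma_range(struct xe_vm *vm, struct xe_vma *vma)
{
	struct xe_tile *tile;
	u8 tile_mask = 0;
	u8 id;

	/* Collect only the tiles whose PTEs were actually zapped */
	for_each_tile(tile, vm->xe, id)
		if (xe_pt_zap_ptes(tile, vma))
			tile_mask |= BIT(id);

	/* Make the PTE zaps visible before invalidating TLBs */
	xe_device_wmb(vm->xe);

	/* One call covers primary and media GTs of every tile in the mask */
	return xe_vm_range_tilemask_tlb_invalidation(vm, xe_vma_start(vma),
						     xe_vma_end(vma), tile_mask);
}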
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 0ef811fc2bde..3475a118f666 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -169,6 +169,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
!xe_vma_is_cpu_addr_mirror(vma);
}
+struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+
/**
* to_userptr_vma() - Return a pointer to an embedding userptr vma
* @vma: Pointer to the embedded struct xe_vma
@@ -226,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
struct xe_svm_range *range);
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask);
+
int xe_vm_invalidate_vma(struct xe_vma *vma);
int xe_vm_validate_protected(struct xe_vm *vm);
@@ -301,6 +306,94 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+/**
+ * xe_vm_set_validating() - Register this task as currently making bos resident
+ * @vm: Pointer to the vm or NULL.
+ * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
+ * validating.
+ *
+ * Register this task as currently making bos resident for the vm. Intended
+ * to avoid eviction by the same task of shared bos bound to the vm.
+ * Call with the vm's resv lock held.
+ *
+ * Return: A pin cookie that should be used for xe_vm_clear_validating().
+ */
+static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
+ bool allow_res_evict)
+{
+ struct pin_cookie cookie = {};
+
+ if (vm && !allow_res_evict) {
+ xe_vm_assert_held(vm);
+ cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
+ /* Pairs with READ_ONCE in xe_vm_is_validating() */
+ WRITE_ONCE(vm->validating, current);
+ }
+
+ return cookie;
+}
+
+/**
+ * xe_vm_clear_validating() - Unregister this task as currently making bos resident
+ * @vm: Pointer to the vm or NULL
+ * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
+ * value as for xe_vm_set_validating().
+ * @cookie: Cookie obtained from xe_vm_set_validating().
+ *
+ * Unregister this task as currently making bos resident for the vm. Intended
+ * to avoid eviction by the same task of shared bos bound to the vm.
+ * Call with the vm's resv lock held.
+ */
+static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
+ struct pin_cookie cookie)
+{
+ if (vm && !allow_res_evict) {
+ lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
+ /* Pairs with READ_ONCE in xe_vm_is_validating() */
+ WRITE_ONCE(vm->validating, NULL);
+ }
+}
+
+/**
+ * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
+ * by the current task.
+ * @vm: Pointer to the vm.
+ *
+ * If this function returns %true, we should be in a vm resv locked region, since
+ * the current process is the same task that called xe_vm_set_validating().
+ * The function asserts that this is indeed the case.
+ *
+ * Return: %true if the task is currently making bos resident, %false otherwise.
+ */
+static inline bool xe_vm_is_validating(struct xe_vm *vm)
+{
+	/*
+	 * Pairs with WRITE_ONCE in xe_vm_set_validating() and
+	 * xe_vm_clear_validating()
+	 */
+ if (READ_ONCE(vm->validating) == current) {
+ xe_vm_assert_held(vm);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
+ * a valid GPU mapping
+ * @tile: The tile which the GPU mapping belongs to
+ * @tile_present: Tile present mask
+ * @tile_invalidated: Tile invalidated mask
+ *
+ * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
+ * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
+ * without the notifier lock in userptr or SVM cases, and not reliable without
+ * the BO dma-resv lock in the BO case. As such, they should only be used in
+ * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
+ * invalidation) where it is harmless.
+ *
+ * Return: True if there are valid GPU pages, False otherwise
+ */
+#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
+ ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
+
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
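Taken together, the three inline helpers above form a bracketed critical section around BO validation. A hedged usage sketch (the surrounding function is hypothetical; the helpers are the ones declared above):

/* Sketch: bracketing a validation pass so the same task's eviction
 * path can recognize and skip shared BOs bound to this vm. */
static void example_validate_section(struct xe_vm *vm, bool allow_res_evict)
{
	struct pin_cookie cookie;

	xe_vm_assert_held(vm);		/* resv lock must already be held */
	cookie = xe_vm_set_validating(vm, allow_res_evict);

	/* ... make BOs resident; eviction code running in this task may
	 * consult xe_vm_is_validating(vm) and back off ... */

	xe_vm_clear_validating(vm, allow_res_evict, cookie);
}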
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 84fa41b9fa20..8a07feef503b 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -100,14 +100,21 @@ struct xe_vma {
struct work_struct destroy_work;
};
- /** @tile_invalidated: VMA has been invalidated */
+ /**
+	 * @tile_invalidated: Tile mask of bindings invalidated for this VMA.
+	 * Protected by the BO's resv and, for userptrs, by
+	 * vm->userptr.notifier_lock in write mode for writing, or by
+	 * vm->userptr.notifier_lock in read mode plus the vm->resv. For a
+	 * stable read, the BO's resv or the userptr
+	 * vm->userptr.notifier_lock in read mode is required. Can be
+	 * opportunistically read with READ_ONCE outside of locks.
+ */
u8 tile_invalidated;
/** @tile_mask: Tile mask of where to create binding for this VMA */
u8 tile_mask;
/**
- * @tile_present: GT mask of binding are present for this VMA.
+	 * @tile_present: Tile mask of bindings present for this VMA.
* protected by vm->lock, vm->resv and for userptrs,
* vm->userptr.notifier_lock for writing. Needs either for reading,
* but if reading is done under the vm->lock only, it needs to be held
@@ -259,7 +266,7 @@ struct xe_vm {
* up for revalidation. Protected from access with the
* @invalidated_lock. Removing items from the list
* additionally requires @lock in write mode, and adding
- * items to the list requires either the @userptr.notifer_lock in
+ * items to the list requires either the @userptr.notifier_lock in
* write mode, OR @lock in write mode.
*/
struct list_head invalidated;
@@ -310,6 +317,14 @@ struct xe_vm {
* protected by the vm resv.
*/
u64 tlb_flush_seqno;
+ /**
+ * @validating: The task that is currently making bos resident for this vm.
+ * Protected by the VM's resv for writing. Opportunistic reading can be done
+ * using READ_ONCE. Note: This is a workaround for the
+ * TTM eviction_valuable() callback not being passed a struct
+ * ttm_operation_context(). Future work might want to address this.
+ */
+ struct task_struct *validating;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
/** @xef: XE file handle for tracking this VM's drm client */
@@ -330,6 +345,8 @@ struct xe_vma_op_map {
bool is_cpu_addr_mirror;
/** @dumpable: whether BO is dumped on GPU hang */
bool dumpable;
+	/** @invalidate_on_bind: invalidate the VMA before bind */
+ bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
};
@@ -372,6 +389,16 @@ struct xe_vma_op_unmap_range {
struct xe_svm_range *range;
};
+/** struct xe_vma_op_prefetch_range - VMA prefetch range operation */
+struct xe_vma_op_prefetch_range {
+ /** @range: xarray for SVM ranges data */
+ struct xarray range;
+ /** @ranges_count: number of svm ranges to map */
+ u32 ranges_count;
+ /** @region: memory region to prefetch to */
+ u32 region;
+};
+
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
@@ -414,6 +441,8 @@ struct xe_vma_op {
struct xe_vma_op_map_range map_range;
/** @unmap_range: VMA unmap range operation specific data */
struct xe_vma_op_unmap_range unmap_range;
+ /** @prefetch_range: VMA prefetch range operation specific data */
+ struct xe_vma_op_prefetch_range prefetch_range;
};
};
@@ -431,6 +460,9 @@ struct xe_vma_ops {
u32 num_syncs;
/** @pt_update_ops: page table update operations */
struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+	/** @flags: flags describing properties of this xe_vma_ops */
+#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
+ u32 flags;
#ifdef TEST_VM_OPS_ERROR
/** @inject_error: inject error to test error handling */
bool inject_error;
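The new XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH bit lets the bind path skip the SVM prefetch stage entirely when no operation in the batch needs it. A sketch of the intended producer/consumer pairing (the guard conditions are assumptions, not copied from the patch):

/* Producer: while building ops, mark the batch when an SVM prefetch
 * is present (condition shown is an assumption). */
if (needs_svm_prefetch)
	vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH;

/* Consumer: the prefetch stage can then bail out cheaply. */
if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
	return 0;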
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index b1f81dca610d..e421a74fb87c 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -49,7 +49,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size)
*/
static void resize_vram_bar(struct xe_device *xe)
{
- u64 force_vram_bar_size = xe_modparam.force_vram_bar_size;
+ int force_vram_bar_size = xe_modparam.force_vram_bar_size;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_bus *root = pdev->bus;
resource_size_t current_size;
@@ -66,6 +66,9 @@ static void resize_vram_bar(struct xe_device *xe)
if (!bar_size_mask)
return;
+ if (force_vram_bar_size < 0)
+ return;
+
/* set to a specific size? */
if (force_vram_bar_size) {
u32 bar_size_bit;
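The type change from u64 to int is what makes the new early return work at all: the modparam can be set to a negative value to mean "leave the BAR alone", and a negative value is representable only in a signed type. A one-line illustration:

/* With the old type the new check could never fire:
 *	u64 v = -1;	(v < 0) is always false -- v wraps to U64_MAX
 *	int v = -1;	(v < 0) is true, so -1 can mean "don't resize"
 */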
diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
index b378848d3b7b..8f23a27871b6 100644
--- a/drivers/gpu/drm/xe/xe_vsec.c
+++ b/drivers/gpu/drm/xe/xe_vsec.c
@@ -24,6 +24,7 @@
#define BMG_DEVICE_ID 0xE2F8
static struct intel_vsec_header bmg_telemetry = {
+ .rev = 1,
.length = 0x10,
.id = VSEC_ID_TELEMETRY,
.num_entries = 2,
@@ -32,28 +33,19 @@ static struct intel_vsec_header bmg_telemetry = {
.offset = BMG_DISCOVERY_OFFSET,
};
-static struct intel_vsec_header bmg_punit_crashlog = {
+static struct intel_vsec_header bmg_crashlog = {
+ .rev = 1,
.length = 0x10,
.id = VSEC_ID_CRASHLOG,
- .num_entries = 1,
- .entry_size = 4,
+ .num_entries = 2,
+ .entry_size = 6,
.tbir = 0,
.offset = BMG_DISCOVERY_OFFSET + 0x60,
};
-static struct intel_vsec_header bmg_oobmsm_crashlog = {
- .length = 0x10,
- .id = VSEC_ID_CRASHLOG,
- .num_entries = 1,
- .entry_size = 4,
- .tbir = 0,
- .offset = BMG_DISCOVERY_OFFSET + 0x78,
-};
-
static struct intel_vsec_header *bmg_capabilities[] = {
&bmg_telemetry,
- &bmg_punit_crashlog,
- &bmg_oobmsm_crashlog,
+ &bmg_crashlog,
NULL
};
@@ -149,8 +141,8 @@ static int xe_guid_decode(u32 guid, int *index, u32 *offset)
return 0;
}
-static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
- u32 count)
+int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
+ u32 count)
{
struct xe_device *xe = pdev_to_xe_device(pdev);
void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h
index 5777c53faec2..dabfb4e02d70 100644
--- a/drivers/gpu/drm/xe/xe_vsec.h
+++ b/drivers/gpu/drm/xe/xe_vsec.h
@@ -4,8 +4,12 @@
#ifndef _XE_VSEC_H_
#define _XE_VSEC_H_
+#include <linux/types.h>
+
+struct pci_dev;
struct xe_device;
void xe_vsec_init(struct xe_device *xe);
+int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset, u32 count);
#endif
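xe_pmt_telem_read() was previously static; exporting it through this header lets the platform telemetry (PMT) side pull samples straight out of the Xe MMIO space. A hedged caller fragment (guid and the read size are placeholder values):

/* Sketch: reading two 64-bit telemetry samples from offset 0. */
u64 sample[2];
int ret;

ret = xe_pmt_telem_read(pdev, guid, sample, 0, ARRAY_SIZE(sample));
if (ret < 0)
	return ret;	/* propagate error */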
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 24f644c0a673..22a98600fd8f 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -10,6 +10,7 @@
#include <linux/compiler_types.h>
#include <linux/fault-inject.h>
+#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>
#include "regs/xe_engine_regs.h"
@@ -230,6 +231,18 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("16025250150"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_ACTIONS(SET(LSN_VC_REG2,
+ LSN_LNI_WGT(1) |
+ LSN_LNE_WGT(1) |
+ LSN_DIM_X_WGT(1) |
+ LSN_DIM_Y_WGT(1) |
+ LSN_DIM_Z_WGT(1)))
+ },
+
/* Xe2_HPM */
{ XE_RTP_NAME("16021867713"),
@@ -273,6 +286,18 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ { XE_RTP_NAME("16021865536"),
+ XE_RTP_RULES(MEDIA_VERSION(3002),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(3002),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
{ XE_RTP_NAME("14021486841"),
XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0),
ENGINE_CLASS(VIDEO_DECODE)),
@@ -491,10 +516,6 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
},
- { XE_RTP_NAME("16018737384"),
- XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
- },
/*
* These two workarounds are the same, just applying to different
* engines. Although Wa_18032095049 (for the RCS) isn't required on
@@ -521,31 +542,38 @@ static const struct xe_rtp_entry_sr engine_was[] = {
/* Xe2_HPG */
{ XE_RTP_NAME("16018712365"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, XE2_ALLOC_DPA_STARVE_FIX_DIS))
},
{ XE_RTP_NAME("16018737384"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
},
{ XE_RTP_NAME("14019988906"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
},
{ XE_RTP_NAME("14019877138"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
},
{ XE_RTP_NAME("14020338487"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN3, XE2_EUPEND_CHK_FLUSH_DIS))
},
{ XE_RTP_NAME("18032247524"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, SEQUENTIAL_ACCESS_UPGRADE_DISABLE))
},
{ XE_RTP_NAME("14018471104"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
},
/*
@@ -554,7 +582,7 @@ static const struct xe_rtp_entry_sr engine_was[] = {
* apply this to all engines for simplicity.
*/
{ XE_RTP_NAME("16021639441"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002)),
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
GHWSP_CSB_REPORT_DIS |
PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
@@ -566,11 +594,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, WR_REQ_CHAINING_DIS))
},
{ XE_RTP_NAME("14021402888"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
- { XE_RTP_NAME("14021821874"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ { XE_RTP_NAME("14021821874, 14022954250"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT))
},
@@ -628,6 +657,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(RING_PSMI_CTL(0), RC_SEMA_IDLE_MSG_DISABLE,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ { XE_RTP_NAME("14021402888"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
@@ -762,7 +795,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT))
},
{ XE_RTP_NAME("18033852989"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
},
{ XE_RTP_NAME("14021567978"),
@@ -795,7 +828,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN))
},
{ XE_RTP_NAME("14019386621"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(VF_SCRATCHPAD, XE2_VFG_TED_CREDIT_INTERFACE_DISABLE))
},
{ XE_RTP_NAME("14020756599"),
@@ -812,9 +845,17 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_AUTOSTRIP))
},
{ XE_RTP_NAME("15016589081"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
+ { XE_RTP_NAME("22021007897"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
+ },
+ { XE_RTP_NAME("18033852989"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
+ },
/* Xe3_LPG */
{ XE_RTP_NAME("14021490052"),
@@ -836,9 +877,34 @@ static __maybe_unused const struct xe_rtp_entry oob_was[] = {
static_assert(ARRAY_SIZE(oob_was) - 1 == _XE_WA_OOB_COUNT);
+static __maybe_unused const struct xe_rtp_entry device_oob_was[] = {
+#include <generated/xe_device_wa_oob.c>
+ {}
+};
+
+static_assert(ARRAY_SIZE(device_oob_was) - 1 == _XE_DEVICE_WA_OOB_COUNT);
+
__diag_pop();
/**
+ * xe_wa_process_device_oob - process OOB workaround table
+ * @xe: device instance to process workarounds for
+ *
+ * process OOB workaround table for this device, marking in @xe the
+ * workarounds that are active.
+ */
+void xe_wa_process_device_oob(struct xe_device *xe)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(xe);
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, xe->wa_active.oob, ARRAY_SIZE(device_oob_was));
+
+ xe->wa_active.oob_initialized = true;
+ xe_rtp_process(&ctx, device_oob_was);
+}
+
+/**
* xe_wa_process_oob - process OOB workaround table
* @gt: GT instance to process workarounds for
*
@@ -907,6 +973,28 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe)
}
/**
+ * xe_wa_device_init - initialize device with workaround oob bookkeeping
+ * @xe: Xe device instance to initialize
+ *
+ * Returns 0 for success, negative error code otherwise
+ */
+int xe_wa_device_init(struct xe_device *xe)
+{
+ unsigned long *p;
+
+ p = drmm_kzalloc(&xe->drm,
+ sizeof(*p) * BITS_TO_LONGS(ARRAY_SIZE(device_oob_was)),
+ GFP_KERNEL);
+
+ if (!p)
+ return -ENOMEM;
+
+ xe->wa_active.oob = p;
+
+ return 0;
+}
+
+/**
* xe_wa_init - initialize gt with workaround bookkeeping
* @gt: GT instance to initialize
*
@@ -940,6 +1028,16 @@ int xe_wa_init(struct xe_gt *gt)
}
ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */
+void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p)
+{
+ size_t idx;
+
+ drm_printf(p, "Device OOB Workarounds\n");
+ for_each_set_bit(idx, xe->wa_active.oob, ARRAY_SIZE(device_oob_was))
+ if (device_oob_was[idx].name)
+ drm_printf_indent(p, 1, "%s\n", device_oob_was[idx].name);
+}
+
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
{
size_t idx;
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index 52337405b5bc..f3880c65cb8d 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -13,17 +13,19 @@ struct xe_gt;
struct xe_hw_engine;
struct xe_tile;
+int xe_wa_device_init(struct xe_device *xe);
int xe_wa_init(struct xe_gt *gt);
+void xe_wa_process_device_oob(struct xe_device *xe);
void xe_wa_process_oob(struct xe_gt *gt);
void xe_wa_process_gt(struct xe_gt *gt);
void xe_wa_process_engine(struct xe_hw_engine *hwe);
void xe_wa_process_lrc(struct xe_hw_engine *hwe);
void xe_wa_apply_tile_workarounds(struct xe_tile *tile);
+void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p);
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
/**
- * XE_WA - Out-of-band workarounds, that don't fit the lifecycle any
- * other more specific type
+ * XE_WA - Out-of-band workarounds, to be queried and called as needed.
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
@@ -32,4 +34,20 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
})
+/**
+ * XE_DEVICE_WA - Out-of-band Device workarounds, to be queried and called
+ * as needed.
+ * @xe__: xe_device
+ * @id__: XE_DEVICE_WA_OOB_<id__>, as generated by build system in generated/xe_device_wa_oob.h
+ */
+#define XE_DEVICE_WA(xe__, id__) ({ \
+ xe_assert(xe__, (xe__)->wa_active.oob_initialized); \
+ test_bit(XE_DEVICE_WA_OOB_ ## id__, (xe__)->wa_active.oob); \
+})
+
+#define XE_DEVICE_WA_DISABLE(xe__, id__) ({ \
+ xe_assert(xe__, (xe__)->wa_active.oob_initialized); \
+ clear_bit(XE_DEVICE_WA_OOB_ ## id__, (xe__)->wa_active.oob); \
+})
+
#endif
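XE_DEVICE_WA() mirrors XE_WA() but keys off device-level rules rather than a GT, and XE_DEVICE_WA_DISABLE() allows a workaround to be retracted once probe learns more. A usage sketch (the workaround id and helper are hypothetical):

/* Sketch: query a device-scope workaround after
 * xe_wa_process_device_oob(xe) has populated the bitmap. */
if (XE_DEVICE_WA(xe, 14022085890))
	apply_soc_workaround(xe);	/* hypothetical helper */

/* And retract it if a later probe step proves it unnecessary: */
XE_DEVICE_WA_DISABLE(xe, 14022085890);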
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 9b9e176992a8..e990f20eccfe 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -21,7 +21,8 @@
GRAPHICS_VERSION_RANGE(1270, 1274)
MEDIA_VERSION(1300)
PLATFORM(DG2)
-14018094691 GRAPHICS_VERSION(2004)
+14018094691 GRAPHICS_VERSION_RANGE(2001, 2002)
+ GRAPHICS_VERSION(2004)
14019882105 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
18024947630 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
@@ -30,17 +31,19 @@
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
GRAPHICS_VERSION(3001)
-14022293748 GRAPHICS_VERSION(2001)
+14022293748 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
-22019794406 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(3003)
+22019794406 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
+ GRAPHICS_VERSION(3003)
22019338487 MEDIA_VERSION(2000)
- GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
22019338487_display PLATFORM(LUNARLAKE)
-16023588340 GRAPHICS_VERSION(2001)
+16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
14019789679 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION_RANGE(1270, 2004)
no_media_l3 MEDIA_VERSION(3000)
@@ -57,3 +60,15 @@ no_media_l3 MEDIA_VERSION(3000)
GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
MEDIA_VERSION_RANGE(1301, 3000)
+ MEDIA_VERSION(3002)
+ GRAPHICS_VERSION(3003)
+16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
+ MEDIA_VERSION_RANGE(1300, 3000)
+ MEDIA_VERSION(3002)
+ GRAPHICS_VERSION(3003)
+
+# SoC workaround - currently applies to all platforms with the following
+# primary GT GMDID
+14022085890 GRAPHICS_VERSION(2001)
+
+15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
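For readers new to this file, each entry pairs an OOB workaround id with the match rules under which it applies; the build turns every id into a generated bit tested from C. A hedged illustration in the file's own comment syntax:

# (illustration, not part of this file) An entry such as:
#   14022085890	GRAPHICS_VERSION(2001)
# becomes XE_WA_OOB_14022085890 in generated/xe_wa_oob.h and is then
# queried in driver code as: if (XE_WA(gt, 14022085890)) { ... }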
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index dfa78a49a6d9..806ec66ee7f7 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -54,6 +54,7 @@ static const struct drm_framebuffer_funcs fb_funcs = {
static struct drm_framebuffer *
fb_create(struct drm_device *dev, struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xen_drm_front_drm_info *drm_info = dev->dev_private;
@@ -61,7 +62,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
struct drm_gem_object *gem_obj;
int ret;
- fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
+ fb = drm_gem_fb_create_with_funcs(dev, filp, info, mode_cmd, &fb_funcs);
if (IS_ERR(fb))
return fb;
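This hunk tracks a DRM core change: fb_create now receives the resolved struct drm_format_info from the core instead of each driver re-deriving it from mode_cmd->pixel_format. The minimal shape a driver without custom funcs would take (a sketch, not from this patch):

static struct drm_framebuffer *
example_fb_create(struct drm_device *dev, struct drm_file *filp,
		  const struct drm_format_info *info,
		  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* forward the core-resolved format info rather than looking it up */
	return drm_gem_fb_create(dev, filp, info, mode_cmd);
}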
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index dbecca9bdd54..cfabf5e2a0bb 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -22,6 +22,7 @@ config DRM_ZYNQMP_DPSUB_AUDIO
bool "ZynqMP DisplayPort Audio Support"
depends on DRM_ZYNQMP_DPSUB
depends on SND && SND_SOC
+ depends on SND_SOC=y || DRM_ZYNQMP_DPSUB=m
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Choose this option to enable DisplayPort audio support in the ZynqMP
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index a6a4a871f197..34ddbf98e81d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1481,6 +1481,7 @@ static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp,
*/
static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
@@ -1494,7 +1495,7 @@ static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
}
if (dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, dp->next_bridge,
+ ret = drm_bridge_attach(encoder, dp->next_bridge,
bridge, flags);
if (ret < 0)
goto error;
@@ -1719,7 +1720,8 @@ disconnected:
return connector_status_disconnected;
}
-static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+zynqmp_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
@@ -1868,20 +1870,14 @@ static int zynqmp_dp_test_setup(struct zynqmp_dp *dp)
static ssize_t zynqmp_dp_pattern_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct dentry *dentry = file->f_path.dentry;
struct zynqmp_dp *dp = file->private_data;
char buf[16];
ssize_t ret;
- ret = debugfs_file_get(dentry);
- if (unlikely(ret))
- return ret;
-
scoped_guard(mutex, &dp->lock)
ret = snprintf(buf, sizeof(buf), "%s\n",
test_pattern_str[dp->test.pattern]);
- debugfs_file_put(dentry);
return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
}
@@ -1889,27 +1885,20 @@ static ssize_t zynqmp_dp_pattern_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct dentry *dentry = file->f_path.dentry;
struct zynqmp_dp *dp = file->private_data;
char buf[16];
ssize_t ret;
int pattern;
- ret = debugfs_file_get(dentry);
- if (unlikely(ret))
- return ret;
-
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf,
count);
if (ret < 0)
- goto out;
+ return ret;
buf[ret] = '\0';
pattern = sysfs_match_string(test_pattern_str, buf);
- if (pattern < 0) {
- ret = -EINVAL;
- goto out;
- }
+ if (pattern < 0)
+ return -EINVAL;
mutex_lock(&dp->lock);
dp->test.pattern = pattern;
@@ -1918,8 +1907,6 @@ static ssize_t zynqmp_dp_pattern_write(struct file *file,
dp->test.custom) ?: ret;
mutex_unlock(&dp->lock);
-out:
- debugfs_file_put(dentry);
return ret;
}
@@ -2025,20 +2012,13 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_active, zynqmp_dp_active_get,
static ssize_t zynqmp_dp_custom_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct dentry *dentry = file->f_path.dentry;
struct zynqmp_dp *dp = file->private_data;
ssize_t ret;
- ret = debugfs_file_get(dentry);
- if (unlikely(ret))
- return ret;
-
mutex_lock(&dp->lock);
ret = simple_read_from_buffer(user_buf, count, ppos, &dp->test.custom,
sizeof(dp->test.custom));
mutex_unlock(&dp->lock);
-
- debugfs_file_put(dentry);
return ret;
}
@@ -2046,18 +2026,13 @@ static ssize_t zynqmp_dp_custom_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct dentry *dentry = file->f_path.dentry;
struct zynqmp_dp *dp = file->private_data;
ssize_t ret;
char buf[sizeof(dp->test.custom)];
- ret = debugfs_file_get(dentry);
- if (unlikely(ret))
- return ret;
-
ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
if (ret < 0)
- goto out;
+ return ret;
mutex_lock(&dp->lock);
memcpy(dp->test.custom, buf, ret);
@@ -2065,9 +2040,6 @@ static ssize_t zynqmp_dp_custom_write(struct file *file,
ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern,
dp->test.custom) ?: ret;
mutex_unlock(&dp->lock);
-
-out:
- debugfs_file_put(dentry);
return ret;
}
@@ -2438,9 +2410,9 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
struct zynqmp_dp *dp;
int ret;
- dp = kzalloc(sizeof(*dp), GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
+ dp = devm_drm_bridge_alloc(&pdev->dev, struct zynqmp_dp, bridge, &zynqmp_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return PTR_ERR(dp);
dp->dev = &pdev->dev;
dp->dpsub = dpsub;
@@ -2453,33 +2425,25 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
/* Acquire all resources (IOMEM, IRQ and PHYs). */
dp->iomem = devm_platform_ioremap_resource_byname(pdev, "dp");
- if (IS_ERR(dp->iomem)) {
- ret = PTR_ERR(dp->iomem);
- goto err_free;
- }
+ if (IS_ERR(dp->iomem))
+ return PTR_ERR(dp->iomem);
dp->irq = platform_get_irq(pdev, 0);
- if (dp->irq < 0) {
- ret = dp->irq;
- goto err_free;
- }
+ if (dp->irq < 0)
+ return dp->irq;
dp->reset = devm_reset_control_get(dp->dev, NULL);
- if (IS_ERR(dp->reset)) {
- if (PTR_ERR(dp->reset) != -EPROBE_DEFER)
- dev_err(dp->dev, "failed to get reset: %ld\n",
- PTR_ERR(dp->reset));
- ret = PTR_ERR(dp->reset);
- goto err_free;
- }
+ if (IS_ERR(dp->reset))
+ return dev_err_probe(dp->dev, PTR_ERR(dp->reset),
+ "failed to get reset\n");
ret = zynqmp_dp_reset(dp, true);
if (ret < 0)
- goto err_free;
+ return ret;
ret = zynqmp_dp_reset(dp, false);
if (ret < 0)
- goto err_free;
+ return ret;
ret = zynqmp_dp_phy_probe(dp);
if (ret)
@@ -2487,7 +2451,6 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
/* Initialize the bridge. */
bridge = &dp->bridge;
- bridge->funcs = &zynqmp_dp_bridge_funcs;
bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -2540,8 +2503,6 @@ err_phy_exit:
zynqmp_dp_phy_exit(dp);
err_reset:
zynqmp_dp_reset(dp, true);
-err_free:
- kfree(dp);
return ret;
}
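Switching to devm_drm_bridge_alloc() ties the allocation's lifetime to the device and sets the bridge funcs at allocation time, which is why the err_free label, the final kfree(), and the manual bridge->funcs assignment all disappear in this hunk. The generic shape of the pattern (sketch):

/* Sketch: devm-managed bridge allocation embedding struct drm_bridge
 * as the "bridge" member of the driver structure. */
dp = devm_drm_bridge_alloc(&pdev->dev, struct zynqmp_dp, bridge,
			   &zynqmp_dp_bridge_funcs);
if (IS_ERR(dp))
	return PTR_ERR(dp);
/* every subsequent error path may now simply return -- no kfree() */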
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
index f07ff4eb3a6d..1a46a046103f 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
@@ -45,7 +45,6 @@ struct zynqmp_dpsub_audio {
struct {
struct snd_soc_dai_link_component cpu;
- struct snd_soc_dai_link_component codec;
struct snd_soc_dai_link_component platform;
} components[ZYNQMP_NUM_PCMS];
@@ -403,10 +402,8 @@ int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
link->num_cpus = 1;
link->cpus[0].dai_name = audio->dai_name;
- link->codecs = &audio->components[i].codec;
+ link->codecs = &snd_soc_dummy_dlc;
link->num_codecs = 1;
- link->codecs[0].name = "snd-soc-dummy";
- link->codecs[0].dai_name = "snd-soc-dummy-dai";
link->platforms = &audio->components[i].platform;
link->num_platforms = 1;
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 3a9544b97bc5..2764c4b17c5e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -180,7 +180,6 @@ static int zynqmp_dpsub_parse_dt(struct zynqmp_dpsub *dpsub)
void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub)
{
kfree(dpsub->disp);
- kfree(dpsub->dp);
kfree(dpsub);
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index b47463473472..2bee0a2275ed 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -373,6 +373,7 @@ static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
static struct drm_framebuffer *
zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
@@ -383,7 +384,7 @@ zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
for (i = 0; i < ARRAY_SIZE(cmd.pitches); ++i)
cmd.pitches[i] = ALIGN(cmd.pitches[i], dpsub->dma_align);
- return drm_gem_fb_create(drm, file_priv, &cmd);
+ return drm_gem_fb_create(drm, file_priv, info, &cmd);
}
static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {